Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86/x86 KNF a little, remove some stupid comments, ...
details: https://anonhg.NetBSD.org/src/rev/13256b65b988
branches: trunk
changeset: 346209:13256b65b988
user: maxv <maxv%NetBSD.org@localhost>
date: Fri Jul 01 12:12:06 2016 +0000
description:
KNF a little, remove some stupid comments, and add some when needed.
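The cleanups follow NetBSD's KNF conventions for block comments: capitalize
the first word, rewrap text to the full line width, and drop the stray blank
line between a comment and the declaration it documents. A minimal sketch of
the recurring pattern (see the hunks below):

        /* Before: */
        /*
         * other data structures
         */

        static bool pmap_initialized __read_mostly = false;

        /* After: */
        /*
         * Other data structures
         */
        static bool pmap_initialized __read_mostly = false;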
diffstat:
sys/arch/x86/x86/pmap.c | 198 +++++++++++++++++++++--------------------------
1 files changed, 89 insertions(+), 109 deletions(-)
diffs (truncated from 484 to 300 lines):
diff -r e7cc2b9e0628 -r 13256b65b988 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Fri Jul 01 11:57:10 2016 +0000
+++ b/sys/arch/x86/x86/pmap.c Fri Jul 01 12:12:06 2016 +0000
@@ -1,11 +1,11 @@
-/* $NetBSD: pmap.c,v 1.203 2016/07/01 11:57:10 maxv Exp $ */
+/* $NetBSD: pmap.c,v 1.204 2016/07/01 12:12:06 maxv Exp $ */
/*-
- * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2010, 2016 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Andrew Doran.
+ * by Andrew Doran, and by Maxime Villard.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -171,7 +171,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.203 2016/07/01 11:57:10 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.204 2016/07/01 12:12:06 maxv Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -366,7 +366,7 @@
static bool cpu_pat_enabled __read_mostly = false;
/*
- * global data structures
+ * Global data structures
*/
static struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */
@@ -388,7 +388,7 @@
* pmap_largepages: if our processor supports PG_PS and we are
* using it, this is set to true.
*/
-int pmap_largepages __read_mostly;
+int pmap_largepages __read_mostly = 0;
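pmap_largepages is flipped on later in pmap_bootstrap, once the CPU
advertises page-size extensions. A hedged sketch of that check, assuming
NetBSD's usual cpu_feature[]/CPUID_PSE/CR4_PSE definitions (the actual code
lies outside this hunk):

        if (cpu_feature[0] & CPUID_PSE) {
                lcr4(rcr4() | CR4_PSE); /* enable large pages */
                pmap_largepages = 1;
        }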
/*
* i386 physical memory comes in a big contig chunk with a small
@@ -466,48 +466,41 @@
}
/*
- * other data structures
+ * Other data structures
*/
-static pt_entry_t protection_codes[8] __read_mostly; /* maps MI prot to i386
- prot code */
+static pt_entry_t protection_codes[8] __read_mostly;
+
static bool pmap_initialized __read_mostly = false; /* pmap_init done yet? */
/*
- * the following two vaddr_t's are used during system startup
- * to keep track of how much of the kernel's VM space we have used.
- * once the system is started, the management of the remaining kernel
- * VM space is turned over to the kernel_map vm_map.
+ * The following two vaddr_t's are used during system startup to keep track of
+ * how much of the kernel's VM space we have used. Once the system is started,
+ * the management of the remaining kernel VM space is turned over to the
+ * kernel_map vm_map.
*/
-
static vaddr_t virtual_avail __read_mostly; /* VA of first free KVA */
static vaddr_t virtual_end __read_mostly; /* VA of last free KVA */
/*
* pool that pmap structures are allocated from
*/
-
static struct pool_cache pmap_cache;
/*
* pv_entry cache
*/
-
static struct pool_cache pmap_pv_cache;
#ifdef __HAVE_DIRECT_MAP
-
extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;
-
#else
-
/*
- * MULTIPROCESSOR: special VA's/ PTE's are actually allocated inside a
- * maxcpus*NPTECL array of PTE's, to avoid cache line thrashing
- * due to false sharing.
+ * MULTIPROCESSOR: special VAs and PTEs are actually allocated inside a
+ * (maxcpus * NPTECL) array of PTE, to avoid cache line thrashing due to
+ * false sharing.
*/
-
#ifdef MULTIPROCESSOR
#define PTESLEW(pte, id) ((pte)+(id)*NPTECL)
#define VASLEW(va,id) ((va)+(id)*NPTECL*PAGE_SIZE)
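Each CPU thus gets its own bank of the special PTEs and VAs, NPTECL entries
(one cache line) apart, so one CPU's PTE updates never dirty a cache line
another CPU is reading. A hedged usage sketch, modeled on the copy/zero
helpers later in the file (csrc_pte/csrcp are declared just below;
pmap_pa2pte() and pmap_update_pg() are the usual NetBSD x86 helpers):

        int id = cpu_number();
        pt_entry_t *spte = PTESLEW(csrc_pte, id);  /* this CPU's PTE slot */
        char *sva = VASLEW(csrcp, id);             /* the matching window VA */

        *spte = pmap_pa2pte(srcpa) | PG_V;         /* map the source page */
        pmap_update_pg((vaddr_t)sva);              /* flush the stale TLB entry */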
@@ -517,7 +510,7 @@
#endif
/*
- * special VAs and the PTEs that map them
+ * Special VAs and the PTEs that map them
*/
static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *early_zero_pte;
static char *csrcp, *cdstp, *zerop, *ptpp;
@@ -546,7 +539,7 @@
};
#endif /* PAE */
-extern vaddr_t idt_vaddr; /* we allocate IDT early */
+extern vaddr_t idt_vaddr;
extern paddr_t idt_paddr;
extern int end;
@@ -556,38 +549,32 @@
extern vaddr_t pentium_idt_vaddr;
#endif
-
/*
- * local prototypes
+ * Local prototypes
*/
#ifdef __HAVE_DIRECT_MAP
static void pmap_init_directmap(struct pmap *);
#endif
-
#ifndef XEN
static void pmap_remap_largepages(void);
#endif
-static struct vm_page *pmap_get_ptp(struct pmap *, vaddr_t,
- pd_entry_t * const *);
-static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
-static void pmap_freepage(struct pmap *, struct vm_page *, int);
-static void pmap_free_ptp(struct pmap *, struct vm_page *,
- vaddr_t, pt_entry_t *,
- pd_entry_t * const *);
-static bool pmap_remove_pte(struct pmap *, struct vm_page *,
- pt_entry_t *, vaddr_t,
- struct pv_entry **);
-static void pmap_remove_ptes(struct pmap *, struct vm_page *,
- vaddr_t, vaddr_t, vaddr_t,
- struct pv_entry **);
-
-static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
-static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
- long *);
-
-static bool pmap_reactivate(struct pmap *);
+static struct vm_page *pmap_get_ptp(struct pmap *, vaddr_t,
+ pd_entry_t * const *);
+static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
+static void pmap_freepage(struct pmap *, struct vm_page *, int);
+static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t,
+ pt_entry_t *, pd_entry_t * const *);
+static bool pmap_remove_pte(struct pmap *, struct vm_page *, pt_entry_t *,
+ vaddr_t, struct pv_entry **);
+static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t,
+ vaddr_t, struct pv_entry **);
+
+static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
+static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int, long *);
+
+static bool pmap_reactivate(struct pmap *);
/*
* p m a p h e l p e r f u n c t i o n s
@@ -1008,7 +995,11 @@
npte |= pmap_pat_flags(flags);
opte = pmap_pte_testset(pte, npte); /* zap! */
#if defined(DIAGNOSTIC)
- /* XXX For now... */
+ /*
+ * XXX: make sure we are not dealing with a large page, since the only
+ * large pages created are for the kernel image, and they should never
+ * be kentered.
+ */
if (opte & PG_PS)
panic("%s: PG_PS", __func__);
#endif
@@ -1065,7 +1056,6 @@
} else {
tlbflush();
}
-
}
void
@@ -1186,7 +1176,7 @@
* pmap_bootstrap_valloc: allocate a virtual address in the bootstrap area.
* This function is to be used before any VM system has been set up.
*
- * The va is taken from virtual_avail.
+ * The va is taken from virtual_avail.
*/
static vaddr_t
pmap_bootstrap_valloc(size_t npages)
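The body lies outside this hunk's context; per the comment above, it is a
simple bump allocator over virtual_avail. A sketch under that stated
behavior (not part of this diff):

        vaddr_t va = virtual_avail;
        virtual_avail += npages * PAGE_SIZE;
        return va;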
@@ -1200,7 +1190,7 @@
* pmap_bootstrap_palloc: allocate a physical address in the bootstrap area.
* This function is to be used before any VM system has been set up.
*
- * The pa is taken from avail_start.
+ * The pa is taken from avail_start.
*/
static paddr_t
pmap_bootstrap_palloc(size_t npages)
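Likewise for the physical-side allocator, presumably bumping avail_start by
whole pages (sketch, not shown in the truncated diff):

        paddr_t pa = avail_start;
        avail_start += npages * PAGE_SIZE;
        return pa;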
@@ -1211,13 +1201,12 @@
}
/*
- * pmap_bootstrap: get the system in a state where it can run with VM
- * properly enabled (called before main()). the VM system is
- * fully init'd later...
+ * pmap_bootstrap: get the system in a state where it can run with VM properly
+ * enabled (called before main()). The VM system is fully init'd later.
*
- * => on i386, locore.s has already enabled the MMU by allocating
- * a PDP for the kernel, and nkpde PTP's for the kernel.
- * => kva_start is the first free virtual address in kernel space
+ * => on i386, locore.S has already enabled the MMU by allocating a PDP for the
+ * kernel, and nkpde PTP's for the kernel.
+ * => kva_start is the first free virtual address in kernel space.
*/
void
pmap_bootstrap(vaddr_t kva_start)
@@ -1233,40 +1222,36 @@
pmap_pg_nx = (cpu_feature[2] & CPUID_NOX ? PG_NX : 0);
/*
- * set up our local static global vars that keep track of the
- * usage of KVM before kernel_map is set up
+ * Set up our local static global vars that keep track of the usage of
+ * KVM before kernel_map is set up.
*/
-
virtual_avail = kva_start; /* first free KVA */
virtual_end = VM_MAX_KERNEL_ADDRESS; /* last KVA */
/*
- * set up protection_codes: we need to be able to convert from
- * a MI protection code (some combo of VM_PROT...) to something
- * we can jam into a i386 PTE.
+ * Set up protection_codes: we need to be able to convert from a MI
+ * protection code (some combo of VM_PROT...) to something we can jam
+ * into an x86 PTE.
*/
-
- protection_codes[VM_PROT_NONE] = pmap_pg_nx; /* --- */
- protection_codes[VM_PROT_EXECUTE] = PG_RO | PG_X; /* --x */
- protection_codes[VM_PROT_READ] = PG_RO | pmap_pg_nx; /* -r- */
- protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO | PG_X;/* -rx */
- protection_codes[VM_PROT_WRITE] = PG_RW | pmap_pg_nx; /* w-- */
- protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW | PG_X;/* w-x */
+ protection_codes[VM_PROT_NONE] = pmap_pg_nx;
+ protection_codes[VM_PROT_EXECUTE] = PG_RO | PG_X;
+ protection_codes[VM_PROT_READ] = PG_RO | pmap_pg_nx;
+ protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO | PG_X;
+ protection_codes[VM_PROT_WRITE] = PG_RW | pmap_pg_nx;
+ protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW | PG_X;
protection_codes[VM_PROT_WRITE|VM_PROT_READ] = PG_RW | pmap_pg_nx;
- /* wr- */
- protection_codes[VM_PROT_ALL] = PG_RW | PG_X; /* wrx */
+ protection_codes[VM_PROT_ALL] = PG_RW | PG_X;
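Since VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE are distinct bits, the
eight-entry table is indexed directly by the MI protection value, and the
conversion described above is a single lookup. An illustrative sketch of the
consuming side (not part of the diff):

        /* Build the PTE bits for a mapping of physical page pa: */
        pt_entry_t npte = pmap_pa2pte(pa)
            | protection_codes[prot & VM_PROT_ALL] | PG_V;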
/*
- * now we init the kernel's pmap
+ * Now we init the kernel's pmap.
+ *
+ * The kernel pmap's pm_obj is not used for much. However, in user pmaps
+ * the pm_obj contains the list of active PTPs.
*
- * the kernel pmap's pm_obj is not used for much. however, in
- * user pmaps the pm_obj contains the list of active PTPs.
- * the pm_obj currently does not have a pager. it might be possible
- * to add a pager that would allow a process to read-only mmap its
- * own page tables (fast user level vtophys?). this may or may not
- * be useful.
+ * The pm_obj currently does not have a pager. It might be possible to
+ * add a pager that would allow a process to read-only mmap its own page
+ * tables (fast user-level vtophys?). This may or may not be useful.
*/
-
kpm = pmap_kernel();
for (i = 0; i < PTP_LEVELS - 1; i++) {
mutex_init(&kpm->pm_obj_lock[i], MUTEX_DEFAULT, IPL_NONE);
@@ -1359,8 +1344,8 @@
}
/*
- * now we allocate the "special" VAs which are used for tmp mappings