Source-Changes-HG archive


[src/trunk]: src/sys/arch KNF.



details:   https://anonhg.NetBSD.org/src/rev/dabad6da1f8b
branches:  trunk
changeset: 474746:dabad6da1f8b
user:      chs <chs%NetBSD.org@localhost>
date:      Sun Jul 18 21:33:20 1999 +0000

description:
KNF.

diffstat:

 sys/arch/i386/i386/pmap.c     |  5068 ++++++++++++++++++++--------------------
 sys/arch/i386/include/pmap.h  |   257 +-
 sys/arch/pc532/include/pmap.h |   240 +-
 sys/arch/pc532/pc532/pmap.c   |  4786 +++++++++++++++++++-------------------
 4 files changed, 5097 insertions(+), 5254 deletions(-)

diffs (truncated from 12091 to 300 lines):

diff -r d61c8729390f -r dabad6da1f8b sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Sun Jul 18 17:54:19 1999 +0000
+++ b/sys/arch/i386/i386/pmap.c Sun Jul 18 21:33:20 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.73 1999/07/08 18:05:28 thorpej Exp $        */
+/*     $NetBSD: pmap.c,v 1.74 1999/07/18 21:33:20 chs Exp $    */
 
 /*
  *
@@ -39,21 +39,21 @@
  *
  * history of this pmap module: in addition to my own input, i used
  *    the following references for this rewrite of the i386 pmap:
- * 
- * [1] the NetBSD i386 pmap.   this pmap appears to be based on the 
+ *
+ * [1] the NetBSD i386 pmap.   this pmap appears to be based on the
  *     BSD hp300 pmap done by Mike Hibler at University of Utah.
  *     it was then ported to the i386 by William Jolitz of UUNET
  *     Technologies, Inc.   Then Charles M. Hannum of the NetBSD
  *     project fixed some bugs and provided some speed ups.
  *
- * [2] the FreeBSD i386 pmap.   this pmap seems to be the 
+ * [2] the FreeBSD i386 pmap.   this pmap seems to be the
  *     Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson
  *     and David Greenman.
- * 
+ *
  * [3] the Mach pmap.   this pmap, from CMU, seems to have migrated
  *     between several processors.   the VAX version was done by
  *     Avadis Tevanian, Jr., and Michael Wayne Young.    the i386
- *     version was done by Lance Berc, Mike Kupfer, Bob Baron, 
+ *     version was done by Lance Berc, Mike Kupfer, Bob Baron,
  *     David Golub, and Richard Draves.    the alpha version was
  *     done by Alessandro Forin (CMU/Mach) and Chris Demetriou
  *     (NetBSD/alpha).
@@ -87,11 +87,11 @@
 /*
  * general info:
  *
- *  - for an explanation of how the i386 MMU hardware works see 
+ *  - for an explanation of how the i386 MMU hardware works see
  *    the comments in <machine/pte.h>.
  *
  *  - for an explanation of the general memory structure used by
- *    this pmap (including the recursive mapping), see the comments 
+ *    this pmap (including the recursive mapping), see the comments
  *    in <machine/pmap.h>.
  *
  * this file contains the code for the "pmap module."   the module's
@@ -117,7 +117,7 @@
  * in the upper layer.
  *
  * data structures we use include:
- * 
+ *
  *  - struct pmap: describes the address space of one thread
  *  - struct pv_entry: describes one <PMAP,VA> mapping of a PA
  *  - struct pv_head: there is one pv_head per managed page of
@@ -139,23 +139,23 @@
  *  - there are three data structures that we must dynamically allocate:
  *
  * [A] new process' page directory page (PDP)
- *     - plan 1: done at pmap_pinit() we use 
- *       uvm_km_alloc(kernel_map, NBPG)  [fka kmem_alloc] to do this 
+ *     - plan 1: done at pmap_pinit() we use
+ *       uvm_km_alloc(kernel_map, NBPG)  [fka kmem_alloc] to do this
  *       allocation.
  *
- * if we are low in free physical memory then we sleep in 
+ * if we are low in free physical memory then we sleep in
  * uvm_km_alloc -- in this case this is ok since we are creating
  * a new pmap and should not be holding any locks.
- * 
+ *
  * if the kernel is totally out of virtual space
  * (i.e. uvm_km_alloc returns NULL), then we panic.
- * 
+ *
  * XXX: the fork code currently has no way to return an "out of
  * memory, try again" error code since uvm_fork [fka vm_fork]
  * is a void function.
  *
  * [B] new page tables pages (PTP)
- *     - plan 1: call uvm_pagealloc() 
+ *     - plan 1: call uvm_pagealloc()
  *             => success: zero page, add to pm_pdir
  *             => failure: we are out of free vm_pages
  *     - plan 2: using a linked LIST of active pmaps we attempt
@@ -168,9 +168,9 @@
  *
  * note: for kernel PTPs, we start with NKPTP of them.   as we map
  * kernel memory (at uvm_map time) we check to see if we've grown
- * the kernel pmap.   if so, we call the optional function 
- * pmap_growkernel() to grow the kernel PTPs in advance.    
- * 
+ * the kernel pmap.   if so, we call the optional function
+ * pmap_growkernel() to grow the kernel PTPs in advance.
+ *
  * [C] pv_entry structures
  *     - plan 1: try to allocate one off the free list
  *             => success: done!
@@ -187,12 +187,12 @@
  *             => success: map it in, free the pv_entry's, DONE!
  *             => failure: kmem_object locked, no free vm_pages, etc.
  *                     save VA for later call to [a], go to plan 3.
- *     - plan 3: using the pv_entry/pv_head lists find a pv_entry 
+ *     - plan 3: using the pv_entry/pv_head lists find a pv_entry
  *             structure that is part of a non-kernel lockable pmap
  *             and "steal" that pv_entry by removing the mapping
  *             and reusing that pv_entry.
  *             => success: done
- *             => failure: highly unlikely: unable to lock and steal 
+ *             => failure: highly unlikely: unable to lock and steal
  *                     pv_entry
  *     - plan 4: we panic.
  */
@@ -209,7 +209,7 @@
  *    access to the pmap system.   most operations lock the pmap
  *    structure first, then they lock the pv_lists (if needed).
  *    however, some operations such as pmap_page_protect lock
- *    the pv_lists and then lock pmaps.   in order to prevent a 
+ *    the pv_lists and then lock pmaps.   in order to prevent a
  *    cycle, we require a mutex lock when locking the pv_lists
  *    first.   thus, the "pmap = >pv_list" lockers must gain a
  *    read-lock on pmap_main_lock before locking the pmap.   and
@@ -218,7 +218,7 @@
  *    can write-lock a lock at a time, this provides mutex.
  *
  * "simple" locks:
- * 
+ *
  * - pmap lock (per pmap, part of uvm_object)
  *   this lock protects the fields in the pmap structure including
  *   the non-kernel PDEs in the PDP, and the PTEs.  it also locks
@@ -228,7 +228,7 @@
  * - pvh_lock (per pv_head)
  *   this lock protects the pv_entry list which is chained off the
  *   pv_head structure for a specific managed PA.   it is locked
- *   when traversing the list (e.g. adding/removing mappings, 
+ *   when traversing the list (e.g. adding/removing mappings,
  *   syncing R/M bits, etc.)
  *
  * - pvalloc_lock
@@ -291,7 +291,7 @@
 struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */
 
 /*
- * nkpde is the number of kernel PTPs allocated for the kernel at 
+ * nkpde is the number of kernel PTPs allocated for the kernel at
  * boot time (NKPTP is a compile time override).   this number can
  * grow dynamically as needed (but once allocated, we never free
  * kernel PTPs).
@@ -303,7 +303,7 @@
 #endif
 
 /*
- * pmap_pg_g: if our processor supports PG_G in the PTE then we 
+ * pmap_pg_g: if our processor supports PG_G in the PTE then we
  * set pmap_pg_g to PG_G (otherwise it is zero).
  */
 
@@ -397,8 +397,8 @@
 #define ALLOCPV_TRY    1       /* just try to allocate, don't steal */
 #define ALLOCPV_NONEED 2       /* don't need PV, just growing cache */
 static struct pv_entry *pmap_alloc_pvpage __P((int));
-static void             pmap_enter_pv __P((struct pv_head *, 
-                                           struct pv_entry *, struct pmap *, 
+static void             pmap_enter_pv __P((struct pv_head *,
+                                           struct pv_entry *, struct pmap *,
                                            vaddr_t, struct vm_page *));
 static void             pmap_free_pv __P((struct pv_entry *));
 static void             pmap_free_pvs __P((struct pv_entry *));
@@ -410,24 +410,24 @@
 static pt_entry_t      *pmap_map_ptes __P((struct pmap *));
 static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *,
                                             vaddr_t));
-static boolean_t        pmap_remove_pte __P((struct pmap *, struct vm_page *, 
+static boolean_t        pmap_remove_pte __P((struct pmap *, struct vm_page *,
                                              pt_entry_t *, vaddr_t));
-static void             pmap_remove_ptes __P((struct pmap *, 
+static void             pmap_remove_ptes __P((struct pmap *,
                                               struct pmap_remove_record *,
-                                              struct vm_page *, vaddr_t, 
+                                              struct vm_page *, vaddr_t,
                                               vaddr_t, vaddr_t));
-static struct vm_page  *pmap_steal_ptp __P((struct uvm_object *, 
+static struct vm_page  *pmap_steal_ptp __P((struct uvm_object *,
                                             vaddr_t));
 static vaddr_t          pmap_tmpmap_pa __P((paddr_t));
 static pt_entry_t      *pmap_tmpmap_pvepte __P((struct pv_entry *));
 static void             pmap_tmpunmap_pa __P((void));
 static void             pmap_tmpunmap_pvepte __P((struct pv_entry *));
-static boolean_t        pmap_transfer_ptes __P((struct pmap *, 
+static boolean_t        pmap_transfer_ptes __P((struct pmap *,
                                         struct pmap_transfer_location *,
                                         struct pmap *,
                                         struct pmap_transfer_location *,
                                         int, boolean_t));
-static boolean_t        pmap_try_steal_pv __P((struct pv_head *, 
+static boolean_t        pmap_try_steal_pv __P((struct pv_head *,
                                                struct pv_entry *,
                                                struct pv_entry *));
 static void            pmap_unmap_ptes __P((struct pmap *));
@@ -444,12 +444,12 @@
  *             of course the kernel is always loaded
  */
 
-__inline static boolean_t pmap_is_curpmap(pmap)
-
-struct pmap *pmap;
-
+__inline static boolean_t
+pmap_is_curpmap(pmap)
+       struct pmap *pmap;
 {
-  return((pmap == pmap_kernel()) || (pmap->pm_pdirpa == (paddr_t) rcr3()));
+       return((pmap == pmap_kernel()) ||
+              (pmap->pm_pdirpa == (paddr_t) rcr3()));
 }
 
 /*
@@ -458,37 +458,35 @@
  * => returns with pmap_tmpptp_lock held
  */
 
-__inline static vaddr_t pmap_tmpmap_pa(pa)
-
-paddr_t pa;
-
+__inline static vaddr_t
+pmap_tmpmap_pa(pa)
+       paddr_t pa;
 {
-  simple_lock(&pmap_tmpptp_lock);
+       simple_lock(&pmap_tmpptp_lock);
 #if defined(DIAGNOSTIC)
-  if (*ptp_pte)
-    panic("pmap_tmpmap_pa: ptp_pte in use?");
+       if (*ptp_pte)
+               panic("pmap_tmpmap_pa: ptp_pte in use?");
 #endif
-  *ptp_pte = PG_V | PG_RW | pa;                /* always a new mapping */
-  return((vaddr_t)ptpp);
+       *ptp_pte = PG_V | PG_RW | pa;           /* always a new mapping */
+       return((vaddr_t)ptpp);
 }
 
 /*
  * pmap_tmpunmap_pa: unmap a tmp use page (undoes pmap_tmpmap_pa)
- * 
+ *
  * => we release pmap_tmpptp_lock
  */
 
-__inline static void pmap_tmpunmap_pa()
-
+__inline static void
+pmap_tmpunmap_pa()
 {
 #if defined(DIAGNOSTIC)
-  if (!pmap_valid_entry(*ptp_pte))
-    panic("pmap_tmpunmap_pa: our pte invalid?");
+       if (!pmap_valid_entry(*ptp_pte))
+               panic("pmap_tmpunmap_pa: our pte invalid?");
 #endif
-  *ptp_pte = 0;                /* zap! */
-  pmap_update_pg((vaddr_t)ptpp);
-  simple_unlock(&pmap_tmpptp_lock);
-  return;
+       *ptp_pte = 0;           /* zap! */
+       pmap_update_pg((vaddr_t)ptpp);
+       simple_unlock(&pmap_tmpptp_lock);
 }
 
 /*
@@ -498,22 +496,21 @@
  * => we may grab pmap_tmpptp_lock and return with it held
  */
 
-__inline static pt_entry_t *pmap_tmpmap_pvepte(pve)
-
-struct pv_entry *pve;
-
+__inline static pt_entry_t *
+pmap_tmpmap_pvepte(pve)
+       struct pv_entry *pve;
 {
 #ifdef DIAGNOSTIC
-  if (pve->pv_pmap == pmap_kernel())
-    panic("pmap_tmpmap_pvepte: attempt to map kernel");
+       if (pve->pv_pmap == pmap_kernel())
+               panic("pmap_tmpmap_pvepte: attempt to map kernel");
 #endif
 
-  /* is it current pmap?  use direct mapping... */
-  if (pmap_is_curpmap(pve->pv_pmap))
-    return(vtopte(pve->pv_va));
-
-  return( ((pt_entry_t *) pmap_tmpmap_pa(VM_PAGE_TO_PHYS(pve->pv_ptp)))



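The hunks above are purely mechanical style changes: trailing whitespace is stripped from the
block comments, and the old one-line function headers with two-space-indented bodies are
rewritten in KNF, NetBSD's Kernel Normal Form. Below is a minimal standalone sketch of that
layout; the function is made up for illustration and does not appear in pmap.c.

    #include <stdio.h>

    #if 0
    /*
     * Pre-KNF layout, as on the "-" lines of the diff: the whole header on
     * one line, the old-style parameter declaration floating between blank
     * lines, and a two-space-indented body.
     */
    static int square(n)

    int n;

    {
      return (n * n);
    }
    #endif

    /*
     * KNF layout, as on the "+" lines: the storage class and return type on
     * their own line, the function name and parameter list on the next, the
     * old-style (K&R) parameter declaration indented one tab, and the body
     * indented with tabs.
     */
    static int
    square(n)
    	int n;
    {
    	return (n * n);
    }

    int
    main(void)
    {
    	printf("%d\n", square(7));
    	return (0);
    }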
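The "[C] pv_entry structures" part of the comment block quoted above describes a four-plan
fallback for allocating pv_entry structures: take one off the free list, map a new pv page,
steal an entry from another pmap, and finally panic. The standalone C sketch below shows only
the shape of that fallback chain; every identifier in it is a hypothetical stand-in and none of
it is code from the commit.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins; none of these names exist in pmap.c. */
    struct pv_entry {
    	struct pv_entry *pv_next;
    };

    static struct pv_entry pool[4];
    static struct pv_entry *free_list;

    static struct pv_entry *
    try_free_list(void)		/* plan 1: pop one off the free list */
    {
    	struct pv_entry *pv = free_list;

    	if (pv != NULL)
    		free_list = pv->pv_next;
    	return pv;
    }

    static struct pv_entry *
    grow_pvpage_cache(void)	/* plan 2: map a new pv page */
    {
    	return NULL;		/* stand-in for an allocation failure */
    }

    static struct pv_entry *
    steal_pv(void)		/* plan 3: steal an entry from another pmap */
    {
    	return NULL;		/* stand-in for the "highly unlikely" failure */
    }

    static struct pv_entry *
    pv_alloc_sketch(void)
    {
    	struct pv_entry *pv;

    	if ((pv = try_free_list()) != NULL)
    		return pv;
    	if ((pv = grow_pvpage_cache()) != NULL)
    		return pv;
    	if ((pv = steal_pv()) != NULL)
    		return pv;
    	/* plan 4: the real pmap panics here */
    	fprintf(stderr, "pv_alloc: out of pv_entries\n");
    	exit(1);
    }

    int
    main(void)
    {
    	int i;

    	for (i = 0; i < 4; i++) {	/* seed the free list */
    		pool[i].pv_next = free_list;
    		free_list = &pool[i];
    	}
    	for (i = 0; i < 5; i++)		/* the fifth call exhausts every plan */
    		printf("got pv_entry %p\n", (void *)pv_alloc_sketch());
    	return 0;
    }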