Source-Changes-HG archive

[src/netbsd-2-0]: src/sys/arch/alpha/alpha Pull up revision 1.211 (requested by thorpej in ticket #5528)



details:   https://anonhg.NetBSD.org/src/rev/f8caf83b013d
branches:  netbsd-2-0
changeset: 564820:f8caf83b013d
user:      riz <riz%NetBSD.org@localhost>
date:      Sun Aug 07 15:08:16 2005 +0000

description:
Pull up revision 1.211 (requested by thorpej in ticket #5528):
1. Disable the lazy allocation of lev1map in pmap_enter(), instead doing
   it in pmap_create(), and freeing the lev1map in pmap_destroy().  This
   means that pm_lev1map is consistent for the life of the pmap (a
   simplified sketch of this lifecycle follows the description).
2. pmap_extract() now uses vtophys() for the kernel pmap.  This avoids
   having to lock the kernel pmap, since kernel PT pages are never
   freed.
3. Because of (1), pmap_asn_alloc() no longer needs to operate on a
   locked pmap; pm_lev1map will never change over the life of the pmap,
   and all other access to the pmap is done in per-CPU fields or with
   atomic operations.
4. Because of (3), pmap_activate() no longer needs to lock the pmap
   to do its work, thus eliminating the deadlock with sched_lock
   described in PR port-alpha/25599.  This is safe because we are
   guaranteed that the pmap is still alive, since by definition an LWP
   that uses that pmap is about to run.
Thanks to Michael Hitch for the analysis, and Michael and Ragge for
testing.
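
To make the locking argument of items (1), (3) and (4) concrete, here is a
minimal user-space C sketch of the new lifecycle.  It is an illustration,
not the NetBSD code: the types, the 1024-entry table and the malloc-based
allocator are stand-ins (the real patch allocates from pmap_l1pt_cache
with PR_WAITOK, which is legal in pmap_create() because that path may
sleep).  Since pm_lev1map is set once at creation and never changes,
pmap_activate() needs only atomic per-CPU bookkeeping and no pmap lock,
which is what removes the lock-order conflict with sched_lock from
PR port-alpha/25599.

sketch (user-space C, illustrative only):

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef unsigned long pt_entry_t;

struct pmap {
        pt_entry_t *pm_lev1map;         /* fixed for the pmap's lifetime */
        atomic_ulong pm_cpus;           /* CPUs this pmap is active on */
};

static struct pmap *
pmap_create(void)
{
        struct pmap *pmap = calloc(1, sizeof(*pmap));

        if (pmap == NULL)
                return NULL;
        atomic_init(&pmap->pm_cpus, 0);
        /*
         * Eager allocation: costs one page even if the pmap never gets
         * a mapping, but pm_lev1map is now valid for the pmap's whole
         * life, so no other code path ever sees it change.
         */
        pmap->pm_lev1map = calloc(1024, sizeof(pt_entry_t));
        if (pmap->pm_lev1map == NULL) {
                free(pmap);
                return NULL;
        }
        return pmap;
}

static void
pmap_activate(struct pmap *pmap, unsigned int cpu_id)
{
        /*
         * No pmap lock: pm_lev1map cannot change under us, and the
         * only shared field we touch is updated atomically.
         */
        atomic_fetch_or(&pmap->pm_cpus, 1UL << cpu_id);
        /* ...here the kernel would load pm_lev1map into the MMU... */
}

static void
pmap_destroy(struct pmap *pmap)
{
        /* The pmap must contain no valid mappings by this point. */
        free(pmap->pm_lev1map);
        free(pmap);
}

int
main(void)
{
        struct pmap *pmap = pmap_create();

        assert(pmap != NULL);
        pmap_activate(pmap, 0);
        pmap_destroy(pmap);
        return 0;
}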

diffstat:

 sys/arch/alpha/alpha/pmap.c |  90 +++++++++++++++++++++++++++++++++++---------
 1 files changed, 71 insertions(+), 19 deletions(-)

diffs (224 lines):

diff -r 43bedb0dfe55 -r f8caf83b013d sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c       Fri Aug 05 20:23:06 2005 +0000
+++ b/sys/arch/alpha/alpha/pmap.c       Sun Aug 07 15:08:16 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.207 2004/01/13 18:50:40 nathanw Exp $ */
+/* $NetBSD: pmap.c,v 1.207.2.1 2005/08/07 15:08:16 riz Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -145,7 +145,7 @@
 
 #include <sys/cdefs.h>                 /* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207 2004/01/13 18:50:40 nathanw Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207.2.1 2005/08/07 15:08:16 riz Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -525,6 +525,17 @@
 int    pmap_physpage_delref(void *);
 
 /*
+ * Define PMAP_NO_LAZY_LEV1MAP in order to have a lev1map allocated
+ * in pmap_create(), rather than when the first mapping is entered.
+ * This causes pmaps to use an extra page of memory if no mappings
+ * are entered in them, but in practice this is probably not going
+ * to be a problem, and it allows us to avoid locking pmaps in
+ * pmap_activate(), which in turn allows us to avoid a deadlock with
+ * sched_lock via cpu_switch().
+ */
+#define        PMAP_NO_LAZY_LEV1MAP
+
+/*
  * PMAP_ISACTIVE{,_TEST}:
  *
  *     Check to see if a pmap is active on the current processor.
@@ -1207,6 +1218,11 @@
        TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
        simple_unlock(&pmap_all_pmaps_slock);
 
+#ifdef PMAP_NO_LAZY_LEV1MAP
+       i = pmap_lev1map_create(pmap, cpu_number());
+       KASSERT(i == 0);
+#endif
+
        return (pmap);
 }
 
@@ -1240,14 +1256,16 @@
        TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
        simple_unlock(&pmap_all_pmaps_slock);
 
-#ifdef DIAGNOSTIC
+#ifdef PMAP_NO_LAZY_LEV1MAP
+       pmap_lev1map_destroy(pmap, cpu_number());
+#endif
+
        /*
         * Since the pmap is supposed to contain no valid
-        * mappings at this point, this should never happen.
+        * mappings at this point, we should always see
+        * kernel_lev1map here.
         */
-       if (pmap->pm_lev1map != kernel_lev1map)
-               panic("pmap_destroy: pmap still contains valid mappings");
-#endif
+       KASSERT(pmap->pm_lev1map == kernel_lev1map);
 
        pool_put(&pmap_pmap_pool, pmap);
 }
@@ -1685,6 +1703,9 @@
                        panic("pmap_enter: user pmap, invalid va 0x%lx", va);
 #endif
 
+#ifdef PMAP_NO_LAZY_LEV1MAP
+               KASSERT(pmap->pm_lev1map != kernel_lev1map);
+#else
                /*
                 * If we're still referencing the kernel kernel_lev1map,
                 * create a new level 1 page table.  A reference will be
@@ -1715,6 +1736,7 @@
                                panic("pmap_enter: unable to create lev1map");
                        }
                }
+#endif /* PMAP_NO_LAZY_LEV1MAP */
 
                /*
                 * Check to see if the level 1 PTE is valid, and
@@ -2128,6 +2150,22 @@
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_extract(%p, %lx) -> ", pmap, va);
 #endif
+
+       /*
+        * Take a faster path for the kernel pmap.  Avoids locking,
+        * handles K0SEG.
+        */
+       if (pmap == pmap_kernel()) {
+               pa = vtophys(va);
+               if (pap != NULL)
+                       *pap = pa;
+#ifdef DEBUG
+               if (pmapdebug & PDB_FOLLOW)
+                       printf("0x%lx (kernel vtophys)\n", pa);
+#endif
+               return (pa != 0);       /* XXX */
+       }
+
        PMAP_LOCK(pmap);
 
        l1pte = pmap_l1pte(pmap, va);
@@ -2238,21 +2276,21 @@
                printf("pmap_activate(%p)\n", l);
 #endif
 
+#ifndef PMAP_NO_LAZY_LEV1MAP
        PMAP_LOCK(pmap);
-
-       /*
-        * Mark the pmap in use by this processor.
-        */
+#endif
+
+       /* Mark the pmap in use by this processor. */
        atomic_setbits_ulong(&pmap->pm_cpus, (1UL << cpu_id));
 
-       /*
-        * Allocate an ASN.
-        */
+       /* Allocate an ASN. */
        pmap_asn_alloc(pmap, cpu_id);
 
        PMAP_ACTIVATE(pmap, l, cpu_id);
 
+#ifndef PMAP_NO_LAZY_LEV1MAP
        PMAP_UNLOCK(pmap);
+#endif
 }
 
 /*
@@ -3261,12 +3299,18 @@
                panic("pmap_lev1map_create: pmap uses non-reserved ASN");
 #endif
 
+#ifdef PMAP_NO_LAZY_LEV1MAP
+       /* Being called from pmap_create() in this case; we can sleep. */
+       l1pt = pool_cache_get(&pmap_l1pt_cache, PR_WAITOK);
+#else
        l1pt = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
+#endif
        if (l1pt == NULL)
                return (ENOMEM);
 
        pmap->pm_lev1map = l1pt;
 
+#ifndef PMAP_NO_LAZY_LEV1MAP   /* guaranteed not to be active */
        /*
         * The page table base has changed; if the pmap was active,
         * reactivate it.
@@ -3276,6 +3320,7 @@
                PMAP_ACTIVATE(pmap, curlwp, cpu_id);
        }
        PMAP_LEV1MAP_SHOOTDOWN(pmap, cpu_id);
+#endif /* ! PMAP_NO_LAZY_LEV1MAP */
        return (0);
 }
 
@@ -3301,6 +3346,7 @@
         */
        pmap->pm_lev1map = kernel_lev1map;
 
+#ifndef PMAP_NO_LAZY_LEV1MAP   /* pmap is being destroyed */
        /*
         * The page table base has changed; if the pmap was active,
         * reactivate it.  Note that allocation of a new ASN is
@@ -3323,6 +3369,7 @@
        if (PMAP_ISACTIVE(pmap, cpu_id))
                PMAP_ACTIVATE(pmap, curlwp, cpu_id);
        PMAP_LEV1MAP_SHOOTDOWN(pmap, cpu_id);
+#endif /* ! PMAP_NO_LAZY_LEV1MAP */
 
        /*
         * Free the old level 1 page table page.
@@ -3561,11 +3608,13 @@
 #endif
 
        if (pmap_physpage_delref(l1pte) == 0) {
+#ifndef PMAP_NO_LAZY_LEV1MAP
                /*
                 * No more level 2 tables left, go back to the global
                 * kernel_lev1map.
                 */
                pmap_lev1map_destroy(pmap, cpu_id);
+#endif /* ! PMAP_NO_LAZY_LEV1MAP */
        }
 }
 
@@ -3597,6 +3646,13 @@
         * kernel mappings exist in that map, and all kernel mappings
         * have PG_ASM set.  If the pmap eventually gets its own
         * lev1map, an ASN will be allocated at that time.
+        *
+        * #ifdef PMAP_NO_LAZY_LEV1MAP
+        * Only the kernel pmap will reference kernel_lev1map.  Do the
+        * same old fixups, but note that we no longer need the pmap
+        * to be locked if we're in this mode, since pm_lev1map will
+        * never change.
+        * #endif
         */
        if (pmap->pm_lev1map == kernel_lev1map) {
 #ifdef DEBUG
@@ -3617,11 +3673,7 @@
                 */
                pma->pma_asn = PMAP_ASN_RESERVED;
 #else
-#ifdef DIAGNOSTIC
-               if (pma->pma_asn != PMAP_ASN_RESERVED)
-                       panic("pmap_asn_alloc: kernel_lev1map without "
-                           "PMAP_ASN_RESERVED");
-#endif
+               KASSERT(pma->pma_asn == PMAP_ASN_RESERVED);
 #endif /* MULTIPROCESSOR */
                return;
        }
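
The fast path added to pmap_extract() in the diff above (item (2) of the
description) can be modeled the same way: for pmap_kernel(), translation
goes through vtophys(), which needs no lock because kernel PT pages are
never freed, and which handles K0SEG, the alpha's direct-mapped kernel
segment, by pure arithmetic.  The sketch below is again illustrative
user-space C, not kernel code; the K0SEG bounds are the alpha's
conventional values but should be treated as assumptions here, and the
"pa != 0 means success" convention deliberately mirrors the XXX comment
in the diff.

sketch (user-space C, illustrative only):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vaddr_t;
typedef unsigned long paddr_t;

/* Assumed K0SEG bounds (direct-mapped kernel segment on the alpha). */
#define ALPHA_K0SEG_BASE 0xfffffc0000000000UL
#define ALPHA_K0SEG_END  0xfffffe0000000000UL

static paddr_t
vtophys(vaddr_t va)
{
        /* K0SEG: the physical address falls out of simple arithmetic. */
        if (va >= ALPHA_K0SEG_BASE && va < ALPHA_K0SEG_END)
                return va - ALPHA_K0SEG_BASE;
        /*
         * Other kernel VAs would be looked up in the kernel page
         * tables, which is safe without a lock since kernel PT pages
         * are never freed.  Stubbed out here: 0 means "no mapping".
         */
        return 0;
}

static bool
pmap_extract_kernel(vaddr_t va, paddr_t *pap)
{
        paddr_t pa = vtophys(va);       /* no PMAP_LOCK() on this path */

        if (pap != NULL)
                *pap = pa;
        return pa != 0;                 /* XXX 0 doubles as failure */
}

int
main(void)
{
        paddr_t pa;

        if (pmap_extract_kernel(ALPHA_K0SEG_BASE + 0x2000, &pa))
                printf("pa = 0x%lx\n", pa);
        return 0;
}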


