Source-Changes-HG archive


[src/trunk]: src/sys/arch/alpha/alpha Use a pool cache for L1 PT pages. When...



details:   https://anonhg.NetBSD.org/src/rev/817020807293
branches:  trunk
changeset: 500166:817020807293
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Thu Dec 07 05:59:07 2000 +0000

description:
Use a pool cache for L1 PT pages.  When we can allocate a cached,
constructed L1 PT page, this saves us from having to copy the kernel
L1 PTEs into the user L1 PT page at fork time (it's already set up).

A simple test that performs a rapid fork/exit operation 10000 times shows
a 1 second improvement on a 533MHz 21164A (12s to 11s).
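
[Editorial note: the following is a minimal userland sketch of the pool-cache
idea this change relies on, not the NetBSD pool(9) API; the names OBJ_SIZE,
CACHE_DEPTH, obj_ctor, cache_get and cache_put are invented for illustration.
It shows the pattern: keep a small free list of already-constructed objects,
so the expensive one-time setup (here standing in for copying the kernel L1
PTEs into a new user L1 PT page) is skipped whenever a cached object can be
reused.]

/*
 * Toy analogue of a constructed-object cache.  All names are invented
 * for illustration; this is not the pool(9)/pool_cache(9) interface.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE	8192	/* stand-in for PAGE_SIZE */
#define CACHE_DEPTH	4	/* max constructed objects kept around */

static void *cache[CACHE_DEPTH];	/* free list of constructed objects */
static int cache_n;

/* Expensive one-time setup (stands in for copying kernel L1 PTEs). */
static void
obj_ctor(void *obj)
{
	memset(obj, 0xa5, OBJ_SIZE);
}

/* Get a constructed object, reusing a cached one when possible. */
static void *
cache_get(void)
{
	void *obj;

	if (cache_n > 0)
		return (cache[--cache_n]);	/* already constructed */
	if ((obj = malloc(OBJ_SIZE)) == NULL)
		return (NULL);
	obj_ctor(obj);				/* pay setup cost once */
	return (obj);
}

/* Return an object, keeping it constructed for reuse if there is room. */
static void
cache_put(void *obj)
{
	if (cache_n < CACHE_DEPTH)
		cache[cache_n++] = obj;
	else
		free(obj);
}

int
main(void)
{
	void *o = cache_get();

	/* ... use the object, then hand it back still constructed ... */
	cache_put(o);

	/* The second get reuses the cached, pre-constructed object. */
	o = cache_get();
	cache_put(o);
	printf("cached objects: %d\n", cache_n);
	return (0);
}

[As the diff below shows, the real change also has to call
pool_cache_invalidate() when the kernel portion of the L1 PTEs changes,
since cached copies would otherwise go stale.]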

diffstat:

 sys/arch/alpha/alpha/pmap.c |  143 ++++++++++++++++++++++++++++++++-----------
 1 files changed, 106 insertions(+), 37 deletions(-)

diffs (234 lines):

diff -r 147928e628c2 -r 817020807293 sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c       Thu Dec 07 05:45:57 2000 +0000
+++ b/sys/arch/alpha/alpha/pmap.c       Thu Dec 07 05:59:07 2000 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.151 2000/11/24 22:41:38 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.152 2000/12/07 05:59:07 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
@@ -156,7 +156,7 @@
 
 #include <sys/cdefs.h>                 /* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.151 2000/11/24 22:41:38 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.152 2000/12/07 05:59:07 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -277,6 +277,8 @@
  * The pools from which pmap structures and sub-structures are allocated.
  */
 struct pool pmap_pmap_pool;
+struct pool pmap_l1pt_pool;
+struct pool_cache pmap_l1pt_cache;
 struct pool pmap_asn_pool;
 struct pool pmap_asngen_pool;
 struct pool pmap_pv_pool;
@@ -515,6 +517,11 @@
 void   pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, long);
 void   pmap_l1pt_delref(pmap_t, pt_entry_t *, long);
 
+void   *pmap_l1pt_alloc(unsigned long, int, int);
+void   pmap_l1pt_free(void *, unsigned long, int);
+
+int    pmap_l1pt_ctor(void *, void *, int);
+
 /*
  * PV table management functions.
  */
@@ -945,6 +952,10 @@
        pmap_ncpuids = ncpuids;
        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
            0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+       pool_init(&pmap_l1pt_pool, PAGE_SIZE, 0, 0, 0, "l1ptpl",
+           0, pmap_l1pt_alloc, pmap_l1pt_free, M_VMPMAP);
+       pool_cache_init(&pmap_l1pt_cache, &pmap_l1pt_pool, pmap_l1pt_ctor,
+           NULL, NULL);
        pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0,
            "pmasnpl",
            0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
@@ -3274,7 +3285,13 @@
        struct pv_head *pvh;
        paddr_t pa;
 
-       pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+       /*
+        * Don't ask for a zero'd page in the L1PT case -- we will
+        * properly initialize it in the constructor.
+        */
+
+       pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ?
+           UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO);
        if (pg != NULL) {
                pa = VM_PAGE_TO_PHYS(pg);
 
@@ -3445,8 +3462,9 @@
 
                        l1idx = l1pte_index(va);
 
+                       simple_lock(&pmap_all_pmaps_slock);
+
                        /* Update all the user pmaps. */
-                       simple_lock(&pmap_all_pmaps_slock);
                        for (pm = TAILQ_FIRST(&pmap_all_pmaps);
                             pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) {
                                /* Skip the kernel pmap. */
@@ -3461,6 +3479,10 @@
                                pm->pm_lev1map[l1idx] = pte;
                                PMAP_UNLOCK(pm);
                        }
+
+                       /* Invalidate the L1 PT cache. */
+                       pool_cache_invalidate(&pmap_l1pt_cache);
+
                        simple_unlock(&pmap_all_pmaps_slock);
                }
 
@@ -3504,9 +3526,7 @@
 int
 pmap_lev1map_create(pmap_t pmap, long cpu_id)
 {
-       paddr_t ptpa;
-       pt_entry_t pte;
-       int i;
+       pt_entry_t *l1pt;
 
 #ifdef DIAGNOSTIC
        if (pmap == pmap_kernel())
@@ -3516,32 +3536,11 @@
                panic("pmap_lev1map_create: pmap uses non-reserved ASN");
 #endif
 
-       /*
-        * Allocate a page for the level 1 table.
-        */
-       if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE) {
-               /*
-                * Yow!  No free pages!  Try to steal a PT page from
-                * another pmap!
-                */
-               if (pmap_ptpage_steal(pmap, PGU_L1PT, &ptpa) == FALSE)
-                       return (KERN_RESOURCE_SHORTAGE);
-       }
-       pmap->pm_lev1map = (pt_entry_t *) ALPHA_PHYS_TO_K0SEG(ptpa);
-
-       /*
-        * Initialize the new level 1 table by copying the
-        * kernel mappings into it.
-        */
-       for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS);
-            i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++)
-               pmap->pm_lev1map[i] = kernel_lev1map[i];
-
-       /*
-        * Now, map the new virtual page table.  NOTE: NO ASM!
-        */
-       pte = ((ptpa >> PGSHIFT) << PG_SHIFT) | PG_V | PG_KRE | PG_KWE;
-       pmap->pm_lev1map[l1pte_index(VPTBASE)] = pte;
+       l1pt = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
+       if (l1pt == NULL)
+               return (KERN_RESOURCE_SHORTAGE);
+
+       pmap->pm_lev1map = l1pt;
 
        /*
         * The page table base has changed; if the pmap was active,
@@ -3564,15 +3563,13 @@
 void
 pmap_lev1map_destroy(pmap_t pmap, long cpu_id)
 {
-       paddr_t ptpa;
+       pt_entry_t *l1pt = pmap->pm_lev1map;
 
 #ifdef DIAGNOSTIC
        if (pmap == pmap_kernel())
                panic("pmap_lev1map_destroy: got kernel pmap");
 #endif
 
-       ptpa = ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap->pm_lev1map);
-
        /*
         * Go back to referencing the global kernel_lev1map.
         */
@@ -3603,7 +3600,79 @@
        /*
         * Free the old level 1 page table page.
         */
-       pmap_physpage_free(ptpa);
+       pool_cache_put(&pmap_l1pt_cache, l1pt);
+}
+
+/*
+ * pmap_l1pt_ctor:
+ *
+ *     Pool cache constructor for L1 PT pages.
+ */
+int
+pmap_l1pt_ctor(void *arg, void *object, int flags)
+{
+       pt_entry_t *l1pt = object, pte;
+       int i;
+
+       /*
+        * Initialize the new level 1 table by zeroing the
+        * user portion and copying the kernel mappings into
+        * the kernel portion.
+        */
+       for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++)
+               l1pt[i] = 0;
+
+       for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS);
+            i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++)
+               l1pt[i] = kernel_lev1map[i];
+
+       /*
+        * Now, map the new virtual page table.  NOTE: NO ASM!
+        */
+       pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) |
+           PG_V | PG_KRE | PG_KWE;
+       l1pt[l1pte_index(VPTBASE)] = pte;
+
+       return (0);
+}
+
+/*
+ * pmap_l1pt_alloc:
+ *
+ *     Page allocator for L1 PT pages.
+ */
+void *
+pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
+{
+       paddr_t ptpa;
+
+       /*
+        * Attempt to allocate a free page.
+        */
+       if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE) {
+#if 0
+               /*
+                * Yow!  No free pages!  Try to steal a PT page from
+                * another pmap!
+                */
+               if (pmap_ptpage_steal(pmap, PGU_L1PT, &ptpa) == FALSE)
+#endif
+                       return (NULL);
+       }
+
+       return ((void *) ALPHA_PHYS_TO_K0SEG(ptpa));
+}
+
+/*
+ * pmap_l1pt_free:
+ *
+ *     Page freer for L1 PT pages.
+ */
+void
+pmap_l1pt_free(void *v, unsigned long sz, int mtype)
+{
+
+       pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v));
 }
 
 /*


