Source-Changes-HG archive


[src/netbsd-1-6]: src/sys/arch/arm/arm32 Pull up revision 1.111 (requested by...



details:   https://anonhg.NetBSD.org/src/rev/75f61cd50f9d
branches:  netbsd-1-6
changeset: 529667:75f61cd50f9d
user:      he <he%NetBSD.org@localhost>
date:      Sat Dec 07 20:44:23 2002 +0000

description:
Pull up revision 1.111 (requested by thorpej in ticket #714):
  Use a pool cache for PT-PTs.
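
In brief, the change replaces the ad-hoc pmap_alloc_ptpt()/pmap_free_ptpt() pair
with a pool_cache(9), so that freed PT-PTs (the L2 tables that map a pmap's PTE
array) can be reused with their kernel mappings already constructed. Below is a
minimal sketch of that wiring, not the committed code, using the pool/pool_cache
interfaces as they appear in the patch; the helper name pmap_ptpt_pool_setup()
is invented here for illustration, the patch itself does this next to the
existing pmap_pmap_pool initialisation:

    /* Minimal sketch, assuming the pool(9)/pool_cache(9) API of this era. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/pool.h>

    struct pool       pmap_ptpt_pool;    /* backing pool, one page per object */
    struct pool_cache pmap_ptpt_cache;   /* keeps constructed PT-PTs around */

    /* Back-end page allocator: KVA plus a physical page, mapped and zeroed. */
    static void *pmap_ptpt_page_alloc(struct pool *, int);
    static void  pmap_ptpt_page_free(struct pool *, void *);

    /* Constructor: copies the kernel PTs into a freshly allocated PT-PT. */
    static int   pmap_ptpt_ctor(void *, void *, int);

    struct pool_allocator pmap_ptpt_allocator = {
            pmap_ptpt_page_alloc, pmap_ptpt_page_free,
    };

    /* Hypothetical helper; see the real pool_init()/pool_cache_init() hunk. */
    void
    pmap_ptpt_pool_setup(void)
    {
            pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
                &pmap_ptpt_allocator);
            pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
                pmap_ptpt_ctor, NULL, NULL);
    }

With this in place, pmap_allocpagedir() obtains a ready-to-use PT-PT via
pool_cache_get(&pmap_ptpt_cache, PR_WAITOK) and pmap_freepagedir() returns it
with pool_cache_put(), as the diff below shows.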

diffstat:

 sys/arch/arm/arm32/pmap.c |  176 ++++++++++++++++++++++++++++++++-------------
 1 files changed, 123 insertions(+), 53 deletions(-)

diffs (truncated from 316 to 300 lines):

diff -r b9196da5ff28 -r 75f61cd50f9d sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Dec 07 20:43:02 2002 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Dec 07 20:44:23 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.97.4.4 2002/12/07 20:43:02 he Exp $ */
+/*     $NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $ */
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.4.4 2002/12/07 20:43:02 he Exp $");        
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $");        
 #ifdef PMAP_DEBUG
 #define        PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
@@ -195,6 +195,23 @@
 
 struct pool pmap_pmap_pool;
 
+/*
+ * pool/cache that PT-PT's are allocated from
+ */
+
+struct pool pmap_ptpt_pool;
+struct pool_cache pmap_ptpt_cache;
+u_int pmap_ptpt_cache_generation;
+
+static void *pmap_ptpt_page_alloc(struct pool *, int);
+static void pmap_ptpt_page_free(struct pool *, void *);
+
+struct pool_allocator pmap_ptpt_allocator = {
+       pmap_ptpt_page_alloc, pmap_ptpt_page_free,
+};
+
+static int pmap_ptpt_ctor(void *, void *, int);
+
 static pt_entry_t *csrc_pte, *cdst_pte;
 static vaddr_t csrcp, cdstp;
 
@@ -285,9 +302,6 @@
 static int pmap_clean_page __P((struct pv_entry *, boolean_t));
 static void pmap_remove_all __P((struct vm_page *));
 
-static int pmap_alloc_ptpt(struct pmap *);
-static void pmap_free_ptpt(struct pmap *);
-
 static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
 static struct vm_page  *pmap_get_ptp __P((struct pmap *, vaddr_t));
 __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
@@ -1167,7 +1181,16 @@
 
        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                  &pool_allocator_nointr);
-       
+
+       /*
+        * initialize the PT-PT pool and cache.
+        */
+
+       pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
+                 &pmap_ptpt_allocator);
+       pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
+                       pmap_ptpt_ctor, NULL, NULL);
+
        cpu_dcache_wbinv_all();
 }
 
@@ -1387,40 +1410,38 @@
 }
 
 /*
- * pmap_alloc_ptpt:
+ * pmap_ptpt_page_alloc:
  *
- *     Allocate the page table that maps the PTE array.
+ *     Back-end page allocator for the PT-PT pool.
  */
-static int
-pmap_alloc_ptpt(struct pmap *pmap)
+static void *
+pmap_ptpt_page_alloc(struct pool *pp, int flags)
 {
        struct vm_page *pg;
        pt_entry_t *pte;
-
-       KASSERT(pmap->pm_vptpt == 0);
-
-       pmap->pm_vptpt = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
-       if (pmap->pm_vptpt == 0) {
-               PDEBUG(0,
-                   printf("pmap_alloc_ptpt: no KVA for PTPT\n"));
-               return (ENOMEM);
-       }
+       vaddr_t va;
+
+       /* XXX PR_WAITOK? */
+       va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
+       if (va == 0)
+               return (NULL);
 
        for (;;) {
                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                if (pg != NULL)
                        break;
+               if ((flags & PR_WAITOK) == 0) {
+                       uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
+                       return (NULL);
+               }
                uvm_wait("pmap_ptpt");
        }
 
-       pmap->pm_pptpt = VM_PAGE_TO_PHYS(pg);
-
-       pte = vtopte(pmap->pm_vptpt);
-
+       pte = vtopte(va);
        KDASSERT(pmap_pte_v(pte) == 0);
 
-       *pte = L2_S_PROTO | pmap->pm_pptpt |
-           L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
+       *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
+            L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
 #ifdef PMAP_ALIAS_DEBUG
     {
        int s = splhigh();
@@ -1429,24 +1450,53 @@
     }
 #endif /* PMAP_ALIAS_DEBUG */
 
-       return (0);
+       return ((void *) va);
 }
 
 /*
- * pmap_free_ptpt:
+ * pmap_ptpt_page_free:
  *
- *     Free the page table that maps the PTE array.
+ *     Back-end page free'er for the PT-PT pool.
  */
 static void
-pmap_free_ptpt(struct pmap *pmap)
+pmap_ptpt_page_free(struct pool *pp, void *v)
 {
-
-       pmap_kremove(pmap->pm_vptpt, L2_TABLE_SIZE);
+       vaddr_t va = (vaddr_t) v;
+       paddr_t pa;
+
+       pa = vtophys(va);
+
+       pmap_kremove(va, L2_TABLE_SIZE);
        pmap_update(pmap_kernel());
 
-       uvm_pagefree(PHYS_TO_VM_PAGE(pmap->pm_pptpt));
-
-       uvm_km_free(kernel_map, pmap->pm_vptpt, L2_TABLE_SIZE);
+       uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+
+       uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
+}
+
+/*
+ * pmap_ptpt_ctor:
+ *
+ *     Constructor for the PT-PT cache.
+ */
+static int
+pmap_ptpt_ctor(void *arg, void *object, int flags)
+{
+       caddr_t vptpt = object;
+
+       /* Page is already zero'd. */
+
+       /*
+        * Map in kernel PTs.
+        *
+        * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
+        */
+       memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
+              (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
+                       ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
+              (KERNEL_PD_SIZE >> 2));
+
+       return (0);
 }
 
 /*
@@ -1459,9 +1509,10 @@
 static int
 pmap_allocpagedir(struct pmap *pmap)
 {
+       vaddr_t vptpt;
        paddr_t pa;
        struct l1pt *pt;
-       int error;
+       u_int gen;
 
        PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
 
@@ -1499,14 +1550,29 @@
        }
 
        /* Allocate a page table to map all the page tables for this pmap */
-       if ((error = pmap_alloc_ptpt(pmap)) != 0) {
+       KASSERT(pmap->pm_vptpt == 0);
+
+ try_again:
+       gen = pmap_ptpt_cache_generation;
+       vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
+       if (vptpt == NULL) {
+               PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
                pmap_freepagedir(pmap);
-               return (error);
+               return (ENOMEM);
        }
 
        /* need to lock this all up for growkernel */
        simple_lock(&pmaps_lock);
 
+       if (gen != pmap_ptpt_cache_generation) {
+               simple_unlock(&pmaps_lock);
+               pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
+               goto try_again;
+       }
+
+       pmap->pm_vptpt = vptpt;
+       pmap->pm_pptpt = vtophys(vptpt);
+
        /* Duplicate the kernel mappings. */
        bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
                (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
@@ -1519,15 +1585,6 @@
 
        pt->pt_flags &= ~PTFLAG_CLEAN;  /* L1 is dirty now */
 
-       /*
-        * Map the kernel page tables into the new PT map.
-        */
-       bcopy((char *)(PTE_BASE
-           + (PTE_BASE >> (PGSHIFT - 2))
-           + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
-           (char *)pmap->pm_vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
-           (KERNEL_PD_SIZE >> 2));
-
        LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
        simple_unlock(&pmaps_lock);
        
@@ -1587,18 +1644,28 @@
        }
 }
 
-
 void
 pmap_freepagedir(struct pmap *pmap)
 {
        /* Free the memory used for the page table mapping */
-       if (pmap->pm_vptpt != 0)
-               pmap_free_ptpt(pmap);
+       if (pmap->pm_vptpt != 0) {
+               /*
+                * XXX Objects freed to a pool cache must be in constructed
+                * XXX form when freed, but we don't free page tables as we
+                * XXX go, so we need to zap the mappings here.
+                *
+                * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
+                */
+               memset((caddr_t) pmap->pm_vptpt, 0,
+                      ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
+               pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
+       }
 
        /* junk the L1 page table */
        if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
                /* Add the page table to the queue */
-               SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
+               SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
+                                   pmap->pm_l1pt, pt_queue);
                ++l1pt_static_queue_count;
        } else if (l1pt_queue_count < 8) {
                /* Add the page table to the queue */
@@ -1608,7 +1675,6 @@
                pmap_free_l1pt(pmap->pm_l1pt);
 }
 
-
 /*
  * Retire the given physical map from service.
  * Should only be called if the map contains no valid mappings.
@@ -1673,10 +1739,10 @@
                uvm_pagefree(page);
        }
        simple_unlock(&pmap->pm_obj.vmobjlock);
-       
+
        /* Free the page dir */
        pmap_freepagedir(pmap);


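Worth noting in the pmap_allocpagedir() hunk above: because
pool_cache_get(PR_WAITOK) may sleep, a PT-PT constructed earlier could hold a
stale copy of the kernel page tables by the time it is handed out. The code
guards against this with pmap_ptpt_cache_generation, snapshotting it before the
get and re-checking it under pmaps_lock; on a mismatch the object is destructed
and the allocation retried. A condensed sketch of that pattern follows (the
generation counter is presumably bumped where the kernel page tables grow,
which falls outside the truncated diff):

            u_int   gen;
            vaddr_t vptpt;

     try_again:
            gen = pmap_ptpt_cache_generation;
            vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
            if (vptpt == 0) {
                    pmap_freepagedir(pmap);
                    return (ENOMEM);    /* back-end could not get KVA/page */
            }

            simple_lock(&pmaps_lock);
            if (gen != pmap_ptpt_cache_generation) {
                    /* Constructed copy went stale while we slept; toss it. */
                    simple_unlock(&pmaps_lock);
                    pool_cache_destruct_object(&pmap_ptpt_cache,
                        (void *) vptpt);
                    goto try_again;
            }
            pmap->pm_vptpt = vptpt;
            pmap->pm_pptpt = vtophys(vptpt);
            /* ... rest of pmap_allocpagedir() runs with pmaps_lock held ... */

On the free side, objects returned to a pool cache must be in constructed form,
which is why pmap_freepagedir() zeroes the user portion of the PT-PT (see the
XXX comment in the diff) before calling pool_cache_put().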
