Source-Changes-HG archive


[src/trunk]: src/sys/arch/sh5 Minor pmap rototil:



details:   https://anonhg.NetBSD.org/src/rev/39adc0513c43
branches:  trunk
changeset: 537659:39adc0513c43
user:      scw <scw%NetBSD.org@localhost>
date:      Fri Oct 04 09:17:57 2002 +0000

description:
Minor pmap rototil:
 - Track unmanaged mappings of RAM more closely by allocating a pvo
   for them. This allows us to check more accurately for multiple
   cache-mode-incompatible mappings.

 - As part of the above, implement pmap_steal_memory(). This has the
   beneficial side-effect of moving a fair chunk of kernel data
   structures into KSEG0.
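
The first item above means every mapping of a RAM page, managed or
unmanaged, now sits on a pvo list, so checking for cache-mode conflicts
reduces to walking every pvo for the physical page. A minimal sketch of
that idea, assuming a per-page list head and a cache-behaviour mask (the
names pvo_vlink and SH5_PTEL_CB_MASK are assumptions, not taken from the
diff; the committed check is part of pmap_pvo_enter() below):

/*
 * Illustrative sketch only.  Returns zero if entering a mapping with
 * cache mode "new_ptel" would conflict with an existing mapping of
 * the same page.  Because unmanaged mappings are now tracked too,
 * this walk sees every alias of the page.
 */
static int
pmap_cache_mode_ok(struct pvo_head *pvo_head, ptel_t new_ptel)
{
	struct pvo_entry *pvo;

	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		/* All mappings of one page must agree on cacheability. */
		if ((pvo->pvo_ptel & SH5_PTEL_CB_MASK) !=
		    (new_ptel & SH5_PTEL_CB_MASK))
			return (0);
	}
	return (1);
}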
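
The second item hooks UVM's PMAP_STEAL_MEMORY interface (prototype added
to pmap.h below): allocations made before uvm_page_init() are carved
directly out of a mem_region and returned as KSEG0 addresses, which is
what moves those kernel data structures into the direct-mapped segment.
A hedged sketch of the shape of such a hook, where SH5_KSEG0_BASE and the
pmap_mem pointer are assumptions and the committed version in pmap.c does
more bookkeeping:

/*
 * Sketch of a PMAP_STEAL_MEMORY hook.  UVM calls this during early
 * bootstrap; pages taken here never become managed pages.  vstartp
 * and vendp (used by some ports to report the available KVA range)
 * are left untouched in this sketch.
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	paddr_t pa;

	size = sh5_round_page(size);

	/* Carve the pages from the front of the first memory region. */
	pa = pmap_mem[0].mr_start;
	pmap_mem[0].mr_start += size;
	pmap_mem[0].mr_size -= size;

	/*
	 * Return a KSEG0 (direct-mapped) virtual address: the stolen
	 * memory then needs no page-table entries at all.
	 */
	return (SH5_KSEG0_BASE + (vaddr_t)(pa - pmap_kseg0_pa));
}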

diffstat:

 sys/arch/sh5/include/pmap.h |    6 +-
 sys/arch/sh5/sh5/pmap.c     |  399 ++++++++++++++++++++++++++++---------------
 2 files changed, 264 insertions(+), 141 deletions(-)

diffs (truncated from 829 to 300 lines):

diff -r c142546a6722 -r 39adc0513c43 sys/arch/sh5/include/pmap.h
--- a/sys/arch/sh5/include/pmap.h       Fri Oct 04 08:48:35 2002 +0000
+++ b/sys/arch/sh5/include/pmap.h       Fri Oct 04 09:17:57 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.10 2002/09/28 10:53:57 scw Exp $    */
+/*     $NetBSD: pmap.h,v 1.11 2002/10/04 09:17:57 scw Exp $    */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -78,6 +78,7 @@
 typedef struct pmap *pmap_t;
 
 #define        PMAP_NC         0x1000
+#define        PMAP_UNMANAGED  0x2000
 
 extern struct pmap kernel_pmap_store;
 #define        pmap_kernel()   (&kernel_pmap_store)
@@ -91,6 +92,9 @@
 #define        PMAP_MAP_POOLPAGE(p)    pmap_map_poolpage((p))
 #define        PMAP_UNMAP_POOLPAGE(v)  pmap_unmap_poolpage((v))
 
+#define        PMAP_STEAL_MEMORY
+extern vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
+
 #define pmap_clear_modify(pg)          (pmap_clear_bit((pg), SH5_PTEL_M))
 #define        pmap_clear_reference(pg)        (pmap_clear_bit((pg), SH5_PTEL_R))
 #define        pmap_is_modified(pg)            (pmap_query_bit((pg), SH5_PTEL_M))
diff -r c142546a6722 -r 39adc0513c43 sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c   Fri Oct 04 08:48:35 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c   Fri Oct 04 09:17:57 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.13 2002/10/02 12:19:38 scw Exp $    */
+/*     $NetBSD: pmap.c,v 1.14 2002/10/04 09:17:58 scw Exp $    */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -138,6 +138,16 @@
 #ifdef DEBUG
 static int pmap_debug = 0;
 #define        PMPRINTF(x)     do { if (pmap_debug) printf x; } while (0)
+static const char * PMSTR(pmap_t);
+static const char *
+PMSTR(pmap_t pm)
+{
+       static char pm_str[32];
+       if (pm == pmap_kernel())
+               return("KERNEL");
+       sprintf(pm_str, "%p", pm);
+       return (pm_str);
+}
 #else
 #define        PMPRINTF(x)
 #endif
@@ -344,10 +354,28 @@
            LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);
 
 static struct pool pmap_pool;     /* pool of pmap structures */
+static struct pool pmap_upvo_pool; /* pool of pvo entries for unmanaged pages */
 static struct pool pmap_mpvo_pool; /* pool of pvo entries for managed pages */
 
-
-void pmap_bootstrap(vaddr_t, struct mem_region *);
+/*
+ * We keep a cache of unmanaged pages to be used for pvo entries for
+ * unmanaged pages.
+ */
+struct pvo_page {
+       SIMPLEQ_ENTRY(pvo_page) pvop_link;
+};
+SIMPLEQ_HEAD(pvop_head, pvo_page);
+struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
+u_long pmap_upvop_free;
+
+static void *pmap_pool_ualloc(struct pool *, int);
+static void pmap_pool_ufree(struct pool *, void *);
+
+static struct pool_allocator pmap_pool_uallocator = {
+       pmap_pool_ualloc, pmap_pool_ufree, 0,
+};
+
+void pmap_bootstrap(vaddr_t, paddr_t, struct mem_region *);
 volatile pte_t *pmap_pte_spill(u_int, vsid_t, vaddr_t);
 
 static volatile pte_t * pmap_pvo_to_pte(const struct pvo_entry *, int);
@@ -356,7 +384,7 @@
 static void pmap_release(pmap_t);
 static void pmap_pa_map_kva(vaddr_t, paddr_t, ptel_t);
 static ptel_t pmap_pa_unmap_kva(vaddr_t, ptel_t *);
-static int pmap_pvo_enter(pmap_t, struct pvo_head *,
+static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
        vaddr_t, paddr_t, ptel_t, int);
 static void pmap_pvo_remove(struct pvo_entry *, int);
 static void pmap_remove_update(struct pvo_head *, int);
@@ -387,7 +415,10 @@
 static vaddr_t pmap_copy_page_src_kva;
 static vaddr_t pmap_copy_page_dst_kva;
 static vaddr_t pmap_kva_avail_start;
+static vaddr_t pmap_device_kva_start;
+#define        PMAP_BOOTSTRAP_DEVICE_KVA       (NBPG * 512)
 vaddr_t vmmap;
+paddr_t pmap_kseg0_pa;
 
 int pmap_initialized;
 
@@ -526,7 +557,8 @@
 pmap_pteg_synch(ptel_t ptel, struct pvo_entry *pvo)
 {
 
-       pvo->pvo_ptel |= (ptel & SH5_PTEL_RM_MASK);
+       if (PVO_ISMANAGED(pvo))
+               pvo->pvo_ptel |= (ptel & SH5_PTEL_RM_MASK);
 }
 
 /*
@@ -917,7 +949,7 @@
 }
 
 void
-pmap_bootstrap(vaddr_t avail, struct mem_region *mr)
+pmap_bootstrap(vaddr_t avail, paddr_t kseg0base, struct mem_region *mr)
 {
        struct mem_region *mp;
        psize_t size;
@@ -926,6 +958,8 @@
        uvmexp.pagesize = NBPG;
        uvm_setpagesize();
 
+       pmap_kseg0_pa = kseg0base;
+
        for (mp = mr; mp->mr_size; mp++)
                physmem += btoc(mp->mr_size);
 
@@ -985,6 +1019,6 @@
         */
        size = sh5_round_page(MSGBUFSIZE);
        initmsgbuf((caddr_t)avail, size);
 
        avail = sh5_round_page(avail + size);
        mr[0].mr_start += size;
@@ -1014,8 +1049,22 @@
        pmap_copy_page_src_kva = pmap_zero_page_kva + NBPG;
        pmap_copy_page_dst_kva = pmap_copy_page_src_kva + NBPG;
        vmmap = pmap_copy_page_dst_kva + NBPG;
-
-       pmap_kva_avail_start = vmmap + NBPG;
+       pmap_device_kva_start = vmmap + NBPG;
+
+       pmap_kva_avail_start = pmap_device_kva_start +
+           PMAP_BOOTSTRAP_DEVICE_KVA;
+
+       pmap_asid_next = PMAP_ASID_USER_START;
+       pmap_asid_max = SH5_PTEH_ASID_MASK;     /* XXX Should be cpu specific */
+
+       pmap_pinit(pmap_kernel());
+       pmap_kernel()->pm_asid = PMAP_ASID_KERNEL;
+       pmap_kernel()->pm_asidgen = 0;
+
+       pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
+           0, 0, 0, "pmap_upvopl", &pmap_pool_uallocator);
+
+       pool_setlowat(&pmap_upvo_pool, 252);
 }
 
 /*
@@ -1029,79 +1078,32 @@
 pmap_map_device(paddr_t pa, u_int len)
 {
        vaddr_t va, rv;
-       u_int l = len;
-#if 0
-       ptel_t *ptelp;
        ptel_t ptel;
        int idx;
-#endif
-
-       l = len = sh5_round_page(len);
+
+       len = sh5_round_page(len);
 
        if (pmap_initialized == 0) {
-               /*
-                * Steal some KVA
-                */
-               rv = va = pmap_kva_avail_start;
+               rv = va = pmap_device_kva_start;
+               if ((va + len) >= pmap_kva_avail_start)
+                       panic("pmap_map_device: out of device bootstrap kva");
+               pmap_device_kva_start += len;
        } else
                rv = va = uvm_km_valloc(kernel_map, len);
 
-#if 1
        while (len) {
-               pmap_kenter_pa(va, pa, VM_PROT_ALL);
+               idx = kva_to_iptidx(va);
+
+               ptel = SH5_PTEL_CB_DEVICE | SH5_PTEL_PR_R | SH5_PTEL_PR_W;
+               ptel |= (ptel_t)(pa & SH5_PTEL_PPN_MASK);
+
+               pmap_kernel_ipt[idx] = ptel;
+
                va += NBPG;
                pa += NBPG;
                len -= NBPG;
        }
 
-       if (pmap_initialized == 0)
-               pmap_kva_avail_start += l;
-#else
-       /*
-        * Get the index into pmap_kernel_ipt.
-        */
-       if ((idx = kva_to_iptidx(va)) < 0)
-               panic("pmap_map_device: Invalid KVA %p", (void *)va);
-
-       ptelp = &pmap_kernel_ipt[idx];
-       ptel = (ptel_t)(pa & SH5_PTEL_PPN_MASK) |
-           SH5_PTEL_CB_DEVICE | SH5_PTEL_PR_R | SH5_PTEL_PR_W;
-
-       /*
-        * We optimise the page size for these mappings according to the
-        * requested length. This helps reduce TLB thrashing for large
-        * regions of device memory, for example.
-        */
-       while (len) {
-               ptel_t pgsize, pgend, mask;
-               if (len >= 0x20000000) {
-                       pgsize = SH5_PTEL_SZ_512MB;     /* Impossible?!?! */
-                       pgend = ptel + 0x20000000;
-                       mask = SH5_PTEL_PPN_MASK & ~(0x20000000 - 1);
-               } else
-               if (len >= 0x100000) {
-                       pgsize = SH5_PTEL_SZ_1MB;
-                       pgend = ptel + 0x100000;
-                       mask = SH5_PTEL_PPN_MASK & ~(0x100000 - 1);
-               } else
-               if (len >= 0x10000) {
-                       pgsize = SH5_PTEL_SZ_64KB;
-                       pgend = ptel + 0x10000;
-                       mask = SH5_PTEL_PPN_MASK & ~(0x10000 - 1);
-               } else {
-                       pgsize = SH5_PTEL_SZ_4KB;
-                       pgend = ptel + 0x1000;
-                       mask = SH5_PTEL_PPN_MASK & ~(0x1000 - 1);
-               }
-
-               while (ptel < pgend && len) {
-                       *ptelp++ = (ptel & mask) | pgsize;
-                       ptel += NBPG;
-                       len -= NBPG;
-               }
-       }
-#endif
-
        return (rv);
 }
 
@@ -1148,13 +1150,6 @@
 
        pool_setlowat(&pmap_mpvo_pool, 1008);
 
-       pmap_asid_next = PMAP_ASID_USER_START;
-       pmap_asid_max = SH5_PTEH_ASID_MASK;     /* XXX Should be cpu specific */
-
-       pmap_pinit(pmap_kernel());
-       pmap_kernel()->pm_asid = PMAP_ASID_KERNEL;
-       pmap_kernel()->pm_asidgen = 0;
-
        pmap_initialized = 1;
 
        splx(s);
@@ -1359,7 +1354,7 @@
        if (!pmap_initialized)
                panic("pmap_copy_page: pmap_initialized is false!");
 
-       PMPRINTF(("pmap_copy_page: copying 0x%08lx -> 0x%08lx\n", src, dst));
+       PMPRINTF(("pmap_copy_page: copying 0x%lx -> 0x%lx\n", src, dst));
 
        pmap_pa_map_kva(pmap_copy_page_src_kva, src, 0);
        pmap_pa_map_kva(pmap_copy_page_dst_kva, dst, SH5_PTEL_PR_W);
@@ -1499,7 +1494,7 @@
  * This returns whether this is the first mapping of a page.
  */
 static int
-pmap_pvo_enter(pmap_t pm, struct pvo_head *pvo_head,
+pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
        vaddr_t va, paddr_t pa, ptel_t ptel, int flags)
 {
        struct pvo_head *pvo_table_head;
@@ -1543,19 +1538,18 @@
         * Remove any existing mapping for this virtual page.
         */
        LIST_FOREACH(pvo, pvo_table_head, pvo_olink) {
-               if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
+               if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
+                       pmap_pvo_remove(pvo, idx);
                        break;
+               }
        }


