Source-Changes-HG archive


[src/trunk]: src/sys/arch/sh5/sh5 Another big round of pmap fixes/cleanup:



details:   https://anonhg.NetBSD.org/src/rev/2c3497a02f23
branches:  trunk
changeset: 536182:2c3497a02f23
user:      scw <scw%NetBSD.org@localhost>
date:      Tue Sep 10 12:42:03 2002 +0000

description:
Another big round of pmap fixes/cleanup:

 - Use the PMAP_ASID_* constants from pmap.h

 - Track pmap_pvo_{enter,remove}() depth in the same way as mpc6xx's pmap
   (on which this pmap was originally based); see the depth-guard sketch
   after this list.

 - Some misc. tidying up and added commentary.

 - Use the VA/KVA to select whether to use the IPT or PTEG instead of
   checking which pmap is being operated on; see the address-selection
   sketch after this list.

 - Add a handy DDB-callable function which will scan the kernel IPT
   looking for inconsistencies; a purely illustrative validate_kipt()
   sketch follows this list.

 - Finally, when unmapping a pool page, purge the data cache for the
   page. This permits write-back caching to be enabled for kernel
   text/data.
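
The PMAP_DIAG depth counters added by this patch amount to a simple
re-entrancy guard around pmap_pvo_enter()/pmap_pvo_remove().  The
stand-alone sketch below mirrors that guard logic from the diff; the
stub bodies, the abort()-based pmap_debugger() stand-in and main() are
invented for illustration and are not the sh5 code:

#include <stdio.h>
#include <stdlib.h>

#define PMAP_DIAG                       /* diagnostics compiled in */

#ifdef PMAP_DIAG
static int pmap_pvo_enter_depth;        /* > 0 while pvo_enter runs */
static int pmap_pvo_remove_depth;       /* > 0 while pvo_remove runs */
#define pmap_debugger() abort()         /* stands in for entering DDB */
#endif

static void
pmap_pvo_remove(unsigned long va)
{
#ifdef PMAP_DIAG
	if (pmap_pvo_remove_depth > 0) {
		printf("pmap_pvo_remove: called recursively, va 0x%lx\n", va);
		pmap_debugger();
	}
	pmap_pvo_remove_depth++;
#endif

	/* ... the real code tears down the mapping here ... */

#ifdef PMAP_DIAG
	pmap_pvo_remove_depth--;
#endif
}

static int
pmap_pvo_enter(unsigned long va)
{
#ifdef PMAP_DIAG
	/* Entering while a remove is in flight means the pmap is confused. */
	if (pmap_pvo_remove_depth > 0) {
		printf("pmap_pvo_enter: pmap_pvo_remove active, va 0x%lx\n",
		    va);
		pmap_debugger();
	}
	if (pmap_pvo_enter_depth) {
		printf("pmap_pvo_enter: called recursively, va 0x%lx\n", va);
		pmap_debugger();
	}
	pmap_pvo_enter_depth++;
#endif

	/* ... the real code allocates and inserts the pvo entry here ... */

#ifdef PMAP_DIAG
	pmap_pvo_enter_depth--;
#endif
	return (0);
}

int
main(void)
{
	pmap_pvo_enter(0x12345000UL);
	pmap_pvo_remove(0x12345000UL);
	return (0);
}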
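
The address-selection change keys the PTEG-vs-IPT decision off the
virtual address itself rather than off which pmap is being operated on.
In the sketch below the comparison against SH5_KSEG1_BASE and the
bounds checks in kva_to_iptidx() follow the diff, but the base address,
KERNEL_IPT_SIZE and the page shift are placeholders, not the real sh5
layout:

#include <stdio.h>

#define SH5_KSEG1_BASE  0xc0000000UL    /* placeholder, not sh5's value */
#define KERNEL_IPT_SIZE 4096            /* placeholder size */
#define sh5_btop(va)    ((va) >> 12)    /* placeholder page shift */

/*
 * Mirrors the patched kva_to_iptidx(): reject addresses below KSEG1
 * up front instead of trusting the caller, then bounds-check the
 * computed index.
 */
static int
kva_to_iptidx(unsigned long kva)
{
	int idx;

	if (kva < SH5_KSEG1_BASE)
		return (-1);

	idx = (int)sh5_btop(kva - SH5_KSEG1_BASE);
	if (idx >= KERNEL_IPT_SIZE)
		return (-1);

	return (idx);
}

int
main(void)
{
	unsigned long va = 0xc0002000UL;

	/*
	 * Anything below KSEG1 goes through the hashed PTEG tables;
	 * anything at or above it is a fixed kernel IPT mapping.
	 */
	if (va < SH5_KSEG1_BASE)
		printf("va 0x%lx: use the PTEG hash\n", va);
	else
		printf("va 0x%lx: kernel IPT index %d\n", va,
		    kva_to_iptidx(va));
	return (0);
}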
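
The new validate_kipt() is only declared in the visible part of the
diff; its body lies beyond the 300-line truncation.  Purely to
illustrate the idea (walk every kernel IPT slot and flag entries that
cannot be valid), here is a hypothetical scanner; the entry layout,
the sample data and the sanity check are all invented:

#include <stdio.h>

#define KERNEL_IPT_SIZE 8               /* placeholder size */

/* Invented sample entries; slot 2 is deliberately suspect. */
static unsigned long pmap_kernel_ipt[KERNEL_IPT_SIZE] = {
	0x100, 0, 0xff, 0x200, 0, 0, 0, 0,
};

/*
 * Hypothetical DDB-callable scanner: returns the number of entries
 * failing a sanity check.  The check (all low flag bits set) is
 * invented; the real function would validate actual PTEL fields.
 */
int
validate_kipt(int verbose)
{
	unsigned long ptel;
	int idx, bad = 0;

	for (idx = 0; idx < KERNEL_IPT_SIZE; idx++) {
		if ((ptel = pmap_kernel_ipt[idx]) == 0)
			continue;               /* unused slot */
		if ((ptel & 0xffUL) == 0xffUL) {
			bad++;
			if (verbose)
				printf("kipt[%d]: suspect ptel 0x%lx\n",
				    idx, ptel);
		}
	}
	return (bad);
}

int
main(void)
{
	printf("%d suspect entries\n", validate_kipt(1));
	return (0);
}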

diffstat:

 sys/arch/sh5/sh5/pmap.c |  406 ++++++++++++++++++++++++++++++++---------------
 1 files changed, 271 insertions(+), 135 deletions(-)

diffs (truncated from 807 to 300 lines):

diff -r e93ea18efaec -r 2c3497a02f23 sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c   Tue Sep 10 12:33:44 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c   Tue Sep 10 12:42:03 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.5 2002/09/06 16:20:49 scw Exp $     */
+/*     $NetBSD: pmap.c,v 1.6 2002/09/10 12:42:03 scw Exp $     */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -129,7 +129,8 @@
 #define pmap_debugger()        panic("")
 #else
 #include <machine/db_machdep.h>
-#define        pmap_debugger() cpu_Debugger();
+#define        pmap_debugger() asm volatile("trapa r63");
+int validate_kipt(int);
 #endif
 #endif
 
@@ -285,6 +286,7 @@
 
 #define        PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & SH5_PTEH_EPN_MASK)
 #define        PVO_ISEXECUTABLE(pvo)   ((pvo)->pvo_ptel & SH5_PTEL_PR_X)
+#define        PVO_ISWIRED(pvo)        ((pvo)->pvo_vaddr & PVO_WIRED)
 #define        PVO_ISMANAGED(pvo)      ((pvo)->pvo_vaddr & PVO_MANAGED)
 #define        PVO_ISWRITABLE(pvo)     ((pvo)->pvo_vaddr & PVO_WRITABLE)
 #define        PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
@@ -352,6 +354,11 @@
 
 int pmap_initialized;
 
+#ifdef PMAP_DIAG
+int pmap_pvo_enter_depth;
+int pmap_pvo_remove_depth;
+#endif
+
 
 /*
  * Returns non-zero if the given pmap is `current'.
@@ -373,8 +380,12 @@
 static __inline int
 kva_to_iptidx(vaddr_t kva)
 {
-       int idx = (int) sh5_btop(kva - SH5_KSEG1_BASE);
+       int idx;
 
+       if (kva < SH5_KSEG1_BASE)
+               return (-1);
+
+       idx = (int) sh5_btop(kva - SH5_KSEG1_BASE);
        if (idx >= KERNEL_IPT_SIZE)
                return (-1);
 
@@ -498,13 +509,14 @@
        ptel &= ~ptebit;
        pt->ptel = ptel;
 
-       if (pvo->pvo_pmap->pm_asidgen == pmap_asid_generation) {
+       if (pvo->pvo_pmap->pm_asid == PMAP_ASID_KERNEL ||
+           pvo->pvo_pmap->pm_asidgen == pmap_asid_generation) {
                /*
                 * The mapping may be cached in the TLB. Call cpu-specific
                 * code to check and invalidate if necessary.
                 */
                __cpu_tlbinv_cookie((pteh & SH5_PTEH_EPN_MASK) |
-                   pvo->pvo_pmap->pm_asid << SH5_PTEH_ASID_SHIFT,
+                   (pvo->pvo_pmap->pm_asid << SH5_PTEH_ASID_SHIFT),
                    SH5_PTEH_TLB_COOKIE(pteh));
        }
 
@@ -519,7 +531,7 @@
        ptel = pmap_kernel_ipt[idx];
        pmap_kernel_ipt[idx] &= ~ptebit;
 
-       __cpu_tlbinv((PVO_VADDR(pvo) & SH5_PTEH_EPN_MASK) | SH5_PTEH_SH,
+       __cpu_tlbinv(PVO_VADDR(pvo) | SH5_PTEH_SH,
            SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
 
        pmap_pteg_synch(ptel, pvo);
@@ -559,13 +571,14 @@
        pt->pteh = 0;
        ptel = pt->ptel;
 
-       if (pvo->pvo_pmap->pm_asidgen == pmap_asid_generation) {
+       if (pvo->pvo_pmap->pm_asid == PMAP_ASID_KERNEL ||
+           pvo->pvo_pmap->pm_asidgen == pmap_asid_generation) {
                /*
                 * The mapping may be cached in the TLB. Call cpu-specific
                 * code to check and invalidate if necessary.
                 */
                __cpu_tlbinv_cookie((pteh & SH5_PTEH_EPN_MASK) |
-                   pvo->pvo_pmap->pm_asid << SH5_PTEH_ASID_SHIFT,
+                   (pvo->pvo_pmap->pm_asid << SH5_PTEH_ASID_SHIFT),
                    SH5_PTEH_TLB_COOKIE(pteh));
        }
 
@@ -699,6 +712,8 @@
        /*
         * If we couldn't find the victim, however, then the pmap module
         * has become very confused...
+        *
+        * XXX: This panic is pointless; SR.BL is set ...
         */
        if (victim_pvo == NULL)
                panic("pmap_pte_spill: victim p-pte (%p) has no pvo entry!",pt);
@@ -923,11 +938,14 @@
 
        pool_setlowat(&pmap_mpvo_pool, 1008);
 
-       pmap_asid_next = 1;
-       pmap_asid_max = 255;
-       pmap_initialized = 1;
+       pmap_asid_next = PMAP_ASID_USER_START;
+       pmap_asid_max = SH5_PTEH_ASID_MASK;     /* XXX Should be cpu specific */
 
        pmap_pinit(pmap_kernel());
+       pmap_kernel()->pm_asid = PMAP_ASID_KERNEL;
+       pmap_kernel()->pm_asidgen = 0;
+
+       pmap_initialized = 1;
 
        splx(s);
 }
@@ -967,7 +985,7 @@
        int i;
 
        pm->pm_refs = 1;
-       pm->pm_asid = PMAP_ASID_RESERVED;
+       pm->pm_asid = PMAP_ASID_UNASSIGNED;
 
        for (i = 0; i < NPMAPS; i += VSID_NBPW) {
                static u_int pmap_vsidcontext;
@@ -1277,12 +1295,24 @@
 
 #ifdef PMAP_DIAG
        if (pm == pmap_kernel() && va < SH5_KSEG1_BASE) {
-               printf("pmap_pvo_enter: pmap_kernel() with va 0x%lx!!\n", va);
+               printf("pmap_pvo_enter: pmap_kernel() with va 0x%lx!\n", va);
+               pmap_debugger();
+       }
+
+       if (pmap_pvo_remove_depth > 0) {
+               printf("pmap_pvo_enter: pmap_pvo_remove active, for va 0x%lx\n",
+                   va);
                pmap_debugger();
        }
+       if (pmap_pvo_enter_depth) {
+               printf("pmap_pvo_enter: called recursively for va 0x%lx\n", va);
+               pmap_debugger();
+       }
+       pmap_pvo_enter_depth++;
 #endif
 
-       if (pm != pmap_kernel()) {
+       if (va < SH5_KSEG1_BASE) {
+               KDASSERT(va < SH5_KSEG0_BASE);
                idx = va_to_pteg(pm->pm_vsid, va);
                pvo_table_head = &pmap_upvo_table[idx];
        } else {
@@ -1306,8 +1336,11 @@
                 * protecton status. This can happen as part of tracking
                 * page modification.
                 */
+#if 0
+               KDASSERT(PVO_ISWIRED(pvo) == 0);
                if (PVO_ISWRITABLE(pvo) && flags & VM_PROT_WRITE)
                        ptel |= SH5_PTEL_PR_W;
+#endif
                pmap_pvo_remove(pvo, idx);
        }
 
@@ -1318,15 +1351,18 @@
         */
        pvo = pool_get(&pmap_mpvo_pool, poolflags);
 
+       s = splhigh();
+
        if (pvo == NULL) {
                if ((flags & PMAP_CANFAIL) == 0)
                        panic("pmap_pvo_enter: failed");
-
+#ifdef PMAP_DIAG
+               pmap_pvo_enter_depth--;
+#endif
+               splx(s);
                return (ENOMEM);
        }
 
-       s = splhigh();
-
        ptel |= (ptel_t) (pa & SH5_PTEL_PPN_MASK);
        pvo->pvo_vaddr = va;
        pvo->pvo_pmap = pm;
@@ -1349,7 +1385,7 @@
 
        pvo->pvo_pmap->pm_stats.resident_count++;
 
-       if (pm != pmap_kernel()) {
+       if (va < SH5_KSEG0_BASE) {
                /*
                 * We hope this succeeds but it isn't required.
                 */
@@ -1366,6 +1402,9 @@
                    va, (u_long)ptel, idx));
        }
 
+#ifdef PMAP_DIAG
+       pmap_pvo_enter_depth--;
+#endif
        splx(s);
 
        return (0);
@@ -1375,9 +1414,20 @@
 pmap_pvo_remove(struct pvo_entry *pvo, int ptegidx)
 {
 
-       if (pvo->pvo_pmap != pmap_kernel()) {
+#ifdef PMAP_DIAG
+       if (pmap_pvo_remove_depth > 0) {
+               printf("pmap_pvo_remove: called recusively, for va 0x%lx\n",
+                   PVO_VADDR(pvo));
+               pmap_debugger();
+       }
+       pmap_pvo_remove_depth++;
+#endif
+
+       if (PVO_VADDR(pvo) < SH5_KSEG1_BASE) {
                volatile pte_t *pt;
 
+               KDASSERT(PVO_VADDR(pvo) < SH5_KSEG0_BASE);
+
                /* 
                 * If there is an active pte entry, we need to deactivate it
                 * (and save the ref & chg bits).
@@ -1389,13 +1439,6 @@
                        PVO_PTEGIDX_CLR(pvo);
                }
        } else {
-#ifdef PMAP_DIAG
-               if (PVO_VADDR(pvo) < SH5_KSEG1_BASE) {
-                       printf("pmap_pvo_remove: pmap_kernel() va 0x%lx!!\n",
-                           PVO_VADDR(pvo));
-                       pmap_debugger();
-               }
-#endif
                pvo->pvo_ptel |=
                    pmap_pa_unmap_kva(pvo->pvo_vaddr) & PVO_REFMOD_MASK;
        }
@@ -1404,7 +1447,7 @@
         * Update our statistics
         */
        pvo->pvo_pmap->pm_stats.resident_count--;
-       if (pvo->pvo_vaddr & PVO_WIRED)
+       if (PVO_ISWIRED(pvo))
                pvo->pvo_pmap->pm_stats.wired_count--;
 
        /*
@@ -1432,6 +1475,10 @@
        LIST_REMOVE(pvo, pvo_olink);
 
        pool_put(&pmap_mpvo_pool, pvo);
+
+#ifdef PMAP_DIAG
+       pmap_pvo_remove_depth--;
+#endif
 }
 
 int
@@ -1441,6 +1488,7 @@
        struct vm_page *pg;
        ptel_t ptel;
        int error;
+       int s;
 
        PMPRINTF((
            "pmap_enter: %p: va=0x%lx, pa=0x%lx, prot=0x%x, flags=0x%x\n",
@@ -1480,23 +1528,22 @@
                if (va <= VM_MAXUSER_ADDRESS)
                        ptel |= SH5_PTEL_PR_U;
 
-               /* Pre-load mod/ref status according to the hint in `flags' */
-               if (flags & VM_PROT_WRITE) {
+               /*
+                * Pre-load mod/ref status according to the hint in `flags'.
+                *
+                * Note that managed pages are initially read-only, unless
+                * the hint indicates they are writable. This allows us to
+                * track page modification status by taking a write-protect
+                * fault later on.
+                */
+               if (flags & VM_PROT_WRITE)
                        ptel |= SH5_PTEL_R | SH5_PTEL_M | SH5_PTEL_PR_W;
-                       pmap_attr_save(pg, SH5_PTEL_R | SH5_PTEL_M);
-               } else
-               if (flags & VM_PROT_ALL) {
+               else
+               if (flags & VM_PROT_ALL)
                        ptel |= SH5_PTEL_R;
-                       pmap_attr_save(pg, SH5_PTEL_R);
-               }


