Source-Changes-HG archive


[src/trunk]: src/sys/arch/powerpc/mpc6xx Disable interrupts when dealing with...



details:   https://anonhg.NetBSD.org/src/rev/7428a44fd9e5
branches:  trunk
changeset: 511624:7428a44fd9e5
user:      matt <matt%NetBSD.org@localhost>
date:      Sat Jun 23 03:17:32 2001 +0000

description:
Disable interrupts when dealing with pvo lists.  Clean up some things.
Keep track of the executability of pages, and sync the icache for executable pages.

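For readers skimming the diff below: the heart of the change is a pair of helpers that clear PSL_EE (external-interrupt enable) in the MSR around pvo/PTE manipulation and restore the previous state afterwards.  A minimal sketch of that pattern follows; the helper names and MSR handling match the diff, while the caller and its list walk are hypothetical stand-ins for the real pmap internals.

	/*
	 * Sketch of the interrupt-masking pattern (see the diff below).
	 * PSL_EE, mfmsr and mtmsr are the usual PowerPC MSR primitives.
	 */
	static __inline u_int32_t
	pmap_interrupts_off(void)
	{
		u_int32_t msr = mfmsr();
		if (msr & PSL_EE)
			mtmsr(msr & ~PSL_EE);	/* mask external interrupts */
		return msr;			/* caller keeps the old MSR */
	}

	static void
	pmap_interrupts_restore(u_int32_t msr)
	{
		if (msr & PSL_EE)		/* re-enable only if they were on */
			mtmsr(msr);
	}

	/* Hypothetical caller: keep the masked window as short as possible. */
		msr = pmap_interrupts_off();
		LIST_FOREACH(pvo, pvo_head, pvo_vlink)
			/* examine or update the entry under a consistent PTE */;
		pmap_interrupts_restore(msr);
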
diffstat:

 sys/arch/powerpc/mpc6xx/pmap.c |  257 +++++++++++++++++++++++-----------------
 1 files changed, 149 insertions(+), 108 deletions(-)

diffs (truncated from 619 to 300 lines):

diff -r 0106856dc72d -r 7428a44fd9e5 sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c    Sat Jun 23 03:16:11 2001 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c    Sat Jun 23 03:17:32 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.15 2001/06/21 22:05:50 matt Exp $   */
+/*     $NetBSD: pmap.c,v 1.16 2001/06/23 03:17:32 matt Exp $   */
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -155,9 +155,11 @@
 #define        PVO_PTEGIDX_MASK        0x0007          /* which PTEG slot */
 #define        PVO_PTEGIDX_VALID       0x0008          /* slot is valid */
 #define        PVO_WIRED               0x0010          /* PVO entry is wired */
-#define        PVO_MANAGED             0x0020          /* PVO entyy for managed page */
+#define        PVO_MANAGED             0x0020          /* PVO e. for managed page */
+#define        PVO_EXECUTABLE          0x0040          /* PVO e. for executable page */
 };
 #define        PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
+#define        PVO_ISEXECUTABLE(pvo)   ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
 #define        PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
 #define        PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
 #define        PVO_PTEGIDX_CLR(pvo)    \
@@ -225,7 +227,7 @@
 STATIC void pmap_pa_unmap(struct pvo_entry *, pte_t *, int *);
 STATIC void tlbia(void);
 
-STATIC void pmap_syncicache(paddr_t);
+STATIC void pmap_syncicache(paddr_t, psize_t);
 STATIC void pmap_release (pmap_t);
 STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
 
@@ -247,8 +249,18 @@
 #define        TLBSYNC()       __asm __volatile("tlbsync")
 #define        SYNC()          __asm __volatile("sync")
 #define        EIEIO()         __asm __volatile("eieio")
+#define        MFMSR()         mfmsr()
+#define        MTMSR(psl)      __asm __volatile("mtmsr %0" :: "r"(psl))
 #define        MFTB()          mftb()
 
+static __inline u_int32_t
+mfmsr(void)
+{
+       u_int psl;
+       __asm __volatile("mfmsr %0" : "=r"(psl) : );
+       return psl;
+}
+
 static __inline u_int
 mftb(void)
 {
@@ -256,6 +268,23 @@
        __asm __volatile("mftb %0" : "=r"(tb) : );
        return tb;
 }
+
+static __inline u_int32_t
+pmap_interrupts_off(void)
+{
+       u_int32_t msr = MFMSR();
+       if (msr & PSL_EE)
+               MTMSR(msr & ~PSL_EE);
+       return msr;
+}
+
+static void
+pmap_interrupts_restore(u_int32_t msr)
+{
+       if (msr & PSL_EE)
+               MTMSR(msr);
+}
+
 /*
  * These small routines may have to be replaced,
  * if/when we support processors other that the 604.
@@ -571,6 +600,7 @@
        struct pvo_entry *source_pvo, *victim_pvo;
        struct pvo_entry *pvo;
        int ptegidx, i;
+       u_int32_t msr;
        sr_t sr;
        volatile pteg_t *pteg;
        volatile pte_t *pt;
@@ -585,8 +615,8 @@
         *
         * Use low bits of timebase as random generator
         */
-       __asm __volatile ("mftb %0" : "=r"(i));
        pteg = &pmap_pteg_table[ptegidx];
+       i = MFTB();
        pt = &pteg->pt[i & 7];
 
        source_pvo = NULL;
@@ -602,7 +632,9 @@
                         * Now found an entry to be spilled into the pteg.
                         * The PTE is now be valid, so we know it's active;
                         */
+                       msr = pmap_interrupts_off();
                        i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
+                       pmap_interrupts_restore(msr);
                        if (i >= 0) {
                                PVO_PTEGIDX_SET(pvo, i);
                                pmap_pte_overflow--;
@@ -636,11 +668,14 @@
         * we lose any ref/chg bit changes contained in the TLB
         * entry.
         */
-       pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
-       PVO_PTEGIDX_CLR(victim_pvo);
+       source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
 
-       source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
+       msr = pmap_interrupts_off();
+       pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
        pmap_pte_set(pt, &source_pvo->pvo_pte);
+       pmap_interrupts_restore(msr);
+
+       PVO_PTEGIDX_CLR(victim_pvo);
        PVO_PTEGIDX_SET(source_pvo, i);
        pmap_pte_replacements++;
        return 1;
@@ -969,7 +1004,7 @@
 #ifdef DIAGNOSTIC
        if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
                panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
-                   "pvo but valid pte index", pvo);
+                   "pvo but no valid pte index", pvo);
        }
        if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
                panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
@@ -1126,7 +1161,7 @@
 }
 
 void
-pmap_syncicache(paddr_t pa)
+pmap_syncicache(paddr_t pa, psize_t len)
 {
        static int depth;
        static u_int calls;
@@ -1141,7 +1176,7 @@
                        pmap_pvo_syncicache = pmap_rkva_alloc(VM_PROT_READ|VM_PROT_WRITE);
                calls++;
                pmap_pa_map(pmap_pvo_syncicache, pa, &saved_pte, &depth);
-               __syncicache((void *)PVO_VADDR(pmap_pvo_syncicache), NBPG);
+               __syncicache((void *)PVO_VADDR(pmap_pvo_syncicache), len);
                pmap_pa_unmap(pmap_pvo_syncicache, &saved_pte, &depth);
                return;
        }
@@ -1285,6 +1320,7 @@
        vaddr_t va, paddr_t pa, u_int pte_lo, int flags)
 {
        struct pvo_entry *pvo;
+       u_int32_t msr;
        sr_t sr;
        int first;
        int ptegidx;
@@ -1303,6 +1339,7 @@
        sr = va_to_sr(pm->pm_sr, va);
        ptegidx = va_to_pteg(sr, va);
 
+       msr = pmap_interrupts_off();
        /*
         * Remove any existing mapping for this page.  Reuse the
         * pvo entry if there a mapping.
@@ -1330,19 +1367,15 @@
        }
 
        /*
-        * Remember is the list was empty and therefore will be
-        * the first item.
-        */
-       first = LIST_FIRST(pvo_head) == NULL;
-
-       /*
         * If we aren't overwriting an mapping, try to allocate
         */
        if (pvo == NULL) {
                int poolflags = PR_NOWAIT;
                if ((flags & PMAP_CANFAIL) == 0)
                        poolflags |= PR_URGENT;
+               pmap_interrupts_restore(msr);
                pvo = pool_get(pl, poolflags);
+               msr = pmap_interrupts_off();
                if (pvo == NULL) {
 #if 0
                        pvo = pmap_pvo_reclaim(pm);
@@ -1367,11 +1400,20 @@
 #endif
        }
        pvo->pvo_vaddr &= ~ADDR_POFF;
+       if (flags & VM_PROT_EXECUTE)
+               pvo->pvo_vaddr |= PVO_EXECUTABLE;
        if (flags & PMAP_WIRED)
                pvo->pvo_vaddr |= PVO_WIRED;
        if (pvo_head != &pmap_pvo_kunmanaged)
                pvo->pvo_vaddr |= PVO_MANAGED; 
        pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
+
+       /*
+        * Remember is the list was empty and therefore will be
+        * the first item.
+        */
+       first = LIST_FIRST(pvo_head) == NULL;
+
        LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
        if (pvo->pvo_pte.pte_lo & PMAP_WIRED)
                pvo->pvo_pmap->pm_stats.wired_count++;
@@ -1390,12 +1432,13 @@
        } else {
                pmap_pte_overflow++;
 #if 0
-               if ((flags & VM_PROT_ALL) != VM_PROT_NONE)
+               if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) != VM_PROT_NONE)
                        pmap_pte_evict(pvo, ptegidx, MFTB() & 7);
 #endif
        }
        PMAP_PVO_CHECK(pvo);            /* sanity check */
        pmap_pvo_enter_depth--;
+       pmap_interrupts_restore(msr);
        return first ? ENOENT : 0;
 }
 
@@ -1467,7 +1510,7 @@
        struct mem_region *mp;
        struct pvo_head *pvo_head;
        struct pool *pl;
-       u_int pte_lo;
+       u_int32_t pte_lo;
        int s;
        int error;
        u_int pvo_flags;
@@ -1510,6 +1553,12 @@
                                va, pa);
        }
 #endif
+
+       /*
+        * We need to know if this page can be executable
+        */
+       flags |= (prot & VM_PROT_EXECUTE);
+
        /*
         * Record mapping for later back-translation and pte spilling.
         * This will overwrite any existing mapping.
@@ -1523,7 +1572,7 @@
                 * Flush the real memory from the cache.
                 */
                if (((prot|flags) & VM_PROT_EXECUTE) && (pte_lo & PTE_I) == 0) {
-                       pmap_syncicache(pa);
+                       pmap_syncicache(pa, NBPG);
                }
                error = 0;
        }
@@ -1535,7 +1584,7 @@
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 {
        struct mem_region *mp;
-       u_int pte_lo;
+       u_int32_t pte_lo;
        int error;
        int s;
 
@@ -1570,7 +1619,7 @@
         * Flush the real memory from the instruction cache.
         */
        if ((prot & VM_PROT_EXECUTE) && (pte_lo & (PTE_I|PTE_G)) == 0) {
-               pmap_syncicache(pa);
+               pmap_syncicache(pa, NBPG);
        }
 }
 
@@ -1592,14 +1641,18 @@
 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
 {
        struct pvo_entry *pvo;
+       u_int32_t msr;
        int pteidx;
        int s;
 
        for (; va < endva; va += PAGE_SIZE) {
                s = splvm();
+               msr = pmap_interrupts_off();
                pvo = pmap_pvo_find_va(pm, va, &pteidx);
-               if (pvo != NULL)
+               if (pvo != NULL) {
                        pmap_pvo_remove(pvo, pteidx, TRUE);
+               }
+               pmap_interrupts_restore(msr);
                splx(s);
        }
 }
@@ -1611,9 +1664,11 @@
 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
 {
        struct pvo_entry *pvo;
+       u_int32_t msr;
        int s;
        
        s = splvm();
+       msr = pmap_interrupts_off();
        pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);


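The other half of the change tracks whether a mapping is executable: pmap_enter() folds VM_PROT_EXECUTE from the protection into the pvo flags (PVO_EXECUTABLE), and pmap_syncicache() now takes an explicit length instead of always flushing NBPG internally.  A hedged sketch of the resulting icache gate, using the names from the hunks above (the surrounding declarations are assumed):

	/*
	 * Sketch: push a newly entered, cacheable, executable page to the
	 * instruction cache.  pa, prot, flags and pte_lo are as in
	 * pmap_enter() above; NBPG is the base page size on mpc6xx.
	 */
	if (((prot | flags) & VM_PROT_EXECUTE) && (pte_lo & PTE_I) == 0)
		pmap_syncicache(pa, NBPG);
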
