Source-Changes-HG archive


[src/trunk]: src/sys/arch/powerpc Clean the icache for pages when they are en...



details:   https://anonhg.NetBSD.org/src/rev/a454943c9bf2
branches:  trunk
changeset: 524959:a454943c9bf2
user:      matt <matt%NetBSD.org@localhost>
date:      Wed Apr 03 00:12:07 2002 +0000

description:
Clean the icache for pages when they are entered as executable and were
previously either not mapped at all or mapped as non-executable.  Round
memory regions in pmap_bootstrap.
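
In outline, the icache part of the change amounts to the standalone sketch
below (illustrative only, not the kernel code: page_t, ATTR_EXEC,
sync_icache() and enter_mapping() are made-up stand-ins for the pmap
structures in the diff that follows).  The instruction cache is synced only
when a mapping is entered executable, the page is cacheable, and the page
was not already marked executable.

/*
 * Simplified model of the new pmap_enter() decision.  Not NetBSD code.
 */
#include <stdbool.h>
#include <stdio.h>

#define ATTR_EXEC 0x200u              /* mirrors the new PTE_EXEC pseudo bit */

typedef struct {
        unsigned attrs;               /* per-page attribute bits */
        bool     has_mappings;        /* page already has at least one mapping */
} page_t;

static void
sync_icache(void)                     /* stand-in for pmap_syncicache(pa, NBPG) */
{
        printf("icache synced\n");
}

static void
enter_mapping(page_t *pg, bool exec, bool cache_inhibited)
{
        unsigned was_exec = 0;

        if (pg != NULL) {
                if (!pg->has_mappings)
                        pg->attrs &= ~ATTR_EXEC;   /* first mapping: reset execness */
                else
                        was_exec = pg->attrs & ATTR_EXEC;
        }

        /* ... the mapping itself would be entered here ... */

        if (exec && !cache_inhibited && was_exec == 0) {
                sync_icache();
                if (pg != NULL)
                        pg->attrs |= ATTR_EXEC;    /* remember the page is now exec */
        }

        if (pg != NULL)
                pg->has_mappings = true;
}

int
main(void)
{
        page_t pg = { 0, false };

        enter_mapping(&pg, true, false);   /* first exec mapping: syncs */
        enter_mapping(&pg, true, false);   /* already exec: no sync */
        return 0;
}

In the real change this state lives in a new PTE_EXEC pseudo attribute bit,
cleared when a page gains its first mapping and saved after the first
icache sync.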

diffstat:

 sys/arch/powerpc/include/mpc6xx/pte.h |   4 +-
 sys/arch/powerpc/mpc6xx/pmap.c        |  83 ++++++++++++++++++++++++----------
 2 files changed, 61 insertions(+), 26 deletions(-)

diffs (219 lines):

diff -r 4e3c19edcc05 -r a454943c9bf2 sys/arch/powerpc/include/mpc6xx/pte.h
--- a/sys/arch/powerpc/include/mpc6xx/pte.h     Wed Apr 03 00:09:52 2002 +0000
+++ b/sys/arch/powerpc/include/mpc6xx/pte.h     Wed Apr 03 00:12:07 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pte.h,v 1.7 2002/03/02 21:36:27 kleink Exp $   */
+/*     $NetBSD: pte.h,v 1.8 2002/04/03 00:12:07 matt Exp $     */
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -68,6 +68,8 @@
 #define        PTE_RW          PTE_BW
 #define        PTE_RO          PTE_BR
 
+#define        PTE_EXEC        0x00000200      /* pseudo bit in attrs; page is exec */
+
 #ifndef        _LOCORE
 typedef        struct pte pte_t;
 #endif /* _LOCORE */
diff -r 4e3c19edcc05 -r a454943c9bf2 sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c    Wed Apr 03 00:09:52 2002 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c    Wed Apr 03 00:12:07 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.38 2002/03/08 20:48:33 thorpej Exp $        */
+/*     $NetBSD: pmap.c,v 1.39 2002/04/03 00:12:08 matt Exp $   */
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -396,12 +396,14 @@
 #endif
 
 static __inline struct pvo_head *
-pa_to_pvoh(paddr_t pa)
+pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
 {
 #ifdef __HAVE_VM_PAGE_MD
        struct vm_page *pg;
 
        pg = PHYS_TO_VM_PAGE(pa);
+       if (pg_p != NULL)
+               *pg_p = pg;
        if (pg == NULL)
                return &pmap_pvo_unmanaged;
        return &pg->mdpage.mdpg_pvoh;
@@ -410,6 +412,8 @@
        int bank, pg;
 
        bank = vm_physseg_find(atop(pa), &pg);
+       if (pg_p != NULL)
+               *pg_p = pg;
        if (bank == -1)
                return &pmap_pvo_unmanaged;
        return &vm_physmem[bank].pmseg.pvoh[pg];
@@ -423,7 +427,7 @@
        return &pg->mdpage.mdpg_pvoh;
 #endif
 #ifdef __HAVE_PMAP_PHYSSEG
-       return pa_to_pvoh(VM_PAGE_TO_PHYS(pg));
+       return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
 #endif
 }
 
@@ -1335,7 +1339,7 @@
        }
 
        if (pvo->pvo_vaddr & PVO_MANAGED) {
-               pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN);
+               pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
        } else {
                if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
                        printf("pmap_pvo_check: pvo %p: non kernel address "
@@ -1412,7 +1416,6 @@
        struct pvo_entry *pvo;
        u_int32_t msr;
        sr_t sr;
-       int first;
        int ptegidx;
        int i;
        int poolflags = PR_NOWAIT;
@@ -1495,12 +1498,6 @@
                pvo->pvo_vaddr |= PVO_MANAGED; 
        pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
 
-       /*
-        * Remember is the list was empty and therefore will be
-        * the first item.
-        */
-       first = LIST_FIRST(pvo_head) == NULL;
-
        LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
        if (pvo->pvo_pte.pte_lo & PVO_WIRED)
                pvo->pvo_pmap->pm_stats.wired_count++;
@@ -1529,7 +1526,7 @@
        pmap_pvo_enter_depth--;
 #endif
        pmap_interrupts_restore(msr);
-       return first ? ENOENT : 0;
+       return 0;
 }
 
 void
@@ -1601,26 +1598,47 @@
 {
        struct mem_region *mp;
        struct pvo_head *pvo_head;
+       struct vm_page *pg;
        struct pool *pl;
        u_int32_t pte_lo;
        int s;
        int error;
        u_int pvo_flags;
+       u_int was_exec = 0;
 
        if (__predict_false(!pmap_initialized)) {
                pvo_head = &pmap_pvo_kunmanaged;
                pl = &pmap_upvo_pool;
                pvo_flags = 0;
+               pg = NULL;
+               was_exec = PTE_EXEC;
        } else {
-               pvo_head = pa_to_pvoh(pa);
+               pvo_head = pa_to_pvoh(pa, &pg);
                pl = &pmap_mpvo_pool;
                pvo_flags = PVO_MANAGED;
        }
 
+       /*
+        * If this is a managed page, and it's the first reference to the
+        * page clear the execness of the page.  Otherwise fetch the execness.
+        */
+       if (pg != NULL) {
+               if (LIST_EMPTY(pvo_head)) {
+                       pmap_attr_clear(pg, PTE_EXEC);
+               } else {
+                       was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
+               }
+       }
+
        DPRINTFN(ENTER,
            ("pmap_enter(0x%p, 0x%lx, 0x%lx, 0x%x, 0x%x) ",
            pm, va, pa, prot, flags));
 
+
+       /*
+        * Assume the page is cache inhibited and access is guarded unless
+        * it's in our available memory array.
+        */
        pte_lo = PTE_I | PTE_G;
        if ((flags & PMAP_NC) == 0) {
                for (mp = mem; mp->size; mp++) {
@@ -1660,14 +1678,18 @@
        error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
        splx(s);
 
-       if (error == ENOENT) {
-               /* 
-                * Flush the real memory from the cache.
-                */
-               if (((prot|flags) & VM_PROT_EXECUTE) && (pte_lo & PTE_I) == 0) {
-                       pmap_syncicache(pa, NBPG);
-               }
-               error = 0;
+       /* 
+        * Flush the real page from the instruction cache if this page is
+        * mapped executable and cacheable and was not previously mapped
+        * (or was not mapped executable).
+        */
+       if (error == 0 &&
+            (flags & VM_PROT_EXECUTE) &&
+            (pte_lo & PTE_I) == 0 &&
+           was_exec == 0) {
+               pmap_syncicache(pa, NBPG);
+               if (pg != NULL)
+                       pmap_attr_save(pg, PTE_EXEC);
        }
 
        return error;
@@ -1709,7 +1731,7 @@
        pmap_interrupts_restore(msr);
        splx(s);
 
-       if (error != 0 && error != ENOENT)
+       if (error != 0)
                panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d", va, pa, error);
 
        /* 
@@ -2643,8 +2665,10 @@
        kernelstart = trunc_page(kernelstart);
        kernelend = round_page(kernelend);
        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
-               s = trunc_page(mp->start);
-               e = round_page(mp->start + mp->size);
+               mp->start = round_page(mp->start);
+               mp->size = trunc_page(mp->size);
+               s = mp->start;
+               e = mp->start + mp->size;
 
                DPRINTFN(BOOT,
                    ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
@@ -2657,10 +2681,19 @@
                        e = pmap_memlimit;
 
                /*
+                * Is this region empty or strange?  skip it.
+                */
+               if (e <= s) {
+                       mp->start = 0;
+                       mp->size = 0;
+                       continue;
+               }
+
+               /*
                 * Does this overlap the beginning of kernel?
                 *   Does extend past the end of the kernel?
                 */
-               if (s < kernelstart && e > kernelstart) {
+               else if (s < kernelstart && e > kernelstart) {
                        if (e > kernelend) {
                                avail[avail_cnt].start = kernelend;
                                avail[avail_cnt].size = e - kernelend;


