Source-Changes-HG archive


[src/trunk]: src/sys/arch/sh5/sh5 Lots of small changes, some functional, som...



details:   https://anonhg.NetBSD.org/src/rev/ce8e8e2ff51d
branches:  trunk
changeset: 537891:ce8e8e2ff51d
user:      scw <scw%NetBSD.org@localhost>
date:      Mon Oct 07 15:02:07 2002 +0000

description:
Lots of small changes, some functional, some cosmetic.

The main bug fixes are:
 - pmap_pvo_remove() must calculate the kipt index itself when the idx
   parameter is -1 (see the first sketch below).

 - Don't assume that we can skip purging/invalidating the cache for a
   pmap's constituent mappings just because the pmap's ASID generation
   is out of date. At this time, an out-of-date ASID generation only
   indicates that none of the pmap's mappings are in the TLB; there may
   still be valid cache entries for them (the second sketch below shows
   the corrected ordering).

Finally, the subtle NFS and buffer cache corruption problems disappear.
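
The following is a minimal, hypothetical sketch of the first fix. The
full pmap_pvo_remove() is not visible in the truncated diff below, so
the helper name kva_to_iptidx() and the assumption that KSEG1 kernel
VAs map linearly onto pmap_kernel_ipt[] are illustrative only, not the
actual NetBSD/sh5 code.

	/*
	 * Hypothetical helper: recover the kernel IPT slot for a KSEG1
	 * virtual address, assuming a linear, NBPG-sized page layout.
	 */
	static __inline int
	kva_to_iptidx(vaddr_t kva)
	{

		return ((int)((kva - SH5_KSEG1_BASE) / NBPG));
	}

	static void
	pmap_pvo_remove(struct pvo_entry *pvo, int idx)
	{

		/*
		 * Kernel mappings may be removed with idx == -1, in
		 * which case the kipt index must be recomputed here
		 * instead of using -1 to index pmap_kernel_ipt[].
		 */
		if (idx == -1 && PVO_VADDR(pvo) >= SH5_KSEG1_BASE)
			idx = kva_to_iptidx(PVO_VADDR(pvo));

		/* ... remainder of the removal path as in the diff ... */
	}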

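The second fix is sketched below in condensed form, following the
protection-raise hunks in the diff. pmap_tlb_invalidate_cookie() is a
hypothetical stand-in for the real __cpu_tlbinv_cookie() call, whose
argument list is split across hunk boundaries in the truncated diff;
everything else uses names that appear in the diff itself.

	if (pm->pm_asid != PMAP_ASID_UNASSIGNED &&
	    pm->pm_asidgen == pmap_asid_generation) {
		/*
		 * Only a pmap with an assigned ASID of the current
		 * generation can still have this mapping in the TLB,
		 * so the TLB invalidate may stay conditional.
		 */
		pmap_tlb_invalidate_cookie(pvo);	/* stand-in */
	}

	/*
	 * The cache sync, however, must be unconditional: a stale ASID
	 * generation only guarantees the mapping is not in the TLB,
	 * while valid (possibly dirty) cache lines for it may remain.
	 */
	pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);

	pmap_pteg_synch(ptel, pvo);
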
diffstat:

 sys/arch/sh5/sh5/pmap.c |  391 +++++++++++++++++++++++++++++++++--------------
 1 files changed, 269 insertions(+), 122 deletions(-)

diffs (truncated from 815 to 300 lines):

diff -r a467aa48e62b -r ce8e8e2ff51d sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c   Mon Oct 07 14:57:53 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c   Mon Oct 07 15:02:07 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.15 2002/10/05 08:23:32 scw Exp $    */
+/*     $NetBSD: pmap.c,v 1.16 2002/10/07 15:02:07 scw Exp $    */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -340,11 +340,25 @@
 static struct evcnt pmap_pte_spill_evict_events =
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "spill evictions");
 
+static struct evcnt pmap_asid_regen_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "asid regens");
+
 static struct evcnt pmap_shared_cache_downgrade_events =
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "cache downgrades");
 static struct evcnt pmap_shared_cache_upgrade_events =
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "cache upgrades");
 
+static struct evcnt pmap_zero_page_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "zero page");
+static struct evcnt pmap_copy_page_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "copy page");
+static struct evcnt pmap_zero_page_dpurge_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "zero page purge");
+static struct evcnt pmap_copy_page_dpurge_src_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "copy page src purge");
+static struct evcnt pmap_copy_page_dpurge_dst_events =
+       EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "copy page dst purge");
+
 /*
  * This array contains one entry per kernel IPT entry.
  */
@@ -378,16 +392,18 @@
 void pmap_bootstrap(vaddr_t, paddr_t, struct mem_region *);
 volatile pte_t *pmap_pte_spill(u_int, vsid_t, vaddr_t);
 
+static __inline void pmap_copyzero_page_dpurge(paddr_t, struct evcnt *);
 static volatile pte_t * pmap_pvo_to_pte(const struct pvo_entry *, int);
 static struct pvo_entry * pmap_pvo_find_va(pmap_t, vaddr_t, int *);
 static void pmap_pinit(pmap_t);
 static void pmap_release(pmap_t);
 static void pmap_pa_map_kva(vaddr_t, paddr_t, ptel_t);
 static ptel_t pmap_pa_unmap_kva(vaddr_t, ptel_t *);
+static __inline void pmap_change_cache_attr(struct pvo_entry *, ptel_t);
 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
        vaddr_t, paddr_t, ptel_t, int);
 static void pmap_pvo_remove(struct pvo_entry *, int);
-static void pmap_remove_update(struct pvo_head *, int);
+static void pmap_remove_update(struct pvo_head *);
 
 static u_int   pmap_asid_next;
 static u_int   pmap_asid_max;
@@ -573,7 +589,7 @@
        /*
         * Just return if the mapping is not cacheable.
         */
-       if ((ptel & SH5_PTEL_CB_MASK) <= SH5_PTEL_CB_DEVICE)
+       if (!SH5_PTEL_CACHEABLE(ptel))
                return;
 
        /*
@@ -630,7 +646,7 @@
        /*
         * Just return if the mapping was not cacheable.
         */
-       if ((ptel & SH5_PTEL_CB_MASK) <= SH5_PTEL_CB_DEVICE)
+       if (!SH5_PTEL_CACHEABLE(ptel))
                return;
 
        /*
@@ -663,6 +679,12 @@
                /*
                 * The page was read-only.
                 * Just invalidate the data cache.
+                *
+                * Note: We'd like to use __cpu_cache_dinv() here, but
+                * since the mapping may still be in the TLB, the cache
+                * tag will contain the original protection bits.
+                * The invalidate operation will actually cause a write-
+                * protection fault (!!!!) in this case.
                 */
                __cpu_cache_dpurge(va, pa, NBPG);
                break;
@@ -687,18 +709,9 @@
        ptel = pt->ptel;
        pt->ptel = ptel & ~ptebit;
 
-       if (pm->pm_asid == PMAP_ASID_KERNEL ||
+       if (pm->pm_asid != PMAP_ASID_UNASSIGNED &&
            pm->pm_asidgen == pmap_asid_generation) {
                /*
-                * Before raising the protection of the mapping,
-                * make sure the cache is synchronised.
-                *
-                * Note: The cpu-specific cache handling code will ensure
-                * this doesn't cause a TLB miss exception.
-                */
-               pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);
-
-               /*
                 * The mapping may be cached in the TLB. Call cpu-specific
                 * code to check and invalidate if necessary.
                 */
@@ -707,6 +720,15 @@
                    SH5_PTEH_TLB_COOKIE(pteh));
        }
 
+       /*
+        * Before raising the protection of the mapping,
+        * make sure the cache is synchronised.
+        *
+        * Note: The cpu-specific cache handling code will ensure
+        * this doesn't cause a TLB miss exception.
+        */
+       pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);
+
        pmap_pteg_synch(ptel, pvo);
 }
 
@@ -715,7 +737,11 @@
 {
        ptel_t ptel;
 
+       __cpu_tlbinv(PVO_VADDR(pvo) | SH5_PTEH_SH,
+           SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
+
        ptel = pmap_kernel_ipt[idx];
+       pmap_pteg_synch(ptel, pvo);
 
        /*
         * Syncronise the cache in readiness for raising the protection.
@@ -723,14 +749,9 @@
        pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);
 
        /*
-        * Echo the change in the TLB.
+        * It's now safe to change the page table.
         */
        pmap_kernel_ipt[idx] = ptel & ~ptebit;
-
-       __cpu_tlbinv(PVO_VADDR(pvo) | SH5_PTEH_SH,
-           SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
-
-       pmap_pteg_synch(ptel, pvo);
 }
 
 /*
@@ -771,19 +792,10 @@
        pteh = pt->pteh;
        pt->pteh = 0;
 
-       if (pm->pm_asid == PMAP_ASID_KERNEL ||
+       if (pm->pm_asid != PMAP_ASID_UNASSIGNED &&
            pm->pm_asidgen == pmap_asid_generation) {
                /*
-                * Before deleting the mapping from the PTEG/TLB,
-                * make sure the cache is synchronised.
-                *
-                * Note: The cpu-specific cache handling code must ensure
-                * this doesn't cause a TLB miss exception.
-                */
-               pmap_cache_sync_unmap(PVO_VADDR(pvo), ptel);
-
-               /*
-                * The mapping may be cached in the TLB. Call cpu-specific
+                * The mapping may be in the TLB. Call cpu-specific
                 * code to check and invalidate if necessary.
                 */
                __cpu_tlbinv_cookie((pteh & SH5_PTEH_EPN_MASK) |
@@ -791,6 +803,15 @@
                    SH5_PTEH_TLB_COOKIE(pteh));
        }
 
+       /*
+        * Before deleting the mapping from the PTEG/TLB,
+        * make sure the cache is synchronised.
+        *
+        * Note: The cpu-specific cache handling code must ensure
+        * this doesn't cause a TLB miss exception.
+        */
+       pmap_cache_sync_unmap(PVO_VADDR(pvo), ptel);
+
        pmap_pteg_synch(ptel, pvo);
 }
 
@@ -861,8 +882,6 @@
        source_pvo = NULL;
        victim_pvo = NULL;
 
-       pmap_pte_spill_events.ev_count++;
-
        LIST_FOREACH(pvo, &pmap_upvo_table[ptegidx], pvo_olink) {
                if (source_pvo == NULL && pmap_pteh_match(pvo, vsid, va)) {
                        /*
@@ -881,6 +900,7 @@
                                /* Excellent. No need to evict anyone! */
                                PVO_PTEGIDX_SET(pvo, j);
                                pmap_pteg_idx_events[j].ev_count++;
+                               pmap_pte_spill_events.ev_count++;
                                return (&ptg->pte[j]);
                        }
 
@@ -944,6 +964,7 @@
        PVO_PTEGIDX_SET(source_pvo, idx);
 
        pmap_pte_spill_evict_events.ev_count++;
+       pmap_pte_spill_events.ev_count++;
 
        return (pt);
 }
@@ -1120,7 +1141,7 @@
        if (va < SH5_KSEG1_BASE && va >= SH5_KSEG0_BASE)
                return (1);
 
-       s = splhigh();
+       s = splvm();
        pvo = pmap_pvo_find_va(pm, va, NULL);
        if (pvo != NULL)
                ptel = pvo->pvo_ptel;
@@ -1132,7 +1153,7 @@
        }
        splx(s);
 
-       return ((ptel & SH5_PTEL_CB_MASK) > SH5_PTEL_CB_NOCACHE);
+       return (SH5_PTEL_CACHEABLE(ptel));
 }
 
 void
@@ -1156,8 +1177,14 @@
 
        evcnt_attach_static(&pmap_pte_spill_events);
        evcnt_attach_static(&pmap_pte_spill_evict_events);
+       evcnt_attach_static(&pmap_asid_regen_events);
        evcnt_attach_static(&pmap_shared_cache_downgrade_events);
        evcnt_attach_static(&pmap_shared_cache_upgrade_events);
+       evcnt_attach_static(&pmap_zero_page_events);
+       evcnt_attach_static(&pmap_copy_page_events);
+       evcnt_attach_static(&pmap_zero_page_dpurge_events);
+       evcnt_attach_static(&pmap_copy_page_dpurge_src_events);
+       evcnt_attach_static(&pmap_copy_page_dpurge_dst_events);
        for (i = 0; i < SH5_PTEG_SIZE; i++)
                evcnt_attach_static(&pmap_pteg_idx_events[i]);
 }
@@ -1328,6 +1355,42 @@
 {
 }
 
+static __inline void
+pmap_copyzero_page_dpurge(paddr_t pa, struct evcnt *ev)
+{
+       struct pvo_head *pvo_head;
+       struct pvo_entry *pvo;
+
+       if ((pvo_head = pa_to_pvoh(pa, NULL)) == NULL ||
+           (pvo = LIST_FIRST(pvo_head)) == NULL ||
+           !SH5_PTEL_CACHEABLE(pvo->pvo_ptel))
+               return;
+
+       /*
+        * One or more cacheable mappings already exist for this
+        * physical page. We now have to take preventative measures
+        * to purge all dirty data back to the page and ensure no
+        * valid cache lines remain which reference the page.
+        */
+       LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
+               KDASSERT((paddr_t)(pvo->pvo_ptel & SH5_PTEL_PPN_MASK) == pa);
+
+               if (PVO_VADDR(pvo) < SH5_KSEG0_BASE && !PVO_PTEGIDX_ISSET(pvo))
+                       continue;
+
+               __cpu_cache_dpurge_iinv(PVO_VADDR(pvo), pa, NBPG);
+
+               ev->ev_count++;
+
+               /*
+                * If the first mapping is writable, then we don't need
+                * to purge the others; they must all be at the same VA.
+                */
+               if (PVO_ISWRITABLE(pvo))
+                       break;
+       }
+}
+
 /*
  * Fill the given physical page with zeroes.
  */
@@ -1338,10 +1401,28 @@
        if (!pmap_initialized)
                panic("pmap_zero_page: pmap_initialized is false!");
 
+       pmap_zero_page_events.ev_count++;
+
+       /*
+        * Purge/invalidate the cache for any other mappings to this PA.
+        *
+        * XXX: This should not be necessary, but is here as a fail-safe.
+        * We should really panic if there are any existing mappings...


