Source-Changes-HG archive


[src/trunk]: src/sys/arch/sh5 Add code to deal with the "Cache Purge/Invalida...



details:   https://anonhg.NetBSD.org/src/rev/e11ebf033706
branches:  trunk
changeset: 536261:e11ebf033706
user:      scw <scw%NetBSD.org@localhost>
date:      Thu Sep 12 12:44:13 2002 +0000

description:
Add code to deal with the "Cache Purge/Invalidate can cause TLB Miss"
problem, such that a TLB miss no longer occurs.

With the above, it is now safe to enable write-back caching for userland
mappings.

TODO: Deal with cache issues for shared mappings with different VAs.
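
Background for the change (a sketch, not part of the commit message): the problem named in the subject is that the SH-5 cache purge/invalidate operations work on virtual addresses and can themselves take a TLB miss. Judging by the hunks below, the fix reserves a dedicated ASID (PMAP_ASID_CACHEOPS) and passes the physical as well as the virtual address down to the cpu-specific cache routines, so they can install a known-good translation before touching the cache. The C sketch below only illustrates that idea; the committed implementation is the assembly in stb1_locore.S, and the helpers tlb_load_wired(), tlb_invalidate_entry() and dcache_purge_line() are hypothetical stand-ins, not kernel APIs.

/*
 * Hypothetical sketch only -- see the stb1_locore.S hunks for the real code.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t vaddr_t;
typedef uint64_t paddr_t;

#define PMAP_ASID_CACHEOPS      1       /* reserved ASID, as in pmap.h below */
#define CACHE_LINE_SIZE         32      /* assumed D-cache line size */

/* Hypothetical stand-ins for the real low-level primitives. */
extern void tlb_load_wired(vaddr_t va, paddr_t pa, unsigned int asid);
extern void tlb_invalidate_entry(vaddr_t va, unsigned int asid);
extern void dcache_purge_line(vaddr_t va);

static void
cache_dpurge_no_tlbmiss(vaddr_t va, paddr_t pa, size_t len)
{
        vaddr_t sva = va;
        vaddr_t eva = va + len;

        /*
         * Pre-load a translation for the page under the reserved ASID so
         * that the purge loop below cannot itself take a TLB miss.
         */
        tlb_load_wired(sva, pa, PMAP_ASID_CACHEOPS);

        for (; va < eva; va += CACHE_LINE_SIZE)
                dcache_purge_line(va);

        /* Drop the temporary translation again. */
        tlb_invalidate_entry(sva, PMAP_ASID_CACHEOPS);
}

Reserving a separate ASID for this (note PMAP_ASID_USER_START moving from 1 to 2 in the pmap.h hunk) presumably keeps the temporary translation from colliding with any live user mapping.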

diffstat:

 sys/arch/sh5/include/pmap.h    |    5 +-
 sys/arch/sh5/sh5/genassym.cf   |    6 +-
 sys/arch/sh5/sh5/pmap.c        |  365 +++++++++++++++++++++++------------
 sys/arch/sh5/sh5/stb1_locore.S |  411 ++++++++++++++++++++++++++++++++--------
 sys/arch/sh5/sh5/stb1var.h     |   14 +-
 5 files changed, 584 insertions(+), 217 deletions(-)

diffs (truncated from 1257 to 300 lines):

diff -r ec1cf99871d0 -r e11ebf033706 sys/arch/sh5/include/pmap.h
--- a/sys/arch/sh5/include/pmap.h       Thu Sep 12 12:41:31 2002 +0000
+++ b/sys/arch/sh5/include/pmap.h       Thu Sep 12 12:44:13 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.6 2002/09/11 11:08:45 scw Exp $     */
+/*     $NetBSD: pmap.h,v 1.7 2002/09/12 12:44:13 scw Exp $     */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -72,7 +72,8 @@
 
 #define        PMAP_ASID_UNASSIGNED    ((u_int)(-1))
 #define        PMAP_ASID_KERNEL        0
-#define        PMAP_ASID_USER_START    1
+#define        PMAP_ASID_CACHEOPS      1
+#define        PMAP_ASID_USER_START    2
 
 typedef struct pmap *pmap_t;
 
diff -r ec1cf99871d0 -r e11ebf033706 sys/arch/sh5/sh5/genassym.cf
--- a/sys/arch/sh5/sh5/genassym.cf      Thu Sep 12 12:41:31 2002 +0000
+++ b/sys/arch/sh5/sh5/genassym.cf      Thu Sep 12 12:44:13 2002 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: genassym.cf,v 1.9 2002/09/10 12:08:49 scw Exp $
+#      $NetBSD: genassym.cf,v 1.10 2002/09/12 12:44:14 scw Exp $
 
 # Copyright 2002 Wasabi Systems, Inc.
 # All rights reserved.
@@ -48,6 +48,7 @@
 include <machine/pcb.h>
 include <machine/trap.h>
 include <machine/proc.h>
+include <machine/pmap.h>
 include <machine/memregion.h>
 
 include <sh5/pte.h>
@@ -291,6 +292,7 @@
 define SH5_PTEL_CB_NOCACHE     SH5_PTEL_CB_NOCACHE
 define SH5_PTEL_CB_WRITETHRU   SH5_PTEL_CB_WRITETHRU
 define SH5_PTEL_CB_WRITEBACK   SH5_PTEL_CB_WRITEBACK
+define SH5_PTEL_SZ_4KB         SH5_PTEL_SZ_4KB
 define SH5_PTEL_SZ_512MB       SH5_PTEL_SZ_512MB
 define SH5_PTEL_PR_R           SH5_PTEL_PR_R
 define SH5_PTEL_PR_W           SH5_PTEL_PR_W
@@ -304,6 +306,8 @@
 
 define SH5_PTE_PN_MASK_MOVI    SH5_PTE_PN_MASK_MOVI
 
+# Pmap constants
+define PMAP_ASID_CACHEOPS      PMAP_ASID_CACHEOPS
 
 # Control register bits
 define SH5_CONREG_SR_S                 SH5_CONREG_SR_S
diff -r ec1cf99871d0 -r e11ebf033706 sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c   Thu Sep 12 12:41:31 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c   Thu Sep 12 12:44:13 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.7 2002/09/11 11:08:45 scw Exp $     */
+/*     $NetBSD: pmap.c,v 1.8 2002/09/12 12:44:14 scw Exp $     */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -297,6 +297,7 @@
 #define        PVO_PTEGIDX_SET(pvo,i)  \
        ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 
+
 /*
  * This array is allocated at boot time and contains one entry per
  * PTE group.
@@ -521,50 +522,35 @@
 }
 
 /*
- * Clear the specified bit(s) in the PTE (actually, the PTEL)
- *
- * In this case, the pte MUST be resident in the ipt, and it may
- * also be resident in the TLB.
+ * We're about to raise the protection of a mapping.
+ * Make sure the cache is synchronised before the protection changes.
  */
-static __inline void
-pmap_pteg_clear_bit(volatile pte_t *pt, struct pvo_entry *pvo, u_int ptebit)
+static void
+pmap_cache_sync_raise(vaddr_t va, ptel_t ptel, ptel_t clrbits)
 {
-       ptel_t ptel;
-       pteh_t pteh;
-
-       pteh = pt->pteh;
-       ptel = pt->ptel;
-       ptel &= ~ptebit;
-       pt->ptel = ptel;
+       paddr_t pa;
 
-       if (pvo->pvo_pmap->pm_asid == PMAP_ASID_KERNEL ||
-           pvo->pvo_pmap->pm_asidgen == pmap_asid_generation) {
-               /*
-                * The mapping may be cached in the TLB. Call cpu-specific
-                * code to check and invalidate if necessary.
-                */
-               __cpu_tlbinv_cookie((pteh & SH5_PTEH_EPN_MASK) |
-                   (pvo->pvo_pmap->pm_asid << SH5_PTEH_ASID_SHIFT),
-                   SH5_PTEH_TLB_COOKIE(pteh));
-       }
+       /*
+        * Just return if the mapping is not cacheable.
+        */
+       if ((ptel & SH5_PTEL_CB_MASK) <= SH5_PTEL_CB_DEVICE)
+               return;
 
-       pmap_pteg_synch(ptel, pvo);
-}
+       /*
+        * Also just return if the page has never been referenced.
+        */
+       if ((ptel & SH5_PTEL_R) == 0)
+               return;
 
-static __inline void
-pmap_kpte_clear_bit(int idx, struct pvo_entry *pvo, ptel_t ptebit)
-{
-       ptel_t ptel;
+       pa = (paddr_t)(ptel & SH5_PTEL_PPN_MASK);
 
-       ptel = pmap_kernel_ipt[idx];
-
-       switch ((ptel & ptebit) & (SH5_PTEL_PR_W | SH5_PTEL_PR_X)) {
+       switch ((ptel & clrbits) & (SH5_PTEL_PR_W | SH5_PTEL_PR_X)) {
        case SH5_PTEL_PR_W | SH5_PTEL_PR_X:
                /*
                 * The page is being made no-exec, rd-only.
                 * Purge the data cache and invalidate insn cache.
                 */
-               __cpu_cache_dpurge_iinv(PVO_VADDR(pvo), NBPG);
+               __cpu_cache_dpurge_iinv(va, pa, NBPG);
                break;
 
        case SH5_PTEL_PR_W:
@@ -572,7 +558,7 @@
                 * The page is being made read-only.
                 * Purge the data-cache.
                 */
-               __cpu_cache_dpurge(PVO_VADDR(pvo), NBPG);
+               __cpu_cache_dpurge(va, pa, NBPG);
                break;
 
        case SH5_PTEL_PR_X:
@@ -580,7 +566,7 @@
                 * The page is being made no-exec.
                 * Invalidate the instruction cache.
                 */
-               __cpu_cache_iinv(PVO_VADDR(pvo), NBPG);
+               __cpu_cache_iinv(va, pa, NBPG);
                break;
 
        case 0:
@@ -590,11 +576,116 @@
                 */
                break;
        }
+}
+
+/*
+ * We're about to delete a mapping.
+ * Make sure the cache is synchronised before the mapping disappears.
+ */
+static void
+pmap_cache_sync_unmap(vaddr_t va, ptel_t ptel)
+{
+       paddr_t pa;
+
+       /*
+        * Just return if the mapping was not cacheable.
+        */
+       if ((ptel & SH5_PTEL_CB_MASK) <= SH5_PTEL_CB_DEVICE)
+               return;
 
        /*
-        * It's now safe to echo the change in the TLB.
+        * Also just return if the page has never been referenced.
         */
-       pmap_kernel_ipt[idx] &= ~ptebit;
+       if ((ptel & SH5_PTEL_R) == 0)
+               return;
+
+       pa = (paddr_t)(ptel & SH5_PTEL_PPN_MASK);
+
+       switch (ptel & (SH5_PTEL_PR_W | SH5_PTEL_PR_X)) {
+       case SH5_PTEL_PR_W | SH5_PTEL_PR_X:
+       case SH5_PTEL_PR_X:
+               /*
+                * The page was executable, and possibly writable.
+                * Purge the data cache and invalidate insn cache.
+                */
+               __cpu_cache_dpurge_iinv(va, pa, NBPG);
+               break;
+
+       case SH5_PTEL_PR_W:
+               /*
+                * The page was writable.
+                * Purge the data-cache.
+                */
+               __cpu_cache_dpurge(va, pa, NBPG);
+               break;
+
+       case 0:
+               /*
+                * The page was read-only.
+                * Just invalidate the data cache.
+                */
+               __cpu_cache_dpurge(va, pa, NBPG);
+               break;
+       }
+}
+
+/*
+ * Clear the specified bit(s) in the PTE (actually, the PTEL)
+ *
+ * In this case, the pte MUST be resident in the ipt, and it may
+ * also be resident in the TLB.
+ */
+static void
+pmap_pteg_clear_bit(volatile pte_t *pt, struct pvo_entry *pvo, u_int ptebit)
+{
+       pmap_t pm;
+       ptel_t ptel;
+       pteh_t pteh;
+
+       pm = pvo->pvo_pmap;
+       pteh = pt->pteh;
+       ptel = pt->ptel;
+       pt->ptel = ptel & ~ptebit;
+
+       if (pm->pm_asid == PMAP_ASID_KERNEL ||
+           pm->pm_asidgen == pmap_asid_generation) {
+               /*
+                * Before raising the protection of the mapping,
+                * make sure the cache is synchronised.
+                *
+                * Note: The cpu-specific cache handling code will ensure
+                * this doesn't cause a TLB miss exception.
+                */
+               pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);
+
+               /*
+                * The mapping may be cached in the TLB. Call cpu-specific
+                * code to check and invalidate if necessary.
+                */
+               __cpu_tlbinv_cookie((pteh & SH5_PTEH_EPN_MASK) |
+                   (pm->pm_asid << SH5_PTEH_ASID_SHIFT),
+                   SH5_PTEH_TLB_COOKIE(pteh));
+       }
+
+       pmap_pteg_synch(ptel, pvo);
+}
+
+static void
+pmap_kpte_clear_bit(int idx, struct pvo_entry *pvo, ptel_t ptebit)
+{
+       ptel_t ptel;
+
+       ptel = pmap_kernel_ipt[idx];
+
+       /*
+        * Synchronise the cache in readiness for raising the protection.
+        */
+       pmap_cache_sync_raise(PVO_VADDR(pvo), ptel, ptebit);
+
+       /*
+        * Echo the change in the TLB.
+        */
+       pmap_kernel_ipt[idx] = ptel & ~ptebit;
 
        __cpu_tlbinv(PVO_VADDR(pvo) | SH5_PTEH_SH,
            SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
@@ -609,7 +700,9 @@
  * This makes the mapping directly available to the TLB miss
  * handler.
  *
- * It is assumed that the mapping is not currently in the TLB.
+ * It is assumed that the mapping is not currently in the TLB, and
+ * hence not in the cache either. Therefore neither need to be
+ * synchronised.
  */
 static __inline void
 pmap_pteg_set(volatile pte_t *pt, struct pvo_entry *pvo)
@@ -626,24 +719,35 @@
  * This function will preserve Referenced/Modified state from the PTE
  * before ensuring there is no reference to the mapping in the TLB.
  */
-static __inline void
+static void
 pmap_pteg_unset(volatile pte_t *pt, struct pvo_entry *pvo)
 {
+       pmap_t pm;
        pteh_t pteh;
        ptel_t ptel;
 
+       pm = pvo->pvo_pmap;


