Source-Changes-HG archive


[src/netbsd-2-0]: src/sys/arch/sparc/sparc Pull up revisions 1.84-1.89 (reque...



details:   https://anonhg.NetBSD.org/src/rev/51a91bc2bc6e
branches:  netbsd-2-0
changeset: 560529:51a91bc2bc6e
user:      jdc <jdc%NetBSD.org@localhost>
date:      Sat Apr 24 18:25:34 2004 +0000

description:
Pull up revisions 1.84-1.89 (requested by pk in ticket #179)

Many fixes for issues with sparc multi-processor support (including
fixes to make HyperSPARC MP work).

diffstat:

 sys/arch/sparc/sparc/cache.c |  252 ++++++++++++++++++++----------------------
 1 files changed, 118 insertions(+), 134 deletions(-)

diffs (truncated from 397 to 300 lines):

diff -r 0cb37c287bea -r 51a91bc2bc6e sys/arch/sparc/sparc/cache.c
--- a/sys/arch/sparc/sparc/cache.c      Sat Apr 24 18:24:56 2004 +0000
+++ b/sys/arch/sparc/sparc/cache.c      Sat Apr 24 18:25:34 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cache.c,v 1.82.2.1 2004/04/08 21:03:40 jdc Exp $ */
+/*     $NetBSD: cache.c,v 1.82.2.2 2004/04/24 18:25:34 jdc Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -59,7 +59,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.82.2.1 2004/04/08 21:03:40 jdc Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.82.2.2 2004/04/24 18:25:34 jdc Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_sparc_arch.h"
@@ -233,16 +233,7 @@
         * Enable instruction cache and, on single-processor machines,
         * disable `Unimplemented Flush Traps'.
         */
-#if defined(MULTIPROCESSOR)
-#ifdef __HYPERSPARC_SMP_ICE_BUG_FIXED
-       v = HYPERSPARC_ICCR_ICE | (ncpu == 1 ? HYPERSPARC_ICCR_FTD : 0);
-#else
-       /* For now, disable cache (if ncpu > 1) and flush fault trap */
-       v = HYPERSPARC_ICCR_FTD | (ncpu == 1 ? HYPERSPARC_ICCR_ICE : 0);
-#endif
-#else
-       v = HYPERSPARC_ICCR_ICE | HYPERSPARC_ICCR_FTD;
-#endif
+       v = HYPERSPARC_ICCR_ICE | (ncpu <= 1 ? HYPERSPARC_ICCR_FTD : 0);
        wrasr(v, HYPERSPARC_ASRNUM_ICCR);
 }
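
For illustration, a minimal standalone sketch of the simplified ICCR setup
above: the instruction cache is now always enabled, and `Unimplemented
Flush Traps' are disabled only on uniprocessor machines. The bit values
below are made-up stand-ins for the real HYPERSPARC_ICCR_* definitions in
the sparc headers.

#include <stdio.h>

#define HYPERSPARC_ICCR_ICE	0x1	/* assumed: I-cache enable bit */
#define HYPERSPARC_ICCR_FTD	0x2	/* assumed: flush-trap disable bit */

static unsigned int
iccr_value(int ncpu)
{
	/* Enable the I-cache; disable flush traps only when uniprocessor. */
	return HYPERSPARC_ICCR_ICE | (ncpu <= 1 ? HYPERSPARC_ICCR_FTD : 0);
}

int
main(void)
{
	printf("ncpu=1 -> 0x%x\n", iccr_value(1));	/* ICE|FTD */
	printf("ncpu=4 -> 0x%x\n", iccr_value(4));	/* ICE only */
	return 0;
}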
 
@@ -348,15 +339,6 @@
 #endif /* SUN4M || SUN4D */
 
 
-/* XXX - should inline */
-void
-cache_flush(base, len)
-       caddr_t base;
-       u_int len;
-{
-       cpuinfo.cache_flush(base, len, getcontext());
-}
-
 /*
  * Note: the sun4 & sun4c the cache flush functions ignore the `ctx'
  * parameter. This can be done since the pmap operations that need
@@ -512,10 +494,9 @@
 #define CACHE_FLUSH_MAGIC      (CACHEINFO.c_totalsize / PAGE_SIZE)
 
 void
-sun4_cache_flush(base, len, ctx)
+sun4_cache_flush(base, len)
        caddr_t base;
        u_int len;
-       int ctx;
 {
        int i, ls, baseoff;
        char *p;
@@ -549,7 +530,7 @@
        cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
 #endif
 
-       if (i < CACHE_FLUSH_MAGIC) {
+       if (__predict_true(i < CACHE_FLUSH_MAGIC)) {
                /* cache_flush_page, for i pages */
                p = (char *)((int)base & ~baseoff);
                if (CACHEINFO.c_hwflush) {
@@ -566,19 +547,20 @@
 
        baseoff = (u_int)base & SGOFSET;
        i = (baseoff + len + SGOFSET) >> SGSHIFT;
-       if (i == 1)
-               sun4_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), ctx);
-       else {
-               if (HASSUN4_MMU3L) {
-                       baseoff = (u_int)base & RGOFSET;
-                       i = (baseoff + len + RGOFSET) >> RGSHIFT;
-                       if (i == 1)
-                               sun4_vcache_flush_region(VA_VREG(base), ctx);
-                       else
-                               sun4_vcache_flush_context(ctx);
-               } else
-                       sun4_vcache_flush_context(ctx);
+       if (__predict_true(i == 1)) {
+               sun4_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), 0);
+               return;
        }
+
+       if (HASSUN4_MMU3L) {
+               baseoff = (u_int)base & RGOFSET;
+               i = (baseoff + len + RGOFSET) >> RGSHIFT;
+               if (i == 1)
+                       sun4_vcache_flush_region(VA_VREG(base), 0);
+               else
+                       sun4_vcache_flush_context(0);
+       } else
+               sun4_vcache_flush_context(0);
 }
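
A hedged sketch of the selection logic in the rewritten tail of
sun4_cache_flush() above: flush a single segment when the range fits in
one, otherwise (with the 3-level MMU) a single region, otherwise the whole
context. The shift/mask constants are placeholders rather than the real
sun4 values.

#define SGSHIFT	18			/* assumed: log2(segment size) */
#define SGOFSET	((1 << SGSHIFT) - 1)
#define RGSHIFT	24			/* assumed: log2(region size) */
#define RGOFSET	((1 << RGSHIFT) - 1)

enum flush_unit { FLUSH_SEGMENT, FLUSH_REGION, FLUSH_CONTEXT };

enum flush_unit
pick_flush_unit(unsigned int base, unsigned int len, int has_3level_mmu)
{
	/* Number of segments touched by [base, base+len), rounded up. */
	if ((((base & SGOFSET) + len + SGOFSET) >> SGSHIFT) == 1)
		return FLUSH_SEGMENT;

	/* Number of regions touched, when the MMU has a third level. */
	if (has_3level_mmu &&
	    (((base & RGOFSET) + len + RGOFSET) >> RGSHIFT) == 1)
		return FLUSH_REGION;

	return FLUSH_CONTEXT;
}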
 
 
@@ -689,12 +671,18 @@
 
        cachestats.cs_npgflush++;
        p = (char *)va;
-       ls = CACHEINFO.c_linesize;
-       i = PAGE_SIZE >> CACHEINFO.c_l2linesize;
+
+       /*
+        * XXX - if called early during bootstrap, we don't have the cache
+        *       info yet. Make up a cache line size (double-word aligned)
+        */
+       if ((ls = CACHEINFO.c_linesize) == 0)
+               ls = 8;
+       i = PAGE_SIZE;
        octx = getcontext4m();
        trapoff();
        setcontext4m(ctx);
-       for (; --i >= 0; p += ls)
+       for (; i > 0; p += ls, i -= ls)
                sta(p, ASI_IDCACHELFP, 0);
 #if defined(MULTIPROCESSOR)
        /*
@@ -703,7 +691,7 @@
         * functions will not always cross flush it in the MP case (because
         * may not be active on this CPU) we flush the TLB entry now.
         */
-       if (cpuinfo.cpu_type == CPUTYP_HS_MBUS)
+       /*if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) -- more work than it's worth */
                sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
 
 #endif
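
A note on the bootstrap fallback added above: early in bootstrap the cache
tables are not filled in yet, so CACHEINFO.c_linesize reads as 0; the flush
loop therefore falls back to an 8-byte (double-word aligned) stride and
counts the page down in bytes instead of lines. A standalone sketch of that
loop shape, with a stub standing in for the sta(..., ASI_IDCACHELFP, ...)
flush and an assumed page size:

#define PAGE_SIZE	4096		/* assumed sun4m page size */

static unsigned int cache_linesize;	/* 0 until the cache info is probed */

static void
flush_line(char *p)
{
	/* Stub: stands in for sta(p, ASI_IDCACHELFP, 0). */
	(void)p;
}

void
flush_page_lines(char *page)
{
	int ls, i;

	/*
	 * If called before the cache info is known, make up a safe,
	 * double-word aligned line size.
	 */
	if ((ls = (int)cache_linesize) == 0)
		ls = 8;

	for (i = PAGE_SIZE; i > 0; page += ls, i -= ls)
		flush_line(page);
}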
@@ -720,56 +708,66 @@
        srmmu_vcache_flush_context(0);
 }
 
+void
+srmmu_vcache_flush_range(int va, int len, int ctx)
+{
+       int i, ls, offset;
+       char *p;
+       int octx;
+
+       /*
+        * XXX - if called early during bootstrap, we don't have the cache
+        *       info yet. Make up a cache line size (double-word aligned)
+        */
+       if ((ls = CACHEINFO.c_linesize) == 0)
+               ls = 8;
+
+       /* Compute # of cache lines covered by this range */
+       offset = va & (ls - 1);
+       i = len + offset;
+       p = (char *)(va & ~(ls - 1));
+
+       octx = getcontext4m();
+       trapoff();
+       setcontext4m(ctx);
+       for (; i > 0; p += ls, i -= ls)
+               sta(p, ASI_IDCACHELFP, 0);
+
+#if defined(MULTIPROCESSOR)
+       if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
+               /*
+                * See hypersparc comment in srmmu_vcache_flush_page().
+                */
+               offset = va & PGOFSET;
+               i = (offset + len + PGOFSET) >> PGSHIFT;
+
+               va = va & ~PGOFSET;
+               for (; --i >= 0; va += PAGE_SIZE)
+                       sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
+       }
+#endif
+       setcontext4m(octx);
+       trapon();
+       return;
+}
+
 /*
  * Flush a range of virtual addresses (in the current context).
- * The first byte is at (base&~PGOFSET) and the last one is just
- * before byte (base+len).
  *
  * We choose the best of (context,segment,page) here.
  */
 
-#define CACHE_FLUSH_MAGIC      (CACHEINFO.c_totalsize / PAGE_SIZE)
-
 void
-srmmu_cache_flush(base, len, ctx)
+srmmu_cache_flush(base, len)
        caddr_t base;
        u_int len;
-       int ctx;
 {
-       int i, ls, baseoff;
-       char *p;
+       int ctx = getcontext4m();
+       int i, baseoff;
 
-       if (len < PAGE_SIZE) {
-               int octx;
-               /* less than a page, flush just the covered cache lines */
-               ls = CACHEINFO.c_linesize;
-               baseoff = (int)base & (ls - 1);
-               i = (baseoff + len + ls - 1) >> CACHEINFO.c_l2linesize;
-               p = (char *)((int)base & -ls);
-               octx = getcontext4m();
-               trapoff();
-               setcontext4m(ctx);
-               for (; --i >= 0; p += ls)
-                       sta(p, ASI_IDCACHELFP, 0);
-#if defined(MULTIPROCESSOR)
-               if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
-                       /*
-                        * See hypersparc comment in srmmu_vcache_flush_page().
-                        * Just flush both possibly touched pages
-                        * fromt the TLB.
-                        */
-                       int va = (int)base & ~0xfff;
-                       sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
-                       sta((va+4096) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
-               }
-#endif
-               setcontext4m(octx);
-               trapon();
-               return;
-       }
 
        /*
-        * Figure out how much must be flushed.
+        * Figure out the most efficient way to flush.
         *
         * If we need to do CACHE_FLUSH_MAGIC pages,  we can do a segment
         * in the same number of loop iterations.  We can also do the whole
@@ -784,62 +782,54 @@
         * segments), but I did not want to debug that now and it is
         * not clear it would help much.
         *
-        * (XXX the magic number 16 is now wrong, must review policy)
         */
-       baseoff = (int)base & PGOFSET;
-       i = (baseoff + len + PGOFSET) >> PGSHIFT;
+
+       if (__predict_true(len < CACHEINFO.c_totalsize)) {
+#if defined(MULTIPROCESSOR)
+               FXCALL3(cpuinfo.sp_vcache_flush_range,
+                       cpuinfo.ft_vcache_flush_range,
+                       (int)base, len, ctx, CPUSET_ALL);
+#else
+               cpuinfo.sp_vcache_flush_range((int)base, len, ctx);
+#endif
+               return;
+       }
 
        cachestats.cs_nraflush++;
-#ifdef notyet
-       cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
+
+       baseoff = (u_int)base & SGOFSET;
+       i = (baseoff + len + SGOFSET) >> SGSHIFT;
+       if (__predict_true(i == 1)) {
+#if defined(MULTIPROCESSOR)
+               FXCALL3(cpuinfo.sp_vcache_flush_segment,
+                       cpuinfo.ft_vcache_flush_segment,
+                       VA_VREG(base), VA_VSEG(base), ctx, CPUSET_ALL);
+#else
+               srmmu_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), ctx);
 #endif
-
-       if (i < CACHE_FLUSH_MAGIC) {
-               int octx;
-               /* cache_flush_page, for i pages */
-               p = (char *)((int)base & ~baseoff);
-               ls = CACHEINFO.c_linesize;
-               i <<= PGSHIFT - CACHEINFO.c_l2linesize;
-               octx = getcontext4m();
-               trapoff();
-               setcontext4m(ctx);
-               for (; --i >= 0; p += ls)
-                       sta(p, ASI_IDCACHELFP, 0);
-#if defined(MULTIPROCESSOR)
-               if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
-                       /* Just flush the segment(s) from the TLB */
-                       /* XXX - assumes CACHE_FLUSH_MAGIC <= NBPSG */
-                       int va = (int)base & ~SGOFSET;
-                       sta(va | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0);
-                       sta((va+NBPSG) | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0);
-               }
-#endif
-               setcontext4m(octx);
-               trapon();
                return;
        }
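
The diff is truncated at this point; for the part that is visible, here is
a placeholder sketch of the dispatch in the rewritten srmmu_cache_flush()
(uniprocessor path only; on MULTIPROCESSOR kernels the same operations are
wrapped in FXCALL3 cross-calls so that every CPU flushes). All names and
constants below are illustrative stand-ins, not the real kernel interfaces.

#define SGSHIFT		18			/* assumed: log2(segment size) */
#define SGOFSET		((1 << SGSHIFT) - 1)
#define CACHE_TOTALSIZE	(256 * 1024)		/* assumed CACHEINFO.c_totalsize */

/* Stubs standing in for getcontext4m() and the cpuinfo flush hooks. */
static int  getcontext_stub(void) { return 0; }
static void vcache_flush_range(int va, int len, int ctx)
		{ (void)va; (void)len; (void)ctx; }
static void vcache_flush_segment(int vreg, int vseg, int ctx)
		{ (void)vreg; (void)vseg; (void)ctx; }
static int  va_vreg(unsigned int va)			/* stands in for VA_VREG() */
		{ return (int)(va >> 24); }
static int  va_vseg(unsigned int va)			/* stands in for VA_VSEG() */
		{ return (int)((va >> SGSHIFT) & 0x3f); }

void
cache_flush_sketch(unsigned int base, unsigned int len)
{
	/* The context is now looked up here instead of being passed in. */
	int ctx = getcontext_stub();

	if (len < CACHE_TOTALSIZE) {
		/* Smaller than the cache: flush only the covered lines. */
		vcache_flush_range((int)base, (int)len, ctx);
		return;
	}

	if ((((base & SGOFSET) + len + SGOFSET) >> SGSHIFT) == 1) {
		/* The range fits in one segment: flush that segment. */
		vcache_flush_segment(va_vreg(base), va_vseg(base), ctx);
		return;
	}

	/* Larger ranges are handled in the truncated remainder of the diff. */
}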


