Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm Support (untested) SHEEVA_L2_CACHE and SHEEVA_L2_CACHE_WT options



details:   https://anonhg.NetBSD.org/src/rev/427b96bf1060
branches:  trunk
changeset: 795468:427b96bf1060
user:      matt <matt%NetBSD.org@localhost>
date:      Mon Apr 14 20:50:46 2014 +0000

description:
Support (untested) SHEEVA_L2_CACHE and SHEEVA_L2_CACHE_WT options.
Move prototypes out of <arm/cpufunc.h> into their own file, <arm/cpufunc_proto.h>.
Add sdcache routines to cpufunc_asm_sheeva.S.
Add code to sheeva_setup() to initialize the sdcache and the sdcache info.
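
(For context: these are kernel configuration options, presumably declared in
sys/arch/arm/conf/files.arm, which grows five lines in the diffstat below.
Assuming the usual NetBSD conventions, a Sheeva-based kernel config would
enable them with lines such as:

	options 	SHEEVA_L2_CACHE 	# enable the Sheeva L2 (sdcache)
	options 	SHEEVA_L2_CACHE_WT	# run the L2 write-through

where SHEEVA_L2_CACHE_WT selects write-through instead of the default
write-back behaviour, matching the cache_type assignment in the diff.)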

diffstat:

 sys/arch/arm/arm/cpufunc.c            |   60 ++-
 sys/arch/arm/arm/cpufunc_asm_sheeva.S |  293 ++++++++++++++++-----
 sys/arch/arm/conf/files.arm           |    5 +-
 sys/arch/arm/include/armreg.h         |    5 +-
 sys/arch/arm/include/cpuconf.h        |   14 +-
 sys/arch/arm/include/cpufunc.h        |  390 -----------------------------
 sys/arch/arm/include/cpufunc_proto.h  |  443 ++++++++++++++++++++++++++++++++++
 7 files changed, 709 insertions(+), 501 deletions(-)

diffs (truncated from 1455 to 300 lines):

diff -r 6ea51b07ffce -r 427b96bf1060 sys/arch/arm/arm/cpufunc.c
--- a/sys/arch/arm/arm/cpufunc.c        Mon Apr 14 19:45:40 2014 +0000
+++ b/sys/arch/arm/arm/cpufunc.c        Mon Apr 14 20:50:46 2014 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.c,v 1.145 2014/04/10 02:49:42 matt Exp $       */
+/*     $NetBSD: cpufunc.c,v 1.146 2014/04/14 20:50:46 matt Exp $       */
 
 /*
  * arm7tdmi support code Copyright (c) 2001 John Fremlin
@@ -49,7 +49,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.145 2014/04/10 02:49:42 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.146 2014/04/14 20:50:46 matt Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_cpuoptions.h"
@@ -66,8 +66,8 @@
 #include <uvm/uvm.h>
 
 #include <arm/cpuconf.h>
-#include <arm/cpufunc.h>
 #include <arm/locore.h>
+#include <arm/cpufunc_proto.h>
 
 #ifdef CPU_XSCALE_80200
 #include <arm/xscale/i80200reg.h>
@@ -1212,8 +1212,7 @@
 };
 #endif /* CPU_IXP12X0 */
 
-#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
+#if defined(CPU_XSCALE)
 struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */
 
@@ -1272,8 +1271,7 @@
 
        .cf_setup               = xscale_setup
 };
-#endif
-/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
+#endif /* CPU_XSCALE */
 
 #if defined(CPU_ARMV7)
 struct cpu_functions armv7_cpufuncs = {
@@ -3465,8 +3463,7 @@
 }
 #endif /* CPU_IXP12X0 */
 
-#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
+#if defined(CPU_XSCALE)
 struct cpu_option xscale_options[] = {
 #ifdef COMPAT_12
        { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
@@ -3547,7 +3544,7 @@
        __asm volatile("mcr p15, 0, %0, c1, c0, 1"
                : : "r" (auxctl));
 }
-#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
+#endif /* CPU_XSCALE */
 
 #if defined(CPU_SHEEVA)
 struct cpu_option sheeva_options[] = {
@@ -3565,8 +3562,6 @@
 void
 sheeva_setup(char *args)
 {
-       uint32_t sheeva_ext;
-
        int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
            | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
            | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
@@ -3586,18 +3581,36 @@
        cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
 
        /* Enable DCache Streaming Switch and Write Allocate */
-       __asm volatile("mrc p15, 1, %0, c15, c1, 0"
-           : "=r" (sheeva_ext));
+       uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
 
        sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
-
-       __asm volatile("mcr p15, 1, %0, c15, c1, 0"
-           :: "r" (sheeva_ext));
-
-       /*
-        * Sheeva has L2 Cache.  Enable/Disable it here.
-        * Really not support yet...
-        */
+#ifdef SHEEVA_L2_CACHE
+       sheeva_ext |= FC_L2CACHE_EN;
+       sheeva_ext &= ~FC_L2_PREF_DIS;
+#endif
+
+       armreg_sheeva_xctrl_write(sheeva_ext);
+
+#ifdef SHEEVA_L2_CACHE
+#ifndef SHEEVA_L2_CACHE_WT
+       arm_scache.cache_type = CPU_CT_CTYPE_WB2;
+#elif CPU_CT_CTYPE_WT != 0
+       arm_scache.cache_type = CPU_CT_CTYPE_WT;
+#endif
+       arm_scache.cache_unified = 1;
+       arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
+       arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
+       arm_scache.dcache_ways = arm_scache.icache_ways = 4;
+       arm_scache.dcache_way_size = arm_scache.icache_way_size =
+           arm_scache.dcache_size / arm_scache.dcache_ways;
+       arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
+       arm_scache.dcache_sets = arm_scache.icache_sets =
+           arm_scache.dcache_way_size / arm_scache.dcache_line_size;
+
+       cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
+       cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
+       cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
+#endif /* SHEEVA_L2_CACHE */
 
 #ifdef __ARMEB__
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
@@ -3620,5 +3633,8 @@
 
        /* And again. */
        cpu_idcache_wbinv_all();
+#ifdef SHEEVA_L2_CACHE
+       sheeva_sdcache_wbinv_all();
+#endif
 }
 #endif /* CPU_SHEEVA */
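
The arm_scache initialization in the sheeva_setup() hunk above hard-codes a
256 KiB, 4-way unified L2 with 32-byte lines and derives the remaining
geometry fields from those constants.  A minimal standalone sketch of that
arithmetic (an illustration, not NetBSD code; the constants are taken from
the diff):

	#include <stdio.h>

	int main(void)
	{
		unsigned size = 256 * 1024;	/* unified L2: 256 KiB */
		unsigned ways = 4;		/* 4-way set-associative */
		unsigned line = 32;		/* 32-byte cache lines */

		unsigned way_size = size / ways;	/* 65536: bytes per way */
		unsigned sets = way_size / line;	/* 2048: lines per way */

		printf("way_size=%u sets=%u\n", way_size, sets);
		return 0;
	}

The same hunk also shows the hook pattern: the generic cpufuncs table gains
cf_sdcache_wb_range/cf_sdcache_inv_range/cf_sdcache_wbinv_range pointers, so
machine-independent callers can dispatch to the new Sheeva routines without
knowing anything about the L2 itself.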
diff -r 6ea51b07ffce -r 427b96bf1060 sys/arch/arm/arm/cpufunc_asm_sheeva.S
--- a/sys/arch/arm/arm/cpufunc_asm_sheeva.S     Mon Apr 14 19:45:40 2014 +0000
+++ b/sys/arch/arm/arm/cpufunc_asm_sheeva.S     Mon Apr 14 20:50:46 2014 +0000
@@ -39,17 +39,19 @@
        .word   _C_LABEL(PAGE_MASK)
 
 ENTRY(sheeva_dcache_wbinv_range)
-       str     lr, [sp, #-4]!
-       mrs     lr, cpsr
+       push    {r4,r5}
+       mrs     r4, cpsr
+       orr     r5, r4, #I32_bit | F32_bit
+
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
-       ldr     ip, [ip]
-       sub     ip, ip, #1
-       and     r2, r0, ip
+       ldr     r3, [ip]
+       sub     r3, r3, #1
+       and     r2, r0, r3
        add     r1, r1, r2
-       add     r1, r1, ip
-       bics    r1, r1, ip
-       bics    r0, r0, ip
+       add     r1, r1, r3
+       bic     r1, r1, r3
+       bic     r0, r0, r3
 
        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
@@ -57,16 +59,13 @@
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
+       sub     r2, r0, #1
 1:
-       add     r3, r0, ip
-       sub     r2, r3, #1
-       /* Disable irqs */
-       orr     r3, lr, #I32_bit | F32_bit
-       msr     cpsr_c, r3
+       add     r2, r2, ip
+       msr     cpsr_c, r5              /* Disable irqs */
        mcr     p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
        mcr     p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
-       /* Enable irqs */
-       msr     cpsr_c, lr
+       msr     cpsr_c, r4              /* Enable irqs */
 
        add     r0, r0, ip
        sub     r1, r1, ip
@@ -77,21 +76,24 @@
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
-       ldr     lr, [sp], #4
+       pop     {r4, r5}
        RET
+END(sheeva_dcache_wbinv_range)
 
 ENTRY(sheeva_dcache_inv_range)
-       str     lr, [sp, #-4]!
-       mrs     lr, cpsr
+       push    {r4,r5}
+       mrs     r4, cpsr
+       orr     r5, r4, #I32_bit | F32_bit
+
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
-       ldr     ip, [ip]
-       sub     ip, ip, #1
-       and     r2, r0, ip
+       ldr     r3, [ip]
+       sub     r3, r3, #1
+       and     r2, r0, r3
        add     r1, r1, r2
-       add     r1, r1, ip
-       bics    r1, r1, ip
-       bics    r0, r0, ip
+       add     r1, r1, r3
+       bic     r1, r1, r3
+       bic     r0, r0, r3
 
        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
@@ -99,16 +101,55 @@
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
+       sub     r2, r0, #1
 1:
-       add     r3, r0, ip
-       sub     r2, r3, #1
-       /* Disable irqs */
-       orr     r3, lr, #I32_bit | F32_bit
-       msr     cpsr_c, r3
+       add     r2, r2, ip
+       msr     cpsr_c, r5              /* Disable irqs */
        mcr     p15, 5, r0, c15, c14, 0 /* Inv zone start address */
        mcr     p15, 5, r2, c15, c14, 1 /* Inv zone end address */
-       /* Enable irqs */
-       msr     cpsr_c, lr
+       msr     cpsr_c, r4              /* Enable irqs */
+
+       add     r0, r0, ip
+       sub     r1, r1, ip
+       cmp     r1, #PAGE_SIZE
+       movcc   ip, r1
+       movcs   ip, #PAGE_SIZE
+       cmp     r1, #0
+       bne     1b
+       mov     r0, #0
+       mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
+       pop     {r4, r5}
+       RET
+END(sheeva_dcache_inv_range)
+
+ENTRY(sheeva_dcache_wb_range)
+       push    {r4,r5}
+       mrs     r4, cpsr
+       orr     r5, r4, #I32_bit | F32_bit
+
+       /* Start with cache line aligned address */
+       ldr     ip, .Lsheeva_cache_line_size
+       ldr     r3, [ip]
+       sub     r3, r3, #1
+       and     r2, r0, r3
+       add     r1, r1, r2
+       add     r1, r1, r3
+       bic     r1, r1, r3
+       bic     r0, r0, r3
+
+       ldr     ip, .Lsheeva_asm_page_mask
+       and     r2, r0, ip
+       rsb     r2, r2, #PAGE_SIZE
+       cmp     r1, r2
+       movcc   ip, r1
+       movcs   ip, r2
+       sub     r2, r0, #1
+1:
+       add     r2, r2, ip
+       msr     cpsr_c, r5              /* Disable irqs */
+       mcr     p15, 5, r0, c15, c13, 0 /* Clean zone start address */
+       mcr     p15, 5, r2, c15, c13, 1 /* Clean zone end address */
+       msr     cpsr_c, r4              /* Enable irqs */
 
        add     r0, r0, ip
        sub     r1, r1, ip
@@ -119,12 +160,66 @@
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
-       ldr     lr, [sp], #4
+       pop     {r4, r5}
        RET
+END(sheeva_dcache_wb_range)
+
+ENTRY(sheeva_idcache_wbinv_range)
+       push    {r4,r5}
+       mrs     r4, cpsr
+       orr     r5, r4, #I32_bit | F32_bit
+
+       /* Start with cache line aligned address */
+       ldr     ip, .Lsheeva_cache_line_size
+       ldr     r3, [ip]
+       sub     r3, r3, #1
+       and     r2, r0, r3


