Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm/cortex something temporary that will go away on...



details:   https://anonhg.NetBSD.org/src/rev/8becfcc1d14d
branches:  trunk
changeset: 341832:8becfcc1d14d
user:      marty <marty%NetBSD.org@localhost>
date:      Wed Nov 25 04:03:34 2015 +0000

description:
something temporary that will go away once odroid xu4 works

diffstat:

 sys/arch/arm/cortex/cortex_init.S |  780 ++++++++++++++++++++++++++++++++++++++
 1 files changed, 780 insertions(+), 0 deletions(-)

diffs (truncated from 784 to 300 lines):

diff -r b4f1b487bd17 -r 8becfcc1d14d sys/arch/arm/cortex/cortex_init.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/arm/cortex/cortex_init.S Wed Nov 25 04:03:34 2015 +0000
@@ -0,0 +1,780 @@
+/*     $NetBSD: cortex_init.S,v 1.1 2015/11/25 04:03:34 marty Exp $    */
+/*-
+ * Copyright (c) 2012 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_cpuoptions.h"
+#include "opt_cputypes.h"
+#include "opt_multiprocessor.h"
+
+#include <arm/asm.h>
+#include <arm/armreg.h>
+#include <arm/cortex/scu_reg.h>
+#include "assym.h"
+
+#define A15    0xf
+//#define MPDEBUG
+
+// Macro to call routines in .text
+#if defined(KERNEL_BASES_EQUAL)
+#define CALL(f)                bl      _C_LABEL(f)
+#else
+#define        CALL(f) \
+       movw    ip, #:lower16:_C_LABEL(f); \
+       movt    ip, #:upper16:_C_LABEL(f); \
+       sub     ip, ip, #KERNEL_BASE_VOFFSET; \
+       blx     ip
+#endif
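+// When the kernel is linked high but still running from physical addresses
+// (KERNEL_BASES_EQUAL not defined), the movw/movt pair materializes the
+// routine's link-time VA and the KERNEL_BASE_VOFFSET subtraction rebases it
+// to the PA we can actually reach with the MMU still off.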
+
+
+// We'll modify va and pa at run time so we can use relocatable addresses.
+#define MMU_INIT(va,pa,n_sec,attr) \
+       .word   ((va) & 0xffffffff)|(n_sec)                 ; \
+       .word   ((pa) & 0xffffffff)|(attr)                  ; \
+
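+// For illustration only (not part of this change), a port's init table
+// built with MMU_INIT might look like:
+//
+//	mmu_init_table:
+//		// 64 sections (64MB) of RAM at KERNEL_BASE, cacheable
+//		MMU_INIT(KERNEL_BASE, KERNEL_BASE_PHYS, 64,
+//		    L1_S_PROTO | L1_S_C | L1_S_B)	// plus AP bits as needed
+//		MMU_INIT(0, 0, 0, 0)			// n_sec == 0 ends the table
+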
+// Set up a preliminary mapping in the MMU to allow us to run at KERNEL_BASE
+// with caches on.  If we are MULTIPROCESSOR, save the TTB address.
+//
+arm_boot_l1pt_init:
+#if defined(MULTIPROCESSOR)
+       movw    r3, #:lower16:cortex_mmuinfo
+       movt    r3, #:upper16:cortex_mmuinfo
+#if !defined(KERNEL_BASES_EQUAL)
+       sub     r3, r3, #KERNEL_BASE_VOFFSET
+#endif
+       str     r0, [r3]
+
+       // Make sure the info makes it into memory
+       mcr     p15, 0, r3, c7, c10, 1          // writeback the cache line
+       dsb
+#endif
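+       // (Secondary processors started later can read cortex_mmuinfo to
+       // find the translation table; hence the cache writeback above.)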
+
+       mov     ip, r1                  // save mmu table addr
+       // Build page table from scratch
+       mov     r1, r0                  // Start address to clear memory.
+       // Zero the entire table so all virtual addresses are invalid.
+       add     r2, r1, #L1_TABLE_SIZE  // Ending address
+       mov     r4, #0
+       mov     r5, #0
+       mov     r6, #0
+       mov     r7, #0
+1:     stmia   r1!, {r4-r7}            // 16 bytes at a time
+       cmp     r1, r2
+       blt     1b
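+       // (The L1 table is 16KB, i.e. 4096 word-sized entries, so the loop
+       // above clears it in 1024 16-byte stores.)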
+
+       // Now create our entries per the mmu_init_table.
+       l1table .req r0
+       va      .req r1
+       pa      .req r2
+       n_sec   .req r3
+       attr    .req r4
+       itable  .req r5
+
+       mov     attr, #0
+       mrc     p15, 0, r3, c0, c0, 5   // MPIDR read
+       cmp     r3, #0                  // not zero?
+       movne   attr, #L1_S_V6_S        //    yes, shareable attribute
+       mov     itable, ip              // reclaim table address
+       b       3f
+
+2:     str     pa, [l1table, va, lsl #2]
+       add     va, va, #1
+       add     pa, pa, #(L1_S_SIZE)
+       subs    n_sec, n_sec, #1
+       bhi     2b
+
+3:     ldmia   itable!, {va, pa}
+       // Convert va to l1 offset:     va = 4 * (va >> L1_S_SHIFT)
+       ubfx    n_sec, va, #0, #L1_S_SHIFT
+       lsr     va, va, #L1_S_SHIFT
+
+       // Do we need to add sharing for this?
+       tst     pa, #(L1_S_C|L1_S_B)    // is this entry cacheable?
+       orrne   pa, pa, attr            // add sharing
+
+4:     cmp     n_sec, #0
+       bne     2b
+       bx      lr                      // return
+
+       .unreq  va
+       .unreq  pa
+       .unreq  n_sec
+       .unreq  attr
+       .unreq  itable
+       .unreq  l1table
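+
+// For illustration only (not part of this change), the table walk above is
+// roughly this C:
+//
+//	for (;; itable += 2) {
+//		uint32_t n   = itable[0] & (L1_S_SIZE - 1); // n_sec field
+//		uint32_t idx = itable[0] >> L1_S_SHIFT;     // L1 section index
+//		uint32_t pde = itable[1];                   // pa | attr
+//		if (pde & (L1_S_C | L1_S_B))                // cacheable?
+//			pde |= attr;                        // shareable on MP
+//		if (n == 0)                                 // n_sec == 0 ends table
+//			break;
+//		while (n--) {                               // one section per entry
+//			l1table[idx++] = pde;
+//			pde += L1_S_SIZE;
+//		}
+//	}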
+
+//
+// Coprocessor register initialization values
+//
+#if defined(CPU_CORTEXA8)
+#undef CPU_CONTROL_SWP_ENABLE          // not present on A8
+#define CPU_CONTROL_SWP_ENABLE         0
+#endif
+#ifdef __ARMEL__
+#define CPU_CONTROL_EX_BEND_SET                0
+#else
+#define CPU_CONTROL_EX_BEND_SET                CPU_CONTROL_EX_BEND
+#endif
+#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
+#define CPU_CONTROL_AFLT_ENABLE_CLR    CPU_CONTROL_AFLT_ENABLE
+#define CPU_CONTROL_AFLT_ENABLE_SET    0
+#else
+#define CPU_CONTROL_AFLT_ENABLE_CLR    0
+#define CPU_CONTROL_AFLT_ENABLE_SET    CPU_CONTROL_AFLT_ENABLE
+#endif
+
+// bits to set in the Control Register
+//
+#define CPU_CONTROL_SET \
+       (CPU_CONTROL_MMU_ENABLE         |       \
+        CPU_CONTROL_AFLT_ENABLE_SET    |       \
+        CPU_CONTROL_DC_ENABLE          |       \
+        CPU_CONTROL_SWP_ENABLE         |       \
+        CPU_CONTROL_BPRD_ENABLE        |       \
+        CPU_CONTROL_IC_ENABLE          |       \
+        CPU_CONTROL_EX_BEND_SET        |       \
+        CPU_CONTROL_UNAL_ENABLE)
+
+// bits to clear in the Control Register
+//
+#define CPU_CONTROL_CLR \
+       (CPU_CONTROL_AFLT_ENABLE_CLR)
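+
+// (arm_cpuinit below ORs CPU_CONTROL_SET into the current SCTLR value and
+// clears CPU_CONTROL_CLR from it before the final SCTLR write that turns
+// the MMU on.)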
+
+arm_cpuinit:
+       // Because the MMU may already be on, do the typical sequence to set
+       // the Translation Table Base(s).
+       mov     ip, lr
+       mov     r10, r0                 // save TTBR
+       mov     r1, #0
+
+       mcr     p15, 0, r1, c7, c5, 0   // invalidate I cache
+
+       mrc     p15, 0, r2, c1, c0, 0   // SCTLR read
+       movw    r1, #(CPU_CONTROL_DC_ENABLE|CPU_CONTROL_IC_ENABLE)
+       bic     r2, r2, r1              // clear I+D cache enable
+
+#ifdef __ARMEB__
+       // SCTLR.EE determines the endianness of translation table lookups.
+       // So we need to make sure it's set before starting to use the new
+       // translation tables (which are big endian).
+       //
+       orr     r2, r2, #CPU_CONTROL_EX_BEND
+       bic     r2, r2, #CPU_CONTROL_MMU_ENABLE
+       pli     [pc, #32]               // preload the next few cachelines
+       pli     [pc, #64]
+       pli     [pc, #96]
+       pli     [pc, #128]
+#endif
+
+       mcr     p15, 0, r2, c1, c0, 0   // SCTLR write
+
+       XPUTC(#'F')
+       dsb                             // Drain the write buffers.
+
+       XPUTC(#'G')
+       mrc     p15, 0, r1, c0, c0, 5   // MPIDR read
+       cmp     r1, #0
+       orrlt   r10, r10, #TTBR_MPATTR  // MP, cacheable (Normal WB)
+       orrge   r10, r10, #TTBR_UPATTR  // Non-MP, cacheable, normal WB
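+       // (MPIDR bit 31 is set on cores with the multiprocessing extensions,
+       // so "lt" above selects the MP attributes.)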
+       XPUTC(#'0')
+       mcr     p15, 0, r10, c2, c0, 0  // TTBR0 write
+#if defined(ARM_MMU_EXTENDED)
+       // When using split TTBRs, we need to set both since the physical
+       // addresses we were/are using might be in either.
+       XPUTC(#'1')
+       mcr     p15, 0, r10, c2, c0, 1  // TTBR1 write
+#endif
+
+       XPUTC(#'H')
+#if defined(ARM_MMU_EXTENDED)
+       XPUTC(#'1')
+       mov     r1, #TTBCR_S_N_1        // make sure TTBCR_S_N is 1
+#else
+       XPUTC(#'0')
+       mov     r1, #0                  // make sure TTBCR is 0
+#endif
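+       // With TTBCR.N = 1 the VA space is split: the low 2GB translates
+       // via TTBR0 and the high 2GB via TTBR1.  With N = 0 everything
+       // goes through TTBR0.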
+       mcr     p15, 0, r1, c2, c0, 2   // TTBCR write
+
+       isb
+
+#if !defined(CPU_CORTEXA5)
+       XPUTC(#'I')
+       mov     r1, #0
+       mcr     p15, 0, r1, c8, c7, 0   // TLBIALL (just this core)
+       dsb
+       isb
+#endif
+
+       XPUTC(#'J')
+       mov     r1, #0                  // get KERNEL_PID
+       mcr     p15, 0, r1, c13, c0, 1  // CONTEXTIDR write
+
+       // Set the Domain Access Control Register.  Very important!
+       XPUTC(#'K')
+       mov     r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
+       mcr     p15, 0, r1, c3, c0, 0   // DACR write
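+       // (DOMAIN_CLIENT means the permission bits in each descriptor are
+       // actually checked; a "manager" domain would bypass them and a
+       // "no access" domain would fault every access.)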
+
+       //
+       // Enable the MMU, etc.
+       //
+       XPUTC(#'L')
+       mrc     p15, 0, r1, c1, c0, 0   // SCTLR read
+
+       movw    r3, #:lower16:CPU_CONTROL_SET
+       movt    r3, #:upper16:CPU_CONTROL_SET
+       orr     r0, r1, r3
+#if defined(CPU_CONTROL_CLR) && (CPU_CONTROL_CLR != 0)
+       bic     r0, r0, #CPU_CONTROL_CLR
+#endif
+
+       pli     1f
+       dsb
+
+       // turn mmu on!
+       //
+       mov     r0, r0                  // fetch instruction cacheline
+1:     mcr     p15, 0, r0, c1, c0, 0   // SCTLR write
+
+       // Ensure that the coprocessor has finished turning on the MMU.
+       //
+       mrc     p15, 0, r0, c0, c0, 0   // Read an arbitrary value.
+       mov     r0, r0                  // Stall until read completes.
+       XPUTC(#'M')
+
+       bx      ip                      // return
+
+       .p2align 2
+
+#if defined(VERBOSE_INIT_ARM) && defined(XPUTC_COM)
+#define TIMO           0x25000
+#ifndef COM_MULT
+#define COM_MULT       1
+#endif
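+// xputc busy-waits (bounded by TIMO) on the console UART's line status
+// register before transmitting.  COM_MULT is the register stride: the
+// byte-wide 8250-style registers may be mapped on 1-, 2- or 4-byte
+// boundaries, hence the ldrb/ldrh/ldr variants below.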
+xputc:
+       mov     r2, #TIMO
+#ifdef CONADDR
+       movw    r3, #:lower16:CONADDR
+       movt    r3, #:upper16:CONADDR
+#elif defined(CONSADDR)
+       movw    r3, #:lower16:CONSADDR
+       movt    r3, #:upper16:CONSADDR
+#endif
+1:
+#if COM_MULT == 1
+       ldrb    r1, [r3, #(COM_LSR*COM_MULT)]
+#else
+#if COM_MULT == 2
+       ldrh    r1, [r3, #(COM_LSR*COM_MULT)]
+#elif COM_MULT == 4
+       ldr     r1, [r3, #(COM_LSR*COM_MULT)]
+#endif
+#ifdef COM_BSWAP


