Source-Changes-HG archive


[src/netbsd-6]: src/sys/arch Pull up following revision(s) (requested by matt...



details:   https://anonhg.NetBSD.org/src/rev/8cb9ab92b81b
branches:  netbsd-6
changeset: 774089:8cb9ab92b81b
user:      riz <riz%NetBSD.org@localhost>
date:      Wed May 09 22:42:31 2012 +0000

description:
Pull up following revision(s) (requested by matt in ticket #241):
        sys/arch/powerpc/conf/kern-mb.ldscript: revision 1.1
        sys/arch/powerpc/include/booke/pmap.h: revision 1.9
        sys/arch/powerpc/booke/e500_tlb.c: revision 1.8
        sys/arch/powerpc/conf/files.powerpc: revision 1.83
        sys/arch/powerpc/booke/booke_pmap.c: revision 1.13
        sys/arch/powerpc/include/booke/e500var.h: revision 1.5
        sys/arch/evbppc/mpc85xx/machdep.c: revision 1.23
Add an ldscript which aligns .data to a 1MB boundary (used for testing).
Add a PMAP_MINIMALTLB defflag.
Add a vsize_t argument to pmap_md_{un,}map_poolpage.
Add a pmap_kvptefill prototype.
Slightly change the pmap_bootstrap prototype (it now returns the new
end-of-kernel address).
Add an e500_tlb_minimize prototype.
Add support for the PMAP_MINIMALTLB option.  Instead of the default of
using TLB1 entries to map all of physical memory, only two TLB1 entries
are used: one mapping kernel text and one mapping data.  The rest of
memory is mapped through the page table, which is updated as needed.
This is used to trap memory corruption issues.
Add support for PMAP_MINIMALTLB.
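
As an illustration of the option's effect, here is a minimal sketch
(editorially reconstructed from the booke_pmap.c hunk below, not a
separate piece of this pull-up) of the protection layout that
pmap_bootstrap() establishes with pmap_kvptefill() under
PMAP_MINIMALTLB.  _etext and _fdata are linker-script symbols; PTE_M,
PTE_xR, PTE_xW and PTE_xX are the booke PTE bits used in the diff:

	/* Kernel context; pmap_kvptefill() is declared in
	 * <powerpc/booke/pmap.h> by this pull-up. */
	extern uint32_t _fdata[], _etext[];	/* section boundaries */
	vaddr_t va;

	/* Everything below the kernel: zero PTEs, i.e. no access. */
	va = pmap_kvptefill(NBPG, startkernel, 0);
	/* Kernel .text: read-only and executable. */
	va = pmap_kvptefill(va, round_page((vaddr_t)_etext),
	    PTE_M | PTE_xR | PTE_xX);
	/* Kernel .rodata: read-only. */
	va = pmap_kvptefill(va, trunc_page((vaddr_t)_fdata),
	    PTE_M | PTE_xR);
	/* Kernel .data/.bss and boot-allocated page tables: read-write. */
	(void)pmap_kvptefill(va, round_page(endkernel),
	    PTE_M | PTE_xR | PTE_xW);

A stray store into text or rodata then faults instead of silently
corrupting the kernel, which is what makes the option useful for
trapping corruption bugs.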

diffstat:

 sys/arch/evbppc/mpc85xx/machdep.c        |   32 +----
 sys/arch/powerpc/booke/booke_pmap.c      |  209 +++++++++++++++++++++---------
 sys/arch/powerpc/booke/e500_tlb.c        |   95 +++++++++++++-
 sys/arch/powerpc/conf/files.powerpc      |    4 +-
 sys/arch/powerpc/conf/kern-mb.ldscript   |   76 +++++++++++
 sys/arch/powerpc/include/booke/e500var.h |    3 +-
 sys/arch/powerpc/include/booke/pmap.h    |   15 +-
 7 files changed, 333 insertions(+), 101 deletions(-)

diffs (truncated from 682 to 300 lines):

diff -r 1ae10740a4fb -r 8cb9ab92b81b sys/arch/evbppc/mpc85xx/machdep.c
--- a/sys/arch/evbppc/mpc85xx/machdep.c Wed May 09 20:11:55 2012 +0000
+++ b/sys/arch/evbppc/mpc85xx/machdep.c Wed May 09 22:42:31 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: machdep.c,v 1.22 2012/01/27 18:52:54 para Exp $        */
+/*     $NetBSD: machdep.c,v 1.22.2.1 2012/05/09 22:42:32 riz Exp $     */
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -1126,44 +1126,20 @@
        /*
         * Initialize the pmap.
         */
-       pmap_bootstrap(startkernel, endkernel, availmemr, nmemr);
+       endkernel = pmap_bootstrap(startkernel, endkernel, availmemr, nmemr);
 
        /*
         * Let's take all the indirect calls via our stubs and patch 
         * them to be direct calls.
         */
        cpu_fixup_stubs();
-#if 0
+
        /*
         * As a debug measure we can change the TLB entry that maps all of
         * memory to one that encompasses the 64KB with the kernel vectors.
         * All other pages will be soft faulted into the TLB as needed.
         */
-       const uint32_t saved_mas0 = mfspr(SPR_MAS0);
-       mtspr(SPR_MAS6, 0);
-       __asm volatile("tlbsx\t0, %0" :: "b"(startkernel));
-       uint32_t mas0 = mfspr(SPR_MAS0);
-       uint32_t mas1 = mfspr(SPR_MAS1);
-       uint32_t mas2 = mfspr(SPR_MAS2);
-       uint32_t mas3 = mfspr(SPR_MAS3);
-       KASSERT(mas3 & MAS3_SW);
-       KASSERT(mas3 & MAS3_SR);
-       KASSERT(mas3 & MAS3_SX);
-       mas1 = (mas1 & ~MAS1_TSIZE) | MASX_TSIZE_64KB;
-       pt_entry_t xpn_mask = ~0 << (10 + 2 * MASX_TSIZE_GET(mas1));
-       mas2 = (mas2 & ~(MAS2_EPN        )) | (startkernel & xpn_mask);
-       mas3 = (mas3 & ~(MAS3_RPN|MAS3_SW)) | (startkernel & xpn_mask);
-       printf(" %#lx=<%#x,%#x,%#x,%#x>", startkernel, mas0, mas1, mas2, mas3);
-#if 1
-       mtspr(SPR_MAS1, mas1);
-       mtspr(SPR_MAS2, mas2);
-       mtspr(SPR_MAS3, mas3);
-       extern void tlbwe(void);
-       tlbwe();
-       mtspr(SPR_MAS0, saved_mas0);
-       printf("(ok)");
-#endif
-#endif
+       e500_tlb_minimize(endkernel);
 
        /*
         * Set some more MD helpers
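
For context, the MAS-register sequence that the deleted #if 0 block
performed inline, reorganized as a freestanding sketch.  The new
e500_tlb_minimize() (e500_tlb.c rev. 1.8, not shown in this truncated
diff) is assumed to subsume this kind of TLB1 shrinking, so treat the
helper below as an editorial reconstruction of the old debug hack, not
the new function's actual body:

	static void
	shrink_kernel_tlb1_entry(vaddr_t startkernel)
	{
		extern void tlbwe(void);
		const uint32_t saved_mas0 = mfspr(SPR_MAS0);

		/* Find the TLB entry currently mapping the kernel. */
		mtspr(SPR_MAS6, 0);
		__asm volatile("tlbsx\t0, %0" :: "b"(startkernel));
		uint32_t mas1 = mfspr(SPR_MAS1);
		uint32_t mas2 = mfspr(SPR_MAS2);
		uint32_t mas3 = mfspr(SPR_MAS3);
		KASSERT(mas3 & MAS3_SR);

		/* Shrink it to the 64KB holding the kernel vectors... */
		mas1 = (mas1 & ~MAS1_TSIZE) | MASX_TSIZE_64KB;
		const uint32_t xpn_mask =
		    ~0 << (10 + 2 * MASX_TSIZE_GET(mas1));
		mas2 = (mas2 & ~MAS2_EPN) | (startkernel & xpn_mask);
		/* ...and drop write permission from what remains. */
		mas3 = (mas3 & ~(MAS3_RPN|MAS3_SW))
		    | (startkernel & xpn_mask);

		mtspr(SPR_MAS1, mas1);
		mtspr(SPR_MAS2, mas2);
		mtspr(SPR_MAS3, mas3);
		tlbwe();		/* write the modified entry back */
		mtspr(SPR_MAS0, saved_mas0);
	}

All other pages are then soft-faulted into the TLB via the page table,
exactly as the retained comment above describes.
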
diff -r 1ae10740a4fb -r 8cb9ab92b81b sys/arch/powerpc/booke/booke_pmap.c
--- a/sys/arch/powerpc/booke/booke_pmap.c       Wed May 09 20:11:55 2012 +0000
+++ b/sys/arch/powerpc/booke/booke_pmap.c       Wed May 09 22:42:31 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: booke_pmap.c,v 1.12 2012/02/02 18:59:44 para Exp $     */
+/*     $NetBSD: booke_pmap.c,v 1.12.2.1 2012/05/09 22:42:32 riz Exp $  */
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -38,7 +38,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.12 2012/02/02 18:59:44 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.12.2.1 2012/05/09 22:42:32 riz Exp $");
 
 #include <sys/param.h>
 #include <sys/kcore.h>
@@ -59,6 +59,8 @@
 
 CTASSERT(sizeof(struct pmap_segtab) == NBPG);
 
+struct pmap_segtab pmap_kernel_segtab;
+
 void
 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
 {
@@ -120,30 +122,46 @@
        return (paddr_t) va;
 }
 
+#ifdef PMAP_MINIMALTLB
+static pt_entry_t *
+kvtopte(const struct pmap_segtab *stp, vaddr_t va)
+{
+       pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
+       if (ptep == NULL)
+               return NULL;
+       return &ptep[(va & SEGOFSET) >> PAGE_SHIFT];
+}
+
+vaddr_t
+pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
+{
+       const struct pmap_segtab * const stp = pmap_kernel()->pm_segtab;
+       KASSERT(sva == trunc_page(sva));
+       pt_entry_t *ptep = kvtopte(stp, sva);
+       for (; sva < eva; sva += NBPG) {
+               *ptep++ = pt_entry ? (sva | pt_entry) : 0;
+       }
+       return sva;
+}
+#endif
+
 /*
  *     Bootstrap the system enough to run with virtual memory.
  *     firstaddr is the first unused kseg0 address (not page aligned).
  */
-void
+vaddr_t
 pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
-       const phys_ram_seg_t *avail, size_t cnt)
+       phys_ram_seg_t *avail, size_t cnt)
 {
-       for (size_t i = 0; i < cnt; i++) {
-               printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
-                   atop(avail[i].start),
-                   atop(avail[i].start + avail[i].size) - 1,
-                   atop(avail[i].start),
-                   atop(avail[i].start + avail[i].size) - 1,
-                   VM_FREELIST_DEFAULT);
-               uvm_page_physload(
-                   atop(avail[i].start),
-                   atop(avail[i].start + avail[i].size) - 1,
-                   atop(avail[i].start),
-                   atop(avail[i].start + avail[i].size) - 1,
-                   VM_FREELIST_DEFAULT);
-       }
+       struct pmap_segtab * const stp = &pmap_kernel_segtab;
 
-       pmap_tlb_info_init(&pmap_tlb0_info);            /* init the lock */
+       /*
+        * Initialize the kernel segment table.
+        */
+       pmap_kernel()->pm_segtab = stp;
+       curcpu()->ci_pmap_kern_segtab = stp;
+
+       KASSERT(endkernel == trunc_page(endkernel));
 
        /*
         * Compute the number of pages kmem_arena will have.
@@ -160,7 +178,7 @@
        vsize_t bufsz = buf_memcalc();
        buf_setvalimit(bufsz);
 
-       vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
+       vsize_t kv_nsegtabs = pmap_round_seg(VM_PHYS_SIZE
            + (ubc_nwins << ubc_winshift)
            + bufsz
            + 16 * NCARGS
@@ -169,7 +187,7 @@
 #ifdef SYSVSHM
            + NBPG * shminfo.shmall
 #endif
-           + NBPG * nkmempages);
+           + NBPG * nkmempages) >> SEGSHIFT;
 
        /*
         * Initialize `FYI' variables.  Note we're relying on
@@ -179,67 +197,99 @@
         */
        pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
        pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
-       const vsize_t max_nsegtabs =
+       const size_t max_nsegtabs =
            (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
                - pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
-       if (nsegtabs >= max_nsegtabs) {
+       if (kv_nsegtabs >= max_nsegtabs) {
                pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
-               nsegtabs = max_nsegtabs;
+               kv_nsegtabs = max_nsegtabs;
        } else {
                pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
-                   + nsegtabs * NBSEG;
+                   + kv_nsegtabs * NBSEG;
        }
 
-       pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);
-
        /*
         * Now actually allocate the kernel PTE array (must be done
         * after virtual_end is initialized).
         */
-       vaddr_t segtabs =
-           uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));
+       const vaddr_t kv_segtabs = avail[0].start;
+       KASSERT(kv_segtabs == endkernel);
+       KASSERT(avail[0].size >= NBPG * kv_nsegtabs);
+       printf(" kv_nsegtabs=%#"PRIxVSIZE, kv_nsegtabs);
+       printf(" kv_segtabs=%#"PRIxVADDR, kv_segtabs);
+       avail[0].start += NBPG * kv_nsegtabs;
+       avail[0].size -= NBPG * kv_nsegtabs;
+       endkernel += NBPG * kv_nsegtabs;
 
        /*
         * Initialize the kernel's two-level page level.  This only wastes
         * an extra page for the segment table and allows the user/kernel
         * access to be common.
         */
-       struct pmap_segtab * const stp = (void *)segtabs;
-       segtabs += round_page(sizeof(struct pmap_segtab));
        pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
-       for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
-               *ptp++ = (void *)segtabs;
+       pt_entry_t *ptep = (void *)kv_segtabs;
+       memset(ptep, 0, NBPG * kv_nsegtabs);
+       for (size_t i = 0; i < kv_nsegtabs; i++, ptep += NPTEPG) {
+               *ptp++ = ptep;
        }
-       pmap_kernel()->pm_segtab = stp;
-       curcpu()->ci_pmap_kern_segtab = stp;
-       printf(" kern_segtab=%p", stp);
 
-#if 0
-       nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
-       segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
+#if PMAP_MINIMALTLB
+       const vsize_t dm_nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
+       const vaddr_t dm_segtabs = avail[0].start;
+       printf(" dm_nsegtabs=%#"PRIxVSIZE, dm_nsegtabs);
+       printf(" dm_segtabs=%#"PRIxVADDR, dm_segtabs);
+       KASSERT(dm_segtabs == endkernel);
+       KASSERT(avail[0].size >= NBPG * dm_nsegtabs);
+       avail[0].start += NBPG * dm_nsegtabs;
+       avail[0].size -= NBPG * dm_nsegtabs;
+       endkernel += NBPG * dm_nsegtabs;
+
        ptp = stp->seg_tab;
-       pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
-       pt_entry_t *ptep = (void *)segtabs;
-       printf("%s: allocated %lu page table pages for mapping %u pages\n",
-           __func__, nsegtabs, physmem);
-       for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
+       ptep = (void *)dm_segtabs;
+       memset(ptep, 0, NBPG * dm_nsegtabs);
+       for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ptep += NPTEPG) {
                *ptp = ptep;
-               for (u_int j = 0; j < NPTEPG; j++, ptep++) {
-                       *ptep = pt_entry;
-                       pt_entry += NBPG;
-               }
-               printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
-               pt_entry |= PTE_xW;
-               pt_entry &= ~PTE_xX;
        }
 
        /*
-        * Now make everything before the kernel inaccessible.
         */
-       for (u_int i = 0; i < startkernel / NBPG; i += NBPG) {
-               stp->seg_tab[i >> SEGSHIFT][(i & SEGOFSET) >> PAGE_SHIFT] = 0;
+       extern uint32_t _fdata[], _etext[];
+       vaddr_t va;
+
+       /* Now make everything before the kernel inaccessible. */
+       va = pmap_kvptefill(NBPG, startkernel, 0);
+
+       /* Kernel text is readonly & executable */
+       va = pmap_kvptefill(va, round_page((vaddr_t)_etext),
+           PTE_M | PTE_xR | PTE_xX);
+
+       /* Kernel .rdata is readonly */
+       va = pmap_kvptefill(va, trunc_page((vaddr_t)_fdata), PTE_M | PTE_xR);
+
+       /* Kernel .data/.bss + page tables are read-write */
+       va = pmap_kvptefill(va, round_page(endkernel), PTE_M | PTE_xR | PTE_xW);
+
+       /* message buffer page table pages are read-write */
+       (void) pmap_kvptefill(msgbuf_paddr, msgbuf_paddr+round_page(MSGBUFSIZE),
+           PTE_M | PTE_xR | PTE_xW);
+#endif
+
+       for (size_t i = 0; i < cnt; i++) {
+               printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
+                   atop(avail[i].start),
+                   atop(avail[i].start + avail[i].size) - 1,
+                   atop(avail[i].start),
+                   atop(avail[i].start + avail[i].size) - 1,
+                   VM_FREELIST_DEFAULT);
+               uvm_page_physload(
+                   atop(avail[i].start),
+                   atop(avail[i].start + avail[i].size) - 1,
+                   atop(avail[i].start),
+                   atop(avail[i].start + avail[i].size) - 1,
+                   VM_FREELIST_DEFAULT);
        }
-#endif
+
+       pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);
 
        /*
         * Initialize the pools.
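
The new pmap_bootstrap() above no longer calls uvm_pageboot_alloc();
instead it steals the pages for the segment and page tables from the
front of avail[0], before uvm_page_physload() registers the segments,
which is why the avail parameter lost its const.  A hedged sketch of
that pattern as a standalone helper; steal_boot_pages() is a made-up
name, not a function in this commit:

	static vaddr_t
	steal_boot_pages(phys_ram_seg_t *avail, vaddr_t *endkernel,
	    vsize_t npgs)
	{
		const vaddr_t va = avail[0].start;

		/* Stolen pages must sit right after the kernel image. */
		KASSERT(va == *endkernel);
		KASSERT(avail[0].size >= NBPG * npgs);

		/* Shrink avail[0] so uvm_page_physload() never hands
		 * these pages to UVM. */
		avail[0].start += NBPG * npgs;
		avail[0].size -= NBPG * npgs;
		*endkernel += NBPG * npgs;

		memset((void *)va, 0, NBPG * npgs);	/* tables start zeroed */
		return va;
	}

Accounting the stolen pages into endkernel matters under
PMAP_MINIMALTLB: the read-write window set up by pmap_kvptefill() ends
at round_page(endkernel), so the newly allocated tables stay writable.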


