Source-Changes-HG archive


[src/matt-nb6-plus]: src/sys/arch/arm pullup pmap changes from HEAD



details:   https://anonhg.NetBSD.org/src/rev/cd7ec9685dcc
branches:  matt-nb6-plus
changeset: 774546:cd7ec9685dcc
user:      matt <matt@NetBSD.org>
date:      Thu Feb 07 06:52:53 2013 +0000

description:
pullup pmap changes from HEAD

diffstat:

 sys/arch/arm/arm32/pmap.c         |  240 ++++++++++++++++++++++++++-----------
 sys/arch/arm/include/arm32/pmap.h |   12 +-
 2 files changed, 174 insertions(+), 78 deletions(-)

diffs (truncated from 474 to 300 lines):
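In brief, the hunks below make five kinds of change: (1) all PVF_WRITE
bookkeeping on page metadata is skipped when arm_cache_prefer_mask is
zero, i.e. when the VIPT cache cannot alias; (2) the consistency check
between PVF_WRITE and the read/write mapping counts is centralized in a
new PMAP_VALIDATE_MD_PAGE() macro; (3) pmap_enter() honors
ARM32_MMAP_WRITECOMBINE by selecting the write-combine PTE cache mode;
(4) the DDB Debugger() call on a spurious fault fixup is now gated on
the kernel_debug flag; and (5) pmap_zero_page() can zero a page through
the direct map when the page's cache color already matches its physical
address.  Hedged sketches of the key ideas follow the relevant hunks.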

diff -r 96de45cb34e2 -r cd7ec9685dcc sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Thu Feb 07 06:51:48 2013 +0000
+++ b/sys/arch/arm/arm32/pmap.c Thu Feb 07 06:52:53 2013 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.228.2.1.2.1 2012/11/28 22:40:19 matt Exp $  */
+/*     $NetBSD: pmap.c,v 1.228.2.1.2.2 2013/02/07 06:52:53 matt Exp $  */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228.2.1.2.1 2012/11/28 22:40:19 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228.2.1.2.2 2013/02/07 06:52:53 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -668,12 +668,6 @@
 
 
 /*
- * External function prototypes
- */
-extern void bzero_page(vaddr_t);
-extern void bcopy_page(vaddr_t, vaddr_t);
-
-/*
  * Misc variables
  */
 vaddr_t virtual_avail;
@@ -699,6 +693,12 @@
 }
 #endif /* PMAP_DEBUG */
 
+#ifdef PMAP_CACHE_VIPT
+#define PMAP_VALIDATE_MD_PAGE(md)      \
+       KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
+           "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
+           (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
+#endif /* PMAP_CACHE_VIPT */
 /*
  * A bunch of routines to conditionally flush the caches/TLB depending
  * on whether the specified pmap actually needs to be flushed at any
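The new macro asserts the invariant the rest of the pullup maintains:
on an aliasing VIPT cache (arm_cache_prefer_mask != 0), a page's
PVF_WRITE attribute must be set exactly when the page has at least one
writable user or kernel mapping; when the mask is zero the cache cannot
alias by color, so the bookkeeping is unnecessary and is skipped.  A
standalone sketch of the check, using the kernel's names but not copied
from the diff:

	/* Illustrative only; assumes the kernel's vm_page_md layout. */
	static inline bool
	md_write_flag_consistent(const struct vm_page_md *md)
	{
		const bool has_rw = (md->urw_mappings + md->krw_mappings) != 0;
		const bool marked_rw = (md->pvh_attrs & PVF_WRITE) != 0;

		/* Non-aliasing caches are exempt from the bookkeeping. */
		return arm_cache_prefer_mask == 0 || has_rw == marked_rw;
	}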
@@ -829,10 +829,10 @@
 /*
  * main pv_entry manipulation functions:
  *   pmap_enter_pv: enter a mapping onto a vm_page list
- *   pmap_remove_pv: remove a mappiing from a vm_page list
+ *   pmap_remove_pv: remove a mapping from a vm_page list
  *
  * NOTE: pmap_enter_pv expects to lock the pvh itself
- *       pmap_remove_pv expects te caller to lock the pvh before calling
+ *       pmap_remove_pv expects the caller to lock the pvh before calling
  */
 
 /*
@@ -896,6 +896,13 @@
 
 #ifdef PMAP_CACHE_VIPT
        /*
+        * Even though pmap_vac_me_harder will set PVF_WRITE for us,
+        * do it here as well to keep the mappings & KVF_WRITE consistent.
+        */
+       if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
+               md->pvh_attrs |= PVF_WRITE;
+       }
+       /*
         * If this is an exec mapping and its the first exec mapping
         * for this page, make sure to sync the I-cache.
         */
@@ -1014,8 +1021,11 @@
         * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
         * the contents to memory.
         */
-       if (md->krw_mappings + md->urw_mappings == 0)
-               md->pvh_attrs &= ~PVF_WRITE;
+       if (arm_cache_prefer_mask != 0) {
+               if (md->krw_mappings + md->urw_mappings == 0)
+                       md->pvh_attrs &= ~PVF_WRITE;
+               PMAP_VALIDATE_MD_PAGE(md);
+       }
        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif /* PMAP_CACHE_VIPT */
 
@@ -1093,8 +1103,13 @@
                }
        }
 #ifdef PMAP_CACHE_VIPT
-       if (md->urw_mappings + md->krw_mappings == 0)
-               md->pvh_attrs &= ~PVF_WRITE;
+       if (arm_cache_prefer_mask != 0) {
+               if (md->urw_mappings + md->krw_mappings == 0) {
+                       md->pvh_attrs &= ~PVF_WRITE;
+               } else {
+                       md->pvh_attrs |= PVF_WRITE;
+               }
+       }
        /*
         * We have two cases here: the first is from enter_pv (new exec
         * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
@@ -1850,7 +1865,7 @@
                 * Only check for a bad alias if we have writable mappings.
                 */
                tst_mask &= arm_cache_prefer_mask;
-               if (rw_mappings > 0 && arm_cache_prefer_mask) {
+               if (rw_mappings > 0) {
                        for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
                                /* if there's a bad alias, stop checking. */
                                if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
@@ -1906,7 +1921,7 @@
                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
        } else if (!va) {
-               KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
+               KASSERT(pmap_is_page_colored_p(md));
                KASSERT(!(md->pvh_attrs & PVF_WRITE)
                    || (md->pvh_attrs & PVF_DIRTY));
                if (rw_mappings == 0) {
@@ -2230,8 +2245,13 @@
                                        md->uro_mappings++;
                                }
 #ifdef PMAP_CACHE_VIPT
-                               if (md->urw_mappings + md->krw_mappings == 0)
-                                       md->pvh_attrs &= ~PVF_WRITE;
+                               if (arm_cache_prefer_mask != 0) {
+                                       if (md->urw_mappings + md->krw_mappings == 0) {
+                                               md->pvh_attrs &= ~PVF_WRITE;
+                                       } else {
+                                               PMAP_VALIDATE_MD_PAGE(md);
+                                       }
+                               }
                                if (want_syncicache)
                                        need_syncicache = true;
                                need_vac_me_harder = true;
@@ -2564,7 +2584,7 @@
                if (PV_IS_EXEC_P(md->pvh_attrs))
                        PMAPCOUNT(exec_discarded_page_protect);
                md->pvh_attrs &= ~PVF_EXEC;
-               KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
+               PMAP_VALIDATE_MD_PAGE(md);
 #endif
                return;
        }
@@ -2663,9 +2683,11 @@
        md->pvh_attrs &= ~PVF_EXEC;
        KASSERT(md->urw_mappings == 0);
        KASSERT(md->uro_mappings == 0);
-       if (md->krw_mappings == 0)
-               md->pvh_attrs &= ~PVF_WRITE;
-       KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
+       if (arm_cache_prefer_mask != 0) {
+               if (md->krw_mappings == 0)
+                       md->pvh_attrs &= ~PVF_WRITE;
+               PMAP_VALIDATE_MD_PAGE(md);
+       }
 #endif
 
        if (flush) {
@@ -2809,8 +2831,7 @@
                /*
                 * This is to be a managed mapping.
                 */
-               if ((flags & VM_PROT_ALL) ||
-                   (md->pvh_attrs & PVF_REF)) {
+               if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) {
                        /*
                         * - The access type indicates that we don't need
                         *   to do referenced emulation.
@@ -2841,7 +2862,10 @@
                        npte |= L2_TYPE_INV;
                }
 
-               npte |= pte_l2_s_cache_mode;
+               if (flags & ARM32_MMAP_WRITECOMBINE) {
+                       npte |= pte_l2_s_wc_mode;
+               } else
+                       npte |= pte_l2_s_cache_mode;
 
                if (pg == opg) {
                        /*
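The ARM32_MMAP_WRITECOMBINE hunk above lets a caller of pmap_enter()
request a write-combining mapping, typically uncached but bufferable,
which is the usual choice for framebuffers and similar memory.  A
minimal sketch of the selection, using the kernel's names but not
copied from the source:

	/* Illustrative: choose the L2 PTE cache bits for a new mapping. */
	static pt_entry_t
	pte_cache_bits(u_int flags)
	{
		return (flags & ARM32_MMAP_WRITECOMBINE)
		    ? pte_l2_s_wc_mode      /* write-combine */
		    : pte_l2_s_cache_mode;  /* normal cacheable */
	}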
@@ -3050,7 +3074,7 @@
                KASSERT(uvm_page_locked_p(pg));
 #endif
                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
-               KASSERT(arm_cache_prefer_mask == 0 || ((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
+               PMAP_VALIDATE_MD_PAGE(md);
        }
 #endif
 
@@ -3468,7 +3492,9 @@
                                        KASSERT(omd->kro_mappings == 0);
                                        omd->pvh_attrs &= ~PVF_KMPAGE;
 #ifdef PMAP_CACHE_VIPT
-                                       omd->pvh_attrs &= ~PVF_WRITE;
+                                       if (arm_cache_prefer_mask != 0) {
+                                               omd->pvh_attrs &= ~PVF_WRITE;
+                                       }
 #endif
                                        pmap_kmpages--;
 #ifdef PMAP_CACHE_VIPT
@@ -4047,6 +4073,7 @@
         */
        if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
                extern int last_fault_code;
+               extern int kernel_debug;
                printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
                    pm, va, ftype);
                printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
@@ -4054,7 +4081,8 @@
                printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
                    pte, l1pd, last_fault_code);
 #ifdef DDB
-               Debugger();
+               if (kernel_debug & 2)
+                       Debugger();
 #endif
        }
 #endif
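With the two hunks above, a spurious fault fixup still logs its
diagnostics, but the machine only drops into DDB when the administrator
has opted in through the kernel_debug flag (the "kernel_debug & 2" test
above); previously any such event unconditionally entered the debugger.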
@@ -4440,14 +4468,26 @@
        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 #endif
-#ifdef PMAP_CACHE_VIPT
+#if defined(PMAP_CACHE_VIPT)
        /* Choose the last page color it had, if any */
        const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
 #else
        const vsize_t va_offset = 0;
 #endif
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+       /*
+        * Is this page mapped at its natural color?
+        * If we have all of memory mapped, then just convert PA to VA.
+        */
+       const bool okcolor = va_offset == (phys & arm_cache_prefer_mask);
+       const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);
+#else
+       const bool okcolor = false;
+       const vaddr_t vdstp = cdstp + va_offset;
+#endif
        pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
 
+
 #ifdef DEBUG
        if (!SLIST_EMPTY(&md->pvh_list))
                panic("pmap_zero_page: page has mappings");
@@ -4455,25 +4495,39 @@
 
        KDASSERT((phys & PGOFSET) == 0);
 
-       /*
-        * Hook in the page, zero it, and purge the cache for that
-        * zeroed page. Invalidate the TLB as needed.
-        */
-       *ptep = L2_S_PROTO | phys |
-           L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
-       PTE_SYNC(ptep);
-       cpu_tlb_flushD_SE(cdstp + va_offset);
-       cpu_cpwait();
-       bzero_page(cdstp + va_offset);
-       /*
-        * Unmap the page.
-        */
-       *ptep = 0;
-       PTE_SYNC(ptep);
-       cpu_tlb_flushD_SE(cdstp + va_offset);
+       if (!okcolor) {
+               /*
+                * Hook in the page, zero it, and purge the cache for that
+                * zeroed page. Invalidate the TLB as needed.
+                */
+               *ptep = L2_S_PROTO | phys |
+                   L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+               PTE_SYNC(ptep);
+               cpu_tlb_flushD_SE(cdstp + va_offset);
+               cpu_cpwait();
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+               /*
+                * If we are direct-mapped and our color isn't ok, then before
+                * we bzero the page invalidate its contents from the cache and
+                * reset the color to its natural color.
+                */
+               cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE);
+               md->pvh_attrs &= ~arm_cache_prefer_mask;
+               md->pvh_attrs |= (phys & arm_cache_prefer_mask);
+#endif
+       }
+       bzero_page(vdstp);
+       if (!okcolor) {
+               /*
+                * Unmap the page.
+                */
+               *ptep = 0;
+               PTE_SYNC(ptep);
+               cpu_tlb_flushD_SE(cdstp + va_offset);
 #ifdef PMAP_CACHE_VIVT
-       cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
-#endif
+               cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
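The pmap_zero_page() rework is the largest change here.  On kernels
with __HAVE_MM_MD_DIRECT_MAPPED_PHYS, if the page's last cache color
(recorded in pvh_attrs) already equals phys & arm_cache_prefer_mask,
the page can be zeroed through the direct map, skipping the temporary
cdst_pte mapping and the TLB and cache maintenance that go with it; on
a color mismatch the old color's cache lines are invalidated and the
page's recorded color is reset to its natural one before zeroing.  A
condensed sketch of the fast path only (simplified, not a verbatim
excerpt; the truncated hunk above has the full logic):

	/*
	 * Sketch: the direct-mapped alias has the color the page was
	 * last mapped at, so zeroing via the direct map cannot create
	 * a cache alias.
	 */
	const bool okcolor = (va_offset == (phys & arm_cache_prefer_mask));
	const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);

	if (okcolor) {
		bzero_page(vdstp);	/* no temporary mapping needed */
		return;
	}
	/* otherwise fall back to the classic cdstp window, as above */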


