Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm/arm32 Perform tracking of unmanaged mappings for VIVT and call pmap_vac_me_harder() as appropriate



details:   https://anonhg.NetBSD.org/src/rev/6d584410bd75
branches:  trunk
changeset: 826411:6d584410bd75
user:      skrll <skrll@NetBSD.org>
date:      Sat Sep 02 12:24:39 2017 +0000

description:
Perform tracking of unmanaged mappings for VIVT and call pmap_vac_me_harder()
as appropriate.

PR/52102 shark: ffs_newvnode panic when unpacking sets while installing -current

Thanks to Felix Deichmann for bisecting the problem and testing the fix.
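
For context on why VIVT needs this: with a virtually indexed, virtually
tagged cache, two virtual mappings of the same physical page occupy
unrelated cache lines, so a write through one mapping can sit in a line
the other mapping never sees. The pmap resolves such aliases in
pmap_vac_me_harder(), typically by mapping the page non-cacheable while
the aliases exist; this change makes unmanaged (pmap_kenter_pa) mappings
visible to that check. Below is a minimal, self-contained sketch of the
bookkeeping, not the kernel's code: every identifier in it is
hypothetical, and the real code also distinguishes read-only from
writable aliases.

/*
 * Illustrative sketch only; the real logic is pmap_vac_me_harder() in
 * sys/arch/arm/arm32/pmap.c.  All names below are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_md {
	unsigned int nmappings;	/* virtual mappings of this physical page */
	bool uncacheable;	/* currently mapped non-cacheable? */
};

/* Stand-ins for rewriting the PTEs and cleaning/invalidating the cache. */
static void
remap_noncacheable(struct page_md *md)
{
	(void)md;
	printf("aliases exist: caching disabled\n");
}

static void
remap_cacheable(struct page_md *md)
{
	(void)md;
	printf("single mapping: caching re-enabled\n");
}

/*
 * Call on every mapping change.  Before this commit, mappings created
 * by pmap_kenter_pa() never reached the equivalent check on VIVT, so
 * an aliased page could stay cacheable (the PR/52102 panic).
 */
static void
vac_fixup(struct page_md *md)
{
	if (md->nmappings > 1 && !md->uncacheable) {
		remap_noncacheable(md);
		md->uncacheable = true;
	} else if (md->nmappings <= 1 && md->uncacheable) {
		remap_cacheable(md);
		md->uncacheable = false;
	}
}

int
main(void)
{
	struct page_md md = { .nmappings = 1, .uncacheable = false };

	md.nmappings++;		/* an unmanaged kernel alias appears */
	vac_fixup(&md);
	md.nmappings--;		/* alias removed */
	vac_fixup(&md);
	return 0;
}

Compiled and run, this prints the disable/re-enable pair as the alias
comes and goes.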

diffstat:

 sys/arch/arm/arm32/pmap.c |  49 ++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 19 deletions(-)

diffs (149 lines):

diff -r e4c3fd3c2de9 -r 6d584410bd75 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Sep 02 11:57:09 2017 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Sep 02 12:24:39 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.355 2017/09/02 11:57:09 skrll Exp $ */
+/*     $NetBSD: pmap.c,v 1.356 2017/09/02 12:24:39 skrll Exp $ */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -217,7 +217,7 @@
 
 #include <arm/locore.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.355 2017/09/02 11:57:09 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.356 2017/09/02 12:24:39 skrll Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
@@ -3586,7 +3586,7 @@
        pmap_release_pmap_lock(pm);
 }
 
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
 static struct pv_entry *
 pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
 {
@@ -3594,7 +3594,9 @@
        paddr_t pa = VM_PAGE_TO_PHYS(pg);
        struct pv_entry *pv;
 
+#ifdef PMAP_CACHE_VIPT
        KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
+#endif
        KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
        KASSERT(pmap_page_locked_p(md));
 
@@ -3612,16 +3614,18 @@
                if (SLIST_EMPTY(&md->pvh_list)) {
                        md->pvh_attrs &= ~PVF_EXEC;
                        PMAPCOUNT(exec_discarded_kremove);
+#ifdef PMAP_CACHE_VIPT
                } else {
                        pmap_syncicache_page(md, pa);
                        PMAPCOUNT(exec_synced_kremove);
+#endif
                }
        }
        pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
 
        return pv;
 }
-#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
+#endif /* !ARM_MMU_EXTENDED */
 
 /*
  * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
@@ -3633,16 +3637,11 @@
 void
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
-#ifdef PMAP_CACHE_VIVT
-       struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
-#endif
-#ifdef PMAP_CACHE_VIPT
        struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
        struct vm_page *opg;
 #ifndef ARM_MMU_EXTENDED
        struct pv_entry *pv = NULL;
 #endif
-#endif
        struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
 
        UVMHIST_FUNC(__func__);
@@ -3676,12 +3675,13 @@
                l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
        } else {
                PMAPCOUNT(kenter_remappings);
+               opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
+               struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg);
+               if (opg
 #ifdef PMAP_CACHE_VIPT
-               opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-#if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC)
-               struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg);
-#endif
-               if (opg && arm_cache_prefer_mask != 0) {
+                   && arm_cache_prefer_mask != 0
+#endif
+                   && true) {
                        KASSERT(opg != pg);
                        KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
                        KASSERT((flags & PMAP_KMPAGE) == 0);
@@ -3691,7 +3691,6 @@
                        pmap_release_page_lock(omd);
 #endif
                }
-#endif
                if (l2pte_valid_p(opte)) {
                        l2pte_reset(ptep);
                        PTE_SYNC(ptep);
@@ -3750,8 +3749,14 @@
                        md->pvh_attrs |= PVF_KMPAGE;
 #endif
                        atomic_inc_32(&pmap_kmpages);
+               } else if (false
 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
-               } else if (arm_cache_prefer_mask != 0) {
+                   || arm_cache_prefer_mask != 0
+#elif defined(PMAP_CACHE_VIVT)
+                   || true
+#endif
+                   || false) {
+#if !defined(ARM_MMU_EXTENDED)
                        if (pv == NULL) {
                                pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
                                KASSERT(pv != NULL);
@@ -3768,13 +3773,13 @@
                        pmap_release_page_lock(md);
 #endif
                }
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
        } else {
                if (pv != NULL)
                        pool_put(&pmap_pv_pool, pv);
 #endif
        }
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
        KASSERT(md == NULL || !pmap_page_locked_p(md));
 #endif
        if (pmap_initialized) {
@@ -3832,8 +3837,14 @@
                                        }
 #endif
                                        atomic_dec_32(&pmap_kmpages);
+                               } else if (false
 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
-                               } else if (arm_cache_prefer_mask != 0) {
+                                   || arm_cache_prefer_mask != 0
+#elif defined(PMAP_CACHE_VIVT)
+                                   || true
+#endif
+                                   || false) {
+#if !defined(ARM_MMU_EXTENDED)
                                        pmap_acquire_page_lock(omd);
                                        pool_put(&pmap_pv_pool,
                                            pmap_kremove_pg(opg, va));


