Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/netbsd-6-0]: src/sys/arch/mips/mips Pull up following revision(s) (reque...



details:   https://anonhg.NetBSD.org/src/rev/4eda40b7d416
branches:  netbsd-6-0
changeset: 775279:4eda40b7d416
user:      snj <snj%NetBSD.org@localhost>
date:      Wed Nov 08 21:17:46 2017 +0000

description:
Pull up following revision(s) (requested by skrll in ticket #1056):
        sys/arch/mips/mips/pmap.c: revision 1.210-1.213
        sys/arch/mips/mips/vm_machdep.c: revision 1.143
Fix a logic inversion introduced with the matt-nb5-mips64 for
pmap_{zero,copy}_page cache alias handling. The check previously used
PG_MD_UNCACHED_P, where it now uses PG_MD_CACHED_P, when considering if
a cache invalidation is required.
Additionally flush the cache for the uarea va to avoid potential (future)
cache aliases in cpu_uarea_free when handing pages back to uvm for later
use.
ok matt@
Hopefully this addresses the instability reported in the following PRs:
PR/44900 - R5000/Rm5200 mips ports are broken
PR/46170 - NetBSD/cobalt 6.0_BETA does not boot
PR/46890 - upcoming NetBSD 6.0 release is very unstable / unusable on cobalt qube 2
PR/48628 - cobalt and hpcmips ports are dead
Grab pv_list lock in pmap_unmap_ephemeral_page only when needed.
Make PARANOIADIAG compile.
Use pmap_tlb_asid_check to reduce code c&p.

diffstat:

 sys/arch/mips/mips/pmap.c       |  57 +++++++++++++++-------------------------
 sys/arch/mips/mips/vm_machdep.c |   9 +++++-
 2 files changed, 29 insertions(+), 37 deletions(-)

diffs (151 lines):

diff -r 29cff975cc26 -r 4eda40b7d416 sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Sun Nov 05 20:34:36 2017 +0000
+++ b/sys/arch/mips/mips/pmap.c Wed Nov 08 21:17:46 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.207.2.1 2012/07/05 18:39:42 riz Exp $       */
+/*     $NetBSD: pmap.c,v 1.207.2.1.4.1 2017/11/08 21:17:46 snj Exp $   */
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207.2.1 2012/07/05 18:39:42 riz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207.2.1.4.1 2017/11/08 21:17:46 snj Exp $");
 
 /*
  *     Manages physical address maps.
@@ -453,19 +453,21 @@
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        pv_entry_t pv = &md->pvh_first;
        
-       (void)PG_MD_PVLIST_LOCK(md, false);
-       if (MIPS_CACHE_VIRTUAL_ALIAS
-           && (PG_MD_UNCACHED_P(md)
-               || (pv->pv_pmap != NULL
-                   && mips_cache_badalias(pv->pv_va, va)))) {
-               /*
-                * If this page was previously uncached or we had to use an
-                * incompatible alias and it has a valid mapping, flush it
-                * from the cache.
-                */
-               mips_dcache_wbinv_range(va, PAGE_SIZE);
+       if (MIPS_CACHE_VIRTUAL_ALIAS) {
+               (void)PG_MD_PVLIST_LOCK(md, false);
+               if (PG_MD_CACHED_P(md)
+                   || (pv->pv_pmap != NULL
+                       && mips_cache_badalias(pv->pv_va, va))) {
+
+                       /*
+                        * If this page was previously cached or we had to use an
+                        * incompatible alias and it has a valid mapping, flush it
+                        * from the cache.
+                        */
+                       mips_dcache_wbinv_range(va, PAGE_SIZE);
+               }
+               PG_MD_PVLIST_UNLOCK(md);
        }
-       PG_MD_PVLIST_UNLOCK(md);
 #ifndef _LP64
        /*
         * If we had to map using a page table entry, unmap it now.
@@ -575,7 +577,7 @@
 
        /*
         * Now actually allocate the kernel PTE array (must be done
-        * after virtual_end is initialized).
+        * after mips_virtual_end is initialized).
         */
        Sysmap = (pt_entry_t *)
            uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);
@@ -1023,15 +1025,7 @@
        if (eva > VM_MAXUSER_ADDRESS)
                panic("pmap_remove: uva not in range");
        if (PMAP_IS_ACTIVE(pmap)) {
-               struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
-               uint32_t asid;
-
-               __asm volatile("mfc0 %0,$10; nop" : "=r"(asid));
-               asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
-               if (asid != pai->pai_asid) {
-                       panic("inconsistency for active TLB flush: %d <-> %d",
-                           asid, pai->pai_asid);
-               }
+               pmap_tlb_asid_check();
        }
 #endif
 #ifdef PMAP_FAULTINFO
@@ -1214,15 +1208,7 @@
        if (eva > VM_MAXUSER_ADDRESS)
                panic("pmap_protect: uva not in range");
        if (PMAP_IS_ACTIVE(pmap)) {
-               struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
-               uint32_t asid;
-
-               __asm volatile("mfc0 %0,$10; nop" : "=r"(asid));
-               asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
-               if (asid != pai->pai_asid) {
-                       panic("inconsistency for active TLB update: %d <-> %d",
-                           asid, pai->pai_asid);
-               }
+               pmap_tlb_asid_check();
        }
 #endif
 
@@ -1586,6 +1572,7 @@
 
 #ifdef PARANOIADIAG
        if (PMAP_IS_ACTIVE(pmap)) {
+               struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
                uint32_t asid;
 
                __asm volatile("mfc0 %0,$10; nop" : "=r"(asid));
@@ -1774,7 +1761,7 @@
        if (pmap == pmap_kernel()) {
                /* change entries in kernel pmap */
 #ifdef PARANOIADIAG
-               if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
+               if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end)
                        panic("pmap_unwire");
 #endif
                pte = kvtopte(va);
@@ -2088,7 +2075,7 @@
 pmap_check_pvlist(struct vm_page_md *md)
 {
 #ifdef PARANOIADIAG
-       pt_entry_t pv = &md->pvh_first;
+       pv_entry_t pv = &md->pvh_first;
        if (pv->pv_pmap != NULL) {
                for (; pv != NULL; pv = pv->pv_next) {
                        KASSERT(!MIPS_KSEG0_P(pv->pv_va));
diff -r 29cff975cc26 -r 4eda40b7d416 sys/arch/mips/mips/vm_machdep.c
--- a/sys/arch/mips/mips/vm_machdep.c   Sun Nov 05 20:34:36 2017 +0000
+++ b/sys/arch/mips/mips/vm_machdep.c   Wed Nov 08 21:17:46 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vm_machdep.c,v 1.141 2011/09/27 01:02:34 jym Exp $     */
+/*     $NetBSD: vm_machdep.c,v 1.141.12.1 2017/11/08 21:17:46 snj Exp $        */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.141 2011/09/27 01:02:34 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.141.12.1 2017/11/08 21:17:46 snj Exp $");
 
 #include "opt_ddb.h"
 #include "opt_coredump.h"
@@ -244,6 +244,11 @@
        paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
 #endif
 
+#ifdef MIPS3_PLUS
+       if (MIPS_CACHE_VIRTUAL_ALIAS)
+               mips_dcache_inv_range((vaddr_t)va, USPACE);
+#endif
+
        for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
                struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
                KASSERT(pg != NULL);



Home | Main Index | Thread Index | Old Index