NetBSD-Bugs archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Re: port-sh3/56381: Infinite loop in pmap_page_protect()
The following reply was made to PR port-sh3/56381; it has been noted by GNATS.
From: Martin Husemann <martin%duskware.de@localhost>
To: gnats-bugs%netbsd.org@localhost
Cc:
Subject: Re: port-sh3/56381: Infinite loop in pmap_page_protect()
Date: Mon, 30 Aug 2021 12:22:59 +0200
It seems I have run into this too and added instrumentation (see below),
but then never got that to fire again.
Martin
Index: pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/sh3/pmap.c,v
retrieving revision 1.85
diff -u -p -r1.85 pmap.c
--- pmap.c 26 Jul 2021 21:43:11 -0000 1.85
+++ pmap.c 30 Aug 2021 10:20:59 -0000
@@ -325,6 +325,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
pt_entry_t entry, *pte;
bool kva = (pmap == pmap_kernel());
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist, "pmap %#jx va %#jx pa %#jx flags %#jx",
+ (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)pa, (uintptr_t)flags);
+
/* "flags" never exceed "prot" */
KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
@@ -345,6 +349,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
entry |= PG_V;
pvh->pvh_flags |= PVH_REFERENCED;
}
+ UVMHIST_LOG(maphist, "added PVH_REFERENCED pg: %#jx pvh_flags %#jx",
+ (uintptr_t)pg, (uintptr_t)pvh->pvh_flags, 0, 0);
/* Protection */
if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
@@ -391,6 +397,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
}
}
+ KDASSERT(entry != 0);
*pte = entry;
if (pmap->pm_asid != -1)
@@ -441,6 +448,7 @@ __pmap_map_change(pmap_t pmap, vaddr_t v
if (oentry & _PG_WIRED) {
if (!(entry & _PG_WIRED)) {
/* wired -> unwired */
+ KDASSERT(entry != 0);
*pte = entry;
/* "wired" is software bits. no need to update TLB */
pmap->pm_stats.wired_count--;
@@ -500,7 +508,10 @@ __pmap_pv_enter(pmap_t pmap, struct vm_p
}
void
-pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
+pmap_remove_int(pmap_t pmap, vaddr_t sva, vaddr_t eva, bool must_exist);
+
+void
+pmap_remove_int(pmap_t pmap, vaddr_t sva, vaddr_t eva, bool must_exist)
{
struct vm_page *pg;
pt_entry_t *pte, entry;
@@ -509,11 +520,19 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
KDASSERT((sva & PGOFSET) == 0);
for (va = sva; va < eva; va += PAGE_SIZE) {
- if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
- (entry = *pte) == 0)
+ pte = __pmap_pte_lookup(pmap, va);
+ KDASSERTMSG(!must_exist || pte != NULL, "va: %lx", va);
+ if (pte == NULL)
continue;
- if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
+ entry = *pte;
+ KDASSERTMSG(!must_exist || entry != 0, "va: %lx", va);
+ if (entry == 0)
+ continue;
+
+ pg = PHYS_TO_VM_PAGE(entry & PG_PPN);
+ KDASSERTMSG(!must_exist || pg != NULL, "va: %lx", va);
+ if (pg != NULL)
__pmap_pv_remove(pmap, pg, va);
if (entry & _PG_WIRED)
@@ -530,6 +549,12 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
}
}
+void
+pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
+{
+ pmap_remove_int(pmap, sva, eva, false);
+}
+
/*
* void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
* Remove physical-virtual map from vm_page.
@@ -564,7 +589,9 @@ __pmap_pv_remove(pmap_t pmap, struct vm_
#ifdef DEBUG
/* Check duplicated map. */
SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
- KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
+ KDASSERTMSG(!(pv->pv_pmap == pmap && pv->pv_va == vaddr),
+ "pmap: %p pv->pv_pmap: %p, pv->pv_va: %lx, vaddr: %lx\n",
+ pmap, pv->pv_pmap, pv->pv_va, vaddr);
#endif
splx(s);
}
@@ -589,6 +616,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
pte = __pmap_kpte_lookup(va);
KDASSERT(*pte == 0);
+ KDASSERT(entry != 0);
*pte = entry;
sh_tlb_update(0, va, entry);
@@ -684,6 +712,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
}
entry = (entry & ~PG_PR_MASK) | protbits;
+ KDASSERT(entry != 0);
*pte = entry;
if (pmap->pm_asid != -1)
@@ -725,7 +754,7 @@ pmap_page_protect(struct vm_page *pg, vm
s = splvm();
while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
va = pv->pv_va;
- pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
+ pmap_remove_int(pv->pv_pmap, va, va + PAGE_SIZE, true);
}
splx(s);
}
@@ -801,6 +830,8 @@ pmap_clear_reference(struct vm_page *pg)
pmap_t pmap;
vaddr_t va;
int s;
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist, "pg: %#jx", (uintptr_t)pg, 0, 0, 0);
if ((pvh->pvh_flags & PVH_REFERENCED) == 0)
return (false);
@@ -1016,6 +1047,9 @@ __pmap_pte_load(pmap_t pmap, vaddr_t va,
struct vm_page *pg;
pt_entry_t *pte;
pt_entry_t entry;
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist, "pmap: %#jx va: %#jx flags: %#jx",
+ (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)flags, 0);
KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
(((int)va >= 0) && (pmap != pmap_kernel())));
@@ -1034,6 +1068,7 @@ __pmap_pte_load(pmap_t pmap, vaddr_t va,
if (flags & PVH_REFERENCED) {
pvh->pvh_flags |= PVH_REFERENCED;
entry |= PG_V;
+ UVMHIST_LOG(maphist, "set PVH_REFERENCED",0,0,0,0);
}
if (flags & PVH_MODIFIED) {
pvh->pvh_flags |= PVH_MODIFIED;
Home |
Main Index |
Thread Index |
Old Index