Current-Users archive


Re: diagnostic assertion "uvm_page_owner_locked_p(new_pg, false)" failed



On Thu, Feb 27, 2020 at 07:49:53AM +0100, Thomas Klausner wrote:
> Last night I had a repeatable panic on 9.99.48/amd64/20200226. The
> first time, the machine was running a bulk build and handling network
> traffic; then I started X and it panicked.
> 
> I thought I had just been unlucky with pbulk and timing, so I rebooted
> and started X directly after the reboot, but got the same panic again.
> 
> panic: kernel diagnostic assertion "uvm_page_owner_locked_p(new_pg, false)" failed: file "/usr/src/sys/arch/x86/x86/pmap.c", line 4319

ad's commit below (pmap.c rev. 1.360) fixed it for me.

Thanks, Andrew!
 Thomas
--- Begin Message ---
Module Name:	src
Committed By:	ad
Date:		Sat Feb 29 20:17:11 UTC 2020

Modified Files:
	src/sys/arch/x86/x86: pmap.c

Log Message:
PR kern/55033: kernel panics when starting X

Remove the uvm_page_owner_locked_p() assertions in the x86 pmap.  The DRM
code doesn't follow the locking protocol (that's OK, though, since the pages
aren't changing identity), and having thought about it more, we're most
likely going to have to do full PV locking to make progress on concurrent
fault handling, so the assertions aren't that important.
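
For context, uvm_page_owner_locked_p() is the check those KASSERTs relied
on: it returns true when the lock of the page's owner (pg->uobject's
vmobjlock or pg->uanon's an_lock, per the locking comment in pmap.c) is
held by the caller.  A rough sketch of that check follows, assuming the
rwlock(9)-based owner locks in -current at the time; it illustrates the
idea and is not a verbatim copy of the UVM sources.

#include <sys/param.h>
#include <sys/rwlock.h>

#include <uvm/uvm.h>

/*
 * Sketch: return true iff the caller holds the lock of the page's owner
 * (exclusively, if 'exclusive' is set).  The exact lock primitives used
 * here are an assumption.
 */
bool
uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive)
{
        if (pg->uobject != NULL) {
                /* Object-owned page: the object's vmobjlock covers it. */
                return exclusive ?
                    rw_write_held(pg->uobject->vmobjlock) :
                    rw_lock_held(pg->uobject->vmobjlock);
        }
        if (pg->uanon != NULL) {
                /* Anon-owned page: the anon's an_lock covers it. */
                return rw_lock_held(pg->uanon->an_lock);
        }
        /* Unowned page: nothing to assert against. */
        return true;
}

Since the DRM code calls into the pmap without holding those owner locks,
the assertions fired even though the pages were not changing identity,
which is what the log message above is getting at.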


To generate a diff of this commit:
cvs rdiff -u -r1.359 -r1.360 src/sys/arch/x86/x86/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.359 src/sys/arch/x86/x86/pmap.c:1.360
--- src/sys/arch/x86/x86/pmap.c:1.359	Sun Feb 23 22:28:53 2020
+++ src/sys/arch/x86/x86/pmap.c	Sat Feb 29 20:17:11 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.359 2020/02/23 22:28:53 ad Exp $	*/
+/*	$NetBSD: pmap.c,v 1.360 2020/02/29 20:17:11 ad Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.359 2020/02/23 22:28:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.360 2020/02/29 20:17:11 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -230,7 +230,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.3
  * - pg->uobject->vmobjlock, pg->uanon->an_lock
  *   These per-object locks are taken by the VM system before calling into
  *   the pmap module.  Holding them prevents concurrent operations on the
- *   given page or set of pages.  Asserted with uvm_page_owner_locked_p().
+ *   given page or set of pages.
  *
  * - pmap->pm_lock (per pmap)
  *   This lock protects the fields in the pmap structure including the
@@ -3588,7 +3588,6 @@ pmap_remove_pte(struct pmap *pmap, struc
 	}
 
 	if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-		KASSERT(uvm_page_owner_locked_p(pg, false));
 		pp = VM_PAGE_TO_PP(pg);
 	} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
 		paddr_t pa = pmap_pte2pa(opte);
@@ -3921,9 +3920,6 @@ pmap_page_remove(struct vm_page *pg)
 	struct pmap_page *pp;
 	paddr_t pa;
 
-	/* Need an exclusive lock to prevent PV list changing behind us. */
-	KASSERT(uvm_page_owner_locked_p(pg, true));
-
 	pp = VM_PAGE_TO_PP(pg);
 	pa = VM_PAGE_TO_PHYS(pg);
 	pmap_pp_remove(pp, pa);
@@ -3963,9 +3959,6 @@ pmap_test_attrs(struct vm_page *pg, unsi
 	u_int result;
 	paddr_t pa;
 
-	/* Need an exclusive lock to prevent PV list changing behind us. */
-	KASSERT(uvm_page_owner_locked_p(pg, true));
-
 	pp = VM_PAGE_TO_PP(pg);
 	if ((pp->pp_attrs & testbits) != 0) {
 		return true;
@@ -4037,9 +4030,6 @@ pmap_clear_attrs(struct vm_page *pg, uns
 	struct pmap_page *pp;
 	paddr_t pa;
 
-	/* Need an exclusive lock to prevent PV list changing behind us. */
-	KASSERT(uvm_page_owner_locked_p(pg, true));
-
 	pp = VM_PAGE_TO_PP(pg);
 	pa = VM_PAGE_TO_PHYS(pg);
 
@@ -4316,7 +4306,6 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t
 		/* This is a managed page */
 		npte |= PTE_PVLIST;
 		new_pp = VM_PAGE_TO_PP(new_pg);
-		KASSERT(uvm_page_owner_locked_p(new_pg, false));
 	} else if ((new_pp = pmap_pv_tracked(pa)) != NULL) {
 		/* This is an unmanaged pv-tracked page */
 		npte |= PTE_PVLIST;
@@ -4448,7 +4437,6 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t
 	 */
 	if ((~opte & (PTE_P | PTE_PVLIST)) == 0) {
 		if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-			KASSERT(uvm_page_owner_locked_p(old_pg, false));
 			old_pp = VM_PAGE_TO_PP(old_pg);
 		} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
 			panic("%s: PTE_PVLIST with pv-untracked page"
@@ -5212,7 +5200,6 @@ pmap_ept_enter(struct pmap *pmap, vaddr_
 		/* This is a managed page */
 		npte |= EPT_PVLIST;
 		new_pp = VM_PAGE_TO_PP(new_pg);
-		KASSERT(uvm_page_owner_locked_p(new_pg, false));
 	} else if ((new_pp = pmap_pv_tracked(pa)) != NULL) {
 		/* This is an unmanaged pv-tracked page */
 		npte |= EPT_PVLIST;
@@ -5329,7 +5316,6 @@ pmap_ept_enter(struct pmap *pmap, vaddr_
 	 */
 	if ((~opte & (EPT_R | EPT_PVLIST)) == 0) {
 		if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-			KASSERT(uvm_page_owner_locked_p(old_pg, false));
 			old_pp = VM_PAGE_TO_PP(old_pg);
 		} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
 			panic("%s: EPT_PVLIST with pv-untracked page"
@@ -5495,7 +5481,6 @@ pmap_ept_remove_pte(struct pmap *pmap, s
 	}
 
 	if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-		KASSERT(uvm_page_owner_locked_p(pg, false));
 		pp = VM_PAGE_TO_PP(pg);
 	} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
 		paddr_t pa = pmap_pte2pa(opte);


--- End Message ---

