Subject: Re: current DEBUG, DIAGNOSTIC, LOCKDEBUG kernel won't boot
To: None <tech-kern@NetBSD.org>
From: Martin Husemann <martin@duskware.de>
List: tech-kern
Date: 12/12/2006 21:50:40
After looking at it a bit more closely, I found a few problems: pmap_enter()
did not honor PMAP_CANFAIL, and the PT page allocation and free touched
uvm.kernel_object without holding its vmobjlock.
The attached patch makes it boot to multiuser. Can someone please review
it?
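
For context, the goal is just to honor PMAP_CANFAIL the way the MI code
expects: with the flag set, the allocations use UVM_KMF_NOWAIT and
pmap_enter() returns ENOMEM instead of sleeping, so the caller can back
off and retry. Roughly, a caller would look like the sketch below
(purely illustrative, not code from the tree; assumes the usual
<uvm/uvm_extern.h> kernel environment):

/*
 * Hypothetical caller, for illustration only.  With PMAP_CANFAIL set,
 * pmap_enter() may fail with ENOMEM; the caller then waits for the
 * pagedaemon and retries instead of the pmap layer sleeping.
 */
static int
example_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int error;

	for (;;) {
		error = pmap_enter(pm, va, pa, prot, prot | PMAP_CANFAIL);
		if (error != ENOMEM)
			return error;	/* 0 on success, other errors as-is */
		uvm_wait("pmenter");	/* wait for free pages, then retry */
	}
}
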
Thanks,
Martin
Index: pmap_motorola.c
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/m68k/pmap_motorola.c,v
retrieving revision 1.21
diff -c -u -p -r1.21 pmap_motorola.c
--- pmap_motorola.c 16 Sep 2006 17:31:13 -0000 1.21
+++ pmap_motorola.c 12 Dec 2006 20:45:11 -0000
@@ -309,7 +309,7 @@ void pmap_remove_mapping(pmap_t, vaddr_t
void pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
boolean_t pmap_testbit(paddr_t, int);
boolean_t pmap_changebit(paddr_t, int, int);
-void pmap_enter_ptpage(pmap_t, vaddr_t);
+int pmap_enter_ptpage(pmap_t, vaddr_t, boolean_t);
void pmap_ptpage_addref(vaddr_t);
int pmap_ptpage_delref(vaddr_t);
void pmap_collect1(pmap_t, paddr_t, paddr_t);
@@ -1144,6 +1144,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
boolean_t cacheable = TRUE;
boolean_t checkpv = TRUE;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ boolean_t can_fail = (flags & PMAP_CANFAIL) != 0;
PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
("pmap_enter(%p, %lx, %lx, %x, %x)\n",
@@ -1161,16 +1162,24 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
/*
* For user mapping, allocate kernel VM resources if necessary.
*/
- if (pmap->pm_ptab == NULL)
+ if (pmap->pm_ptab == NULL) {
pmap->pm_ptab = (pt_entry_t *)
uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
- UVM_KMF_VAONLY | UVM_KMF_WAITVA);
+ UVM_KMF_VAONLY |
+ (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
+ if (pmap->pm_ptab == NULL)
+ return ENOMEM;
+ }
/*
* Segment table entry not valid, we need a new PT page
*/
- if (!pmap_ste_v(pmap, va))
- pmap_enter_ptpage(pmap, va);
+ if (!pmap_ste_v(pmap, va)) {
+ int err = pmap_enter_ptpage(pmap, va, can_fail);
+ if (err)
+ return err;
+ }
+
pa = m68k_trunc_page(pa);
pte = pmap_pte(pmap, va);
@@ -1446,7 +1455,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
if (!pmap_ste_v(pmap, va)) {
s = splvm();
- pmap_enter_ptpage(pmap, va);
+ pmap_enter_ptpage(pmap, va, FALSE);
splx(s);
}
@@ -2220,7 +2229,9 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
#endif
pmap_remove_mapping(pmap_kernel(), ptpva,
NULL, PRM_TFLUSH|PRM_CFLUSH);
+ simple_lock(&uvm.kernel_object->vmobjlock);
uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
+ simple_unlock(&uvm.kernel_object->vmobjlock);
PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
("remove: PT page 0x%lx (0x%lx) freed\n",
ptpva, ptppa));
@@ -2532,8 +2543,8 @@ pmap_changebit(paddr_t pa, int set, int
* Allocate and map a PT page for the specified pmap/va pair.
*/
/* static */
-void
-pmap_enter_ptpage(pmap_t pmap, vaddr_t va)
+int
+pmap_enter_ptpage(pmap_t pmap, vaddr_t va, boolean_t can_fail)
{
paddr_t ptpa;
struct vm_page *pg;
@@ -2554,7 +2565,12 @@ pmap_enter_ptpage(pmap_t pmap, vaddr_t v
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (st_entry_t *)
uvm_km_alloc(st_map, M68K_STSIZE, 0,
- UVM_KMF_WIRED | UVM_KMF_ZERO);
+ UVM_KMF_WIRED | UVM_KMF_ZERO |
+ (can_fail ? UVM_KMF_NOWAIT : 0));
+ if (pmap->pm_stab == NULL) {
+ pmap->pm_stab = Segtabzero;
+ return ENOMEM;
+ }
(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040) || defined(M68060)
@@ -2675,11 +2691,15 @@ pmap_enter_ptpage(pmap_t pmap, vaddr_t v
pmap->pm_sref++;
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
("enter: about to alloc UPT pg at %lx\n", va));
+ simple_lock(&uvm.kernel_object->vmobjlock);
while ((pg = uvm_pagealloc(uvm.kernel_object,
va - vm_map_min(kernel_map),
NULL, UVM_PGA_ZERO)) == NULL) {
+ simple_unlock(&uvm.kernel_object->vmobjlock);
uvm_wait("ptpage");
+ simple_lock(&uvm.kernel_object->vmobjlock);
}
+ simple_unlock(&uvm.kernel_object->vmobjlock);
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
ptpa = VM_PAGE_TO_PHYS(pg);
@@ -2773,6 +2793,8 @@ pmap_enter_ptpage(pmap_t pmap, vaddr_t v
TBIAU();
pmap->pm_ptpages++;
splx(s);
+
+ return 0;
}
/*