NetBSD-Bugs archive
Re: kern/40027 (pagedaemon loops on memory shortage)
The following reply was made to PR kern/40027; it has been noted by GNATS.
From: Andrew Doran <ad%NetBSD.org@localhost>
To: gnats-bugs%netbsd.org@localhost
Cc:
Subject: Re: kern/40027 (pagedaemon loops on memory shortage)
Date: Wed, 3 Dec 2008 14:04:57 +0000
With this patch against -current and the test program, a dual-CPU machine
with 128MB of RAM stalls during heavy paging activity, but the memory-hog
process is eventually killed and the machine recovers.
Memory: 3728K Act, 2156K Inact, 500K Wired, 4484K Exec, 124K File, 95M Free
Andrew
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.102
diff -u -p -r1.102 uvm_km.c
--- uvm_km.c 1 Dec 2008 10:54:57 -0000 1.102
+++ uvm_km.c 3 Dec 2008 13:53:34 -0000
@@ -589,7 +589,9 @@ uvm_km_alloc(struct vm_map *map, vsize_t
loopva = kva;
loopsize = size;
- pgaflags = UVM_PGA_USERESERVE;
+ pgaflags = 0;
+ if (flags & UVM_KMF_NOWAIT)
+ pgaflags |= UVM_PGA_USERESERVE;
if (flags & UVM_KMF_ZERO)
pgaflags |= UVM_PGA_ZERO;
prot = VM_PROT_READ | VM_PROT_WRITE;
@@ -698,7 +700,7 @@ uvm_km_alloc_poolpage_cache(struct vm_ma
return 0;
KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
- pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
@@ -724,7 +726,7 @@ uvm_km_alloc_poolpage(struct vm_map *map
vaddr_t va;
again:
- pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
Index: uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.264
diff -u -p -r1.264 uvm_map.c
--- uvm_map.c 1 Dec 2008 10:54:57 -0000 1.264
+++ uvm_map.c 3 Dec 2008 13:53:35 -0000
@@ -4616,7 +4616,8 @@ again:
* for simplicity, always allocate one page chunk of them at once.
*/
- pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ pg = uvm_pagealloc(NULL, 0, NULL,
+ (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
if (__predict_false(pg == NULL)) {
if (flags & UVM_FLAG_NOWAIT)
return NULL;
Index: uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.140
diff -u -p -r1.140 uvm_page.c
--- uvm_page.c 4 Jul 2008 10:56:59 -0000 1.140
+++ uvm_page.c 3 Dec 2008 13:53:35 -0000
@@ -1072,6 +1072,7 @@ uvm_pagealloc_strat(struct uvm_object *o
struct uvm_cpu *ucpu;
struct vm_page *pg;
bool use_reserve;
+ lwp_t *l;
KASSERT(obj == NULL || anon == NULL);
KASSERT(anon == NULL || off == 0);
@@ -1079,6 +1080,15 @@ uvm_pagealloc_strat(struct uvm_object *o
KASSERT(obj == NULL || mutex_owned(&obj->vmobjlock));
KASSERT(anon == NULL || mutex_owned(&anon->an_lock));
+ /*
+ * make kernel reserve pages available if called by a kernel
+ * thread or a realtime thread.
+ */
+ l = curlwp;
+ if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
+ flags |= UVM_PGA_USERESERVE;
+ }
+
mutex_spin_enter(&uvm_fpageqlock);
/*
Index: uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.96
diff -u -p -r1.96 uvm_pdaemon.c
--- uvm_pdaemon.c 3 Dec 2008 11:43:51 -0000 1.96
+++ uvm_pdaemon.c 3 Dec 2008 13:53:35 -0000
@@ -294,20 +294,20 @@ uvm_pageout(void *arg)
needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
needsscan = needsfree || uvmpdpol_needsscan_p();
- mutex_spin_exit(&uvm_fpageqlock);
/*
* scan if needed
*/
- if (needsscan)
+ if (needsscan) {
+ mutex_spin_exit(&uvm_fpageqlock);
uvmpd_scan();
+ mutex_spin_enter(&uvm_fpageqlock);
+ }
/*
* if there's any free memory to be had,
* wake up any waiters.
*/
-
- mutex_spin_enter(&uvm_fpageqlock);
if (uvmexp.free > uvmexp.reserve_kernel ||
uvmexp.paging == 0) {
wakeup(&uvmexp.free);
@@ -870,7 +870,9 @@ uvmpd_scan_queue(void)
if (swapcluster_allocslots(&swc)) {
mutex_exit(slock);
+#if 0
dirtyreacts++; /* XXX */
+#endif
continue;
}
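[Editorial note, not part of the original patch: the common thread in the
hunks above is UVM_PGA_USERESERVE, which lets an allocation dip into the
pages held back for the kernel (uvmexp.reserve_kernel). The patch stops
handing that flag out unconditionally for kernel-memory allocations and
instead grants it only to callers that cannot wait (UVM_KMF_NOWAIT /
!waitok) and, in uvm_pagealloc_strat(), to kernel or realtime threads.
The sketch below is a standalone toy model of that policy under stated
assumptions; every name in it is invented for illustration and none of it
is NetBSD kernel code.]

/*
 * Toy model (hypothetical names throughout) of the reserve policy the
 * patch implements: ordinary allocations fail once only the kernel
 * reserve is left, while "use reserve" callers and kernel threads may
 * still make progress.
 */
#include <stdbool.h>
#include <stdio.h>

#define USERESERVE	0x01		/* caller may consume reserved pages */

static unsigned free_pages = 10;	/* pretend free-page count */
static unsigned reserve_kernel = 8;	/* pages held back for the kernel */

/* Model of a page allocation: returns true if a page was handed out. */
static bool
toy_pagealloc(int flags, bool caller_is_kthread)
{
	/*
	 * The patch's idea: kernel/realtime threads get reserve access
	 * automatically; everyone else only when they asked for it,
	 * e.g. because they cannot sleep for the pagedaemon.
	 */
	if (caller_is_kthread)
		flags |= USERESERVE;

	if (free_pages == 0)
		return false;
	if ((flags & USERESERVE) == 0 && free_pages <= reserve_kernel)
		return false;		/* only the reserve is left: refuse */

	free_pages--;
	return true;
}

int
main(void)
{
	/* Drain memory down to the reserve with ordinary allocations... */
	while (toy_pagealloc(0, false))
		continue;
	printf("ordinary caller stopped with %u pages left\n", free_pages);

	/* ...but a kernel thread (e.g. the pagedaemon) still gets a page. */
	if (toy_pagealloc(0, true))
		printf("kernel thread still got a page, %u left\n", free_pages);
	return 0;
}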