NetBSD-Bugs archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Re: kern/56764
The following reply was made to PR kern/56764; it has been noted by GNATS.
From: Thomas Klausner <wiz%gatalith.at@localhost>
To: NetBSD bugtracking <gnats-bugs%netbsd.org@localhost>
Cc:
Subject: Re: kern/56764
Date: Sat, 21 Feb 2026 16:49:59 +0100
--ykk54ve36zcfxrfd
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
Hi!
I'm trying to find the cause of the
kernel diagnostic assertion "uvmexp.swpgonly < uvmexp.swpginuse" failed
panics that I hit quite regularly.
void uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool
write, int error) in uvm_pager.c v1.131 ends with this:
if (!swap) {
uvm_page_unbusy(pgs, npages);
rw_exit(slock);
} else {
#if defined(VMSWAP)
KASSERT(write);
/* these pages are now only in swap. */
if (error != ENOMEM) {
atomic_add_int(&uvmexp.swpgonly, npages);
}
if (error) {
if (error != ENOMEM)
uvm_swap_markbad(swslot, npages);
else
uvm_swap_free(swslot, npages);
}
atomic_dec_uint(&uvmexp.pdpending);
#endif /* defined(VMSWAP) */
}
In GENERIC on my system, VMSWAP is 1.
So, if 'swap != 0' and 'error != ENOMEM', uvmexp.swpgonly is increased
by npages. In case error is not 0 and not ENOMEM, it is then decreased
again in uvm_swap_markbad().
In uvm_swap_markbad() the change is protected by
mutex_enter(&uvm_swap_data_lock), but here in uvm_aio_aiodone_pages()
it isn't.
1. Should this be protected by mutex_enter(&uvm_swap_data_lock) in
uvm_aio_aiodone_pages()?
2. How is it ensured that uvmexp.swpgonly < uvmexp.swpginuse here when
error == 0? uvm_aio_aiodone_pages() does not increase swpginuse.
I see a couple other places where changing swpgonly is not protected
by the mutex.
The other suspect are the two places where swpginuse is reduced,
neither of them compares against swpgonly.
I'll try the attached diff (I've had to make the lock available in
uvm_swap.h for the three places outside of uvm_swap.c that modify
swpgonly.)
Thomas
--ykk54ve36zcfxrfd
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename="uvm.diff"
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.166
diff -u -r1.166 uvm_km.c
--- uvm_km.c 7 Dec 2024 23:19:07 -0000 1.166
+++ uvm_km.c 21 Feb 2026 15:32:36 -0000
@@ -491,8 +491,10 @@
rw_exit(uobj->vmobjlock);
if (swpgonlydelta > 0) {
+ mutex_enter(&uvm_swap_data_lock);
KASSERT(uvmexp.swpgonly >= swpgonlydelta);
atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
+ mutex_exit(&uvm_swap_data_lock);
}
}
Index: uvm_pager.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pager.c,v
retrieving revision 1.131
diff -u -r1.131 uvm_pager.c
--- uvm_pager.c 15 Mar 2024 07:09:37 -0000 1.131
+++ uvm_pager.c 21 Feb 2026 15:32:36 -0000
@@ -483,7 +483,12 @@
/* these pages are now only in swap. */
if (error != ENOMEM) {
+ mutex_enter(&uvm_swap_data_lock);
+ KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + npages,
+ "swpginuse %d swpgonly %d npages %d",
+ uvmexp.swpginuse, uvmexp.swpgonly, npages);
atomic_add_int(&uvmexp.swpgonly, npages);
+ mutex_exit(&uvm_swap_data_lock);
}
if (error) {
if (error != ENOMEM)
Index: uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.135
diff -u -r1.135 uvm_pdaemon.c
--- uvm_pdaemon.c 4 Jan 2026 00:41:14 -0000 1.135
+++ uvm_pdaemon.c 21 Feb 2026 15:32:37 -0000
@@ -792,10 +792,12 @@
}
if (slot > 0) {
/* this page is now only in swap. */
+ mutex_enter(&uvm_swap_data_lock);
KASSERTMSG(uvmexp.swpgonly < uvmexp.swpginuse,
"swpgonly %d swpginuse %d",
uvmexp.swpgonly, uvmexp.swpginuse);
atomic_inc_uint(&uvmexp.swpgonly);
+ mutex_exit(&uvm_swap_data_lock);
}
rw_exit(slock);
continue;
Index: uvm_swap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_swap.c,v
retrieving revision 1.215
diff -u -r1.215 uvm_swap.c
--- uvm_swap.c 13 Feb 2026 19:16:41 -0000 1.215
+++ uvm_swap.c 21 Feb 2026 15:32:37 -0000
@@ -201,7 +201,7 @@
static struct swap_priority swap_priority;
/* locks */
-static kmutex_t uvm_swap_data_lock __cacheline_aligned;
+kmutex_t uvm_swap_data_lock __cacheline_aligned;
static krwlock_t swap_syscall_lock;
bool uvm_swap_init_done = false;
@@ -1246,6 +1246,9 @@
KASSERTMSG(uvmexp.swpginuse >= sdp->swd_npgbad,
"swpginuse %d sdp->swd_npgbad %d",
uvmexp.swpginuse, sdp->swd_npgbad);
+ KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + sdp->swd_npgbad,
+ "swpginuse %d swpgonly %d sdp->swd_npgbad %d",
+ uvmexp.swpginuse, uvmexp.swpgonly, sdp->swd_npgbad);
uvmexp.swpginuse -= sdp->swd_npgbad;
if (swaplist_find(sdp->swd_vp, true) == NULL)
@@ -1932,6 +1935,9 @@
sdp->swd_npginuse -= nslots;
KASSERTMSG(uvmexp.swpginuse >= nslots, "swpginuse %d nslots %d",
uvmexp.swpginuse, nslots);
+ KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + nslots,
+ "swpginuse %d swpgonly %d nslots %d",
+ uvmexp.swpginuse, uvmexp.swpgonly, nslots);
uvmexp.swpginuse -= nslots;
mutex_exit(&uvm_swap_data_lock);
}
@@ -1977,8 +1983,10 @@
* this page is no longer only in swap.
*/
+ mutex_enter(&uvm_swap_data_lock);
KASSERT(uvmexp.swpgonly > 0);
atomic_dec_uint(&uvmexp.swpgonly);
+ mutex_exit(&uvm_swap_data_lock);
}
return error;
}
Index: uvm_swap.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_swap.h,v
retrieving revision 1.29
diff -u -r1.29 uvm_swap.h
--- uvm_swap.h 15 Mar 2024 22:15:21 -0000 1.29
+++ uvm_swap.h 21 Feb 2026 15:32:37 -0000
@@ -56,6 +56,7 @@
int uvm_swap_stats(char *, int,
void (*)(void *, const struct swapent *), size_t, register_t *);
+extern kmutex_t uvm_swap_data_lock;
#else /* defined(VMSWAP) */
#define uvm_swapisfull() true
--ykk54ve36zcfxrfd--
Home |
Main Index |
Thread Index |
Old Index