NetBSD-Bugs archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Re: kern/56764



    Date:        Sat, 21 Feb 2026 16:25:02 +0000 (UTC)
    From:        "Thomas Klausner via gnats" <gnats-admin%NetBSD.org@localhost>
    Message-ID:  <20260221162502.0175B1A923D%mollari.NetBSD.org@localhost>

  |  The first diff causes a 'locking against myself' error in
  |  uvm_aio_aiodone_pages, so I removed the lock access in uvm_pager.c and
  |  am trying this version now.

I think you're protecting the wrong thing, and the lock you're using
is the wrong one anyway (not that there is a right one that I can see).

You shouldn't need locks around atomic operations - they're atomic for
a reason; if anything, what ought to be protected (probably by making the
relevant ops atomic) are the uvmexp.swpginuse manipulations.

But none of that will help if the compiler is generating code which is
reading any of these vars (struct members) and caching them for some time
later.   It is entitled to do that.   To fix that, things need to be made
volatile, but I'm not sure whether that would mean the entire uvmexp
struct would have to be made volatile, or whether just a few members of
it can be, with the others not.   Making the entire struct volatile might
have performance implications.

kre

  |   Thomas
  |  
  |  --blvg5djcc6d5falj
  |  Content-Type: text/plain; charset=us-ascii
  |  Content-Disposition: attachment; filename="uvm.2.diff"
  |  
  |  Index: uvm_km.c
  |  ===================================================================
  |  RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
  |  retrieving revision 1.166
  |  diff -u -r1.166 uvm_km.c
  |  --- uvm_km.c	7 Dec 2024 23:19:07 -0000	1.166
  |  +++ uvm_km.c	21 Feb 2026 16:21:45 -0000
  |  @@ -491,8 +491,10 @@
  |   	rw_exit(uobj->vmobjlock);
  |   
  |   	if (swpgonlydelta > 0) {
  |  +		mutex_enter(&uvm_swap_data_lock);
  |   		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
  |   		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
  |  +		mutex_exit(&uvm_swap_data_lock);
  |   	}
  |   }
  |   
  |  Index: uvm_pager.c
  |  ===================================================================
  |  RCS file: /cvsroot/src/sys/uvm/uvm_pager.c,v
  |  retrieving revision 1.131
  |  diff -u -r1.131 uvm_pager.c
  |  --- uvm_pager.c	15 Mar 2024 07:09:37 -0000	1.131
  |  +++ uvm_pager.c	21 Feb 2026 16:21:45 -0000
  |  @@ -483,6 +483,9 @@
  |   
  |   		/* these pages are now only in swap. */
  |   		if (error != ENOMEM) {
  |  +			KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + npages,
  |  +				   "swpginuse %d swpgonly %d npages %d",
  |  +				   uvmexp.swpginuse, uvmexp.swpgonly, npages);
  |   			atomic_add_int(&uvmexp.swpgonly, npages);
  |   		}
  |   		if (error) {
  |  Index: uvm_pdaemon.c
  |  ===================================================================
  |  RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
  |  retrieving revision 1.135
  |  diff -u -r1.135 uvm_pdaemon.c
  |  --- uvm_pdaemon.c	4 Jan 2026 00:41:14 -0000	1.135
  |  +++ uvm_pdaemon.c	21 Feb 2026 16:21:45 -0000
  |  @@ -792,10 +792,12 @@
  |   			}
  |   			if (slot > 0) {
  |   				/* this page is now only in swap. */
  |  +				mutex_enter(&uvm_swap_data_lock);
  |   				KASSERTMSG(uvmexp.swpgonly < uvmexp.swpginuse,
  |   					   "swpgonly %d swpginuse %d",
  |   					   uvmexp.swpgonly, uvmexp.swpginuse);
  |   				atomic_inc_uint(&uvmexp.swpgonly);
  |  +				mutex_exit(&uvm_swap_data_lock);
  |   			}
  |   			rw_exit(slock);
  |   			continue;
  |  Index: uvm_swap.c
  |  ===================================================================
  |  RCS file: /cvsroot/src/sys/uvm/uvm_swap.c,v
  |  retrieving revision 1.215
  |  diff -u -r1.215 uvm_swap.c
  |  --- uvm_swap.c	13 Feb 2026 19:16:41 -0000	1.215
  |  +++ uvm_swap.c	21 Feb 2026 16:21:45 -0000
  |  @@ -201,7 +201,7 @@
  |   static struct swap_priority swap_priority;
  |   
  |   /* locks */
  |  -static kmutex_t uvm_swap_data_lock __cacheline_aligned;
  |  +kmutex_t uvm_swap_data_lock __cacheline_aligned;
  |   static krwlock_t swap_syscall_lock;
  |   bool uvm_swap_init_done = false;
  |   
  |  @@ -1246,6 +1246,9 @@
  |   	KASSERTMSG(uvmexp.swpginuse >= sdp->swd_npgbad,
  |   		   "swpginuse %d sdp->swd_npgbad %d",
  |   		   uvmexp.swpginuse, sdp->swd_npgbad);
  |  +	KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + sdp->swd_npgbad,
  |  +		   "swpginuse %d swpgonly %d sdp->swd_npgbad %d",
  |  +		   uvmexp.swpginuse, uvmexp.swpgonly, sdp->swd_npgbad);
  |   	uvmexp.swpginuse -= sdp->swd_npgbad;
  |   
  |   	if (swaplist_find(sdp->swd_vp, true) == NULL)
  |  @@ -1932,6 +1935,9 @@
  |   	sdp->swd_npginuse -= nslots;
  |   	KASSERTMSG(uvmexp.swpginuse >= nslots, "swpginuse %d nslots %d",
  |   		   uvmexp.swpginuse, nslots);
  |  +	KASSERTMSG(uvmexp.swpginuse >= uvmexp.swpgonly + nslots,
  |  +		   "swpginuse %d swpgonly %d nslots %d",
  |  +		   uvmexp.swpginuse, uvmexp.swpgonly, nslots);
  |   	uvmexp.swpginuse -= nslots;
  |   	mutex_exit(&uvm_swap_data_lock);
  |   }
  |  @@ -1977,8 +1983,10 @@
  |   		 * this page is no longer only in swap.
  |   		 */
  |   
  |  +		mutex_enter(&uvm_swap_data_lock);
  |   		KASSERT(uvmexp.swpgonly > 0);
  |   		atomic_dec_uint(&uvmexp.swpgonly);
  |  +		mutex_exit(&uvm_swap_data_lock);
  |   	}
  |   	return error;
  |   }
  |  Index: uvm_swap.h
  |  ===================================================================
  |  RCS file: /cvsroot/src/sys/uvm/uvm_swap.h,v
  |  retrieving revision 1.29
  |  diff -u -r1.29 uvm_swap.h
  |  --- uvm_swap.h	15 Mar 2024 22:15:21 -0000	1.29
  |  +++ uvm_swap.h	21 Feb 2026 16:21:45 -0000
  |  @@ -56,6 +56,7 @@
  |   int	uvm_swap_stats(char *, int,
  |       void (*)(void *, const struct swapent *), size_t, register_t *);
  |   
  |  +extern kmutex_t uvm_swap_data_lock;
  |   #else /* defined(VMSWAP) */
  |   
  |   #define	uvm_swapisfull()	true
  |  
  |  --blvg5djcc6d5falj--
  |  
  |




Home | Main Index | Thread Index | Old Index