NetBSD-Bugs archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

kern/60030: ddb/crash: show all locks without LOCKDEBUG



>Number:         60030
>Category:       kern
>Synopsis:       ddb/crash: show all locks without LOCKDEBUG
>Confidential:   no
>Severity:       serious
>Priority:       medium
>Responsible:    kern-bug-people
>State:          open
>Class:          change-request
>Submitter-Id:   net
>Arrival-Date:   Mon Feb 23 02:45:00 +0000 2026
>Originator:     Taylor R Campbell
>Release:        current, 11, 10, 9, ...
>Organization:
The NetBSD Foundation, Inc.
>Environment:
>Description:

	We have had a number of deadlocks involving softints leading to
	heartbeat panics recently.  The heartbeat mechanism itself is
	not the problem -- it's just exposing other problems that might
	otherwise manifest as one CPU quietly getting stuck while
	dozens of others continue normally.

	Examples:

	PR kern/59963: Lock Order Reversal issue with PPP
	PR kern/60029: panic: cpu0: softints stuck for 16 seconds

	The obvious symptom is often the same -- panic: softints stuck
	for N seconds.  But the underlying problem is likely entirely
	unrelated.  We can track it down by following ps and ps/w and
	show all tstiles output to find which softint threads are
	waiting for which locks, and x/Lx on carefully chosen bits of
	what the wchan points to -- when it is applicable -- to find
	who holds the lock.  But sometimes it is hard to find, e.g. if
	the waiter is actually spinning because the lock holder is
	running on another CPU.

	In a kernel with LOCKDEBUG, detailed information about who is
	holding or waiting for which locks is recorded, but it's very
	expensive and never deployed in production (and for some
	workloads it just doesn't work, leading to kernel lock spinouts
	or similar, because its implications aren't exercised well
	enough).

	But we could easily add a bit of overhead just to the
	_contended_ paths of lock acquisition to record enough
	information for ddb or crash to tell us everything we need to
	diagnose these: just store a pointer to the lock in the
	existing struct lwp::l_ld_wanted member (currently used only
	under LOCKDEBUG) while acquiring a lock, saving whatever was
	there before on the stack.

	We can even just put this into the existing LOCKDEBUG_WANTLOCK
	and LOCKDEBUG_LOCKED macros, with a small tweak.

>How-To-Repeat:

	In ddb or crash:

	show all locks

>Fix:

	Two parts:

	1. Teach all the types of locks to record what they're waiting
	   for -- while _either_ spinning or sleeping -- by tweaking
	   the LOCKDEBUG_WANTLOCK and LOCKDEBUG_LOCKED macros (plus
	   tidying up a few cases where they weren't matched).

	2. Teach ddb/crash to show for each lwp waiting for a lock
	   which lock it's waiting for.

	XXX Not really fit for commit yet -- too much duplication to
	sync in the ddb and crash paths, need to disentangle the parts.
	Tricky because for the _kernel_ ddb build, it's
	compile-time-conditional on LOCKDEBUG, while for the _userland_
	crash(8) build, it's run-time-conditional on whether the kernel
	it's examining was built with LOCKDEBUG.

# HG changeset patch
# User Taylor R Campbell <riastradh%NetBSD.org@localhost>
# Date 1771802723 0
#      Sun Feb 22 23:25:23 2026 +0000
# Branch trunk
# Node ID 6012a26811baaef76ae6093b3032098c2d60caca
# Parent  8103eaab019500f5780e323f8ad0056f7f973eef
# EXP-Topic riastradh-prNNNNN-showalllocks
WIP: Track what lock each lwp is waiting for without LOCKDEBUG.

This is reasonably cheap -- one extra pointer on the stack, couple
extra stores when _waiting_ (spinning or sleeping) for a _contended_
lock -- and doesn't change the kernel ABI.  Will enable us to get
diagnostics from crash dumps when, e.g., there's a softint deadlock
tripping a heartbeat panic.

diff -r 8103eaab0195 -r 6012a26811ba sys/external/bsd/drm2/linux/linux_rwsem.c
--- a/sys/external/bsd/drm2/linux/linux_rwsem.c	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/external/bsd/drm2/linux/linux_rwsem.c	Sun Feb 22 23:25:23 2026 +0000
@@ -42,15 +42,15 @@ __KERNEL_RCSID(0, "$NetBSD: linux_rwsem.
 
 #include <linux/rwsem.h>
 
-#define	RWSEM_WANTLOCK(RWSEM)						      \
+#define	RWSEM_WANTLOCK(RWSEM, OWANTEDP)					      \
 	LOCKDEBUG_WANTLOCK((RWSEM)->rws_debug, (RWSEM),			      \
-	    (uintptr_t)__builtin_return_address(0), 0)
-#define	RWSEM_LOCKED_EX(RWSEM)						      \
+	    (uintptr_t)__builtin_return_address(0), 0, OWANTEDP)
+#define	RWSEM_LOCKED_EX(RWSEM, OWANTEDP)				      \
 	LOCKDEBUG_LOCKED((RWSEM)->rws_debug, (RWSEM), NULL,		      \
-	    (uintptr_t)__builtin_return_address(0), 0)
-#define	RWSEM_LOCKED_SH(RWSEM)						      \
+	    (uintptr_t)__builtin_return_address(0), 0, OWANTEDP)
+#define	RWSEM_LOCKED_SH(RWSEM, OWANTEDP)				      \
 	LOCKDEBUG_LOCKED((RWSEM)->rws_debug, (RWSEM), NULL,		      \
-	    (uintptr_t)__builtin_return_address(0), 1)
+	    (uintptr_t)__builtin_return_address(0), 1, OWANTEDP)
 #define	RWSEM_UNLOCKED_EX(RWSEM)					      \
 	LOCKDEBUG_UNLOCKED((RWSEM)->rws_debug, (RWSEM),			      \
 	    (uintptr_t)__builtin_return_address(0), 0)
@@ -109,8 +109,9 @@ destroy_rwsem(struct rw_semaphore *rwsem
 void
 down_read(struct rw_semaphore *rwsem)
 {
+	volatile void *owanted;
 
-	RWSEM_WANTLOCK(rwsem);
+	RWSEM_WANTLOCK(rwsem, &owanted);
 
 	mutex_enter(&rwsem->rws_lock);
 	while (rwsem->rws_writer || rwsem->rws_writewanted)
@@ -119,7 +120,7 @@ down_read(struct rw_semaphore *rwsem)
 	rwsem->rws_readers++;
 	mutex_exit(&rwsem->rws_lock);
 
-	RWSEM_LOCKED_SH(rwsem);
+	RWSEM_LOCKED_SH(rwsem, &owanted);
 }
 
 bool
@@ -144,7 +145,8 @@ down_read_trylock(struct rw_semaphore *r
 	mutex_exit(&rwsem->rws_lock);
 
 	if (ret) {
-		RWSEM_LOCKED_SH(rwsem);
+		RWSEM_WANTLOCK(rwsem, NULL);
+		RWSEM_LOCKED_SH(rwsem, NULL);
 	}
 
 	return ret;
@@ -167,8 +169,9 @@ up_read(struct rw_semaphore *rwsem)
 void
 down_write(struct rw_semaphore *rwsem)
 {
+	volatile void *owanted;
 
-	RWSEM_WANTLOCK(rwsem);
+	RWSEM_WANTLOCK(rwsem, &owanted);
 
 	mutex_enter(&rwsem->rws_lock);
 
@@ -196,7 +199,7 @@ down_write(struct rw_semaphore *rwsem)
 
 	mutex_exit(&rwsem->rws_lock);
 
-	RWSEM_LOCKED_EX(rwsem);
+	RWSEM_LOCKED_EX(rwsem, &owanted);
 }
 
 void
@@ -227,5 +230,6 @@ downgrade_write(struct rw_semaphore *rws
 	cv_broadcast(&rwsem->rws_cv);
 	mutex_exit(&rwsem->rws_lock);
 
-	RWSEM_LOCKED_SH(rwsem);
+	RWSEM_WANTLOCK(rwsem, NULL);
+	RWSEM_LOCKED_SH(rwsem, NULL);
 }
diff -r 8103eaab0195 -r 6012a26811ba sys/external/bsd/drm2/linux/linux_ww_mutex.c
--- a/sys/external/bsd/drm2/linux/linux_ww_mutex.c	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/external/bsd/drm2/linux/linux_ww_mutex.c	Sun Feb 22 23:25:23 2026 +0000
@@ -43,12 +43,12 @@ __KERNEL_RCSID(0, "$NetBSD: linux_ww_mut
 #include <linux/ww_mutex.h>
 #include <linux/errno.h>
 
-#define	WW_WANTLOCK(WW)							      \
+#define	WW_WANTLOCK(WW, OWANTEDP)					      \
 	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			      \
-	    (uintptr_t)__builtin_return_address(0), 0)
-#define	WW_LOCKED(WW)							      \
+	    (uintptr_t)__builtin_return_address(0), 0, OWANTEDP)
+#define	WW_LOCKED(WW, OWANTEDP)						      \
 	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			      \
-	    (uintptr_t)__builtin_return_address(0), 0)
+	    (uintptr_t)__builtin_return_address(0), 0, OWANTEDP)
 #define	WW_UNLOCKED(WW)							      \
 	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			      \
 	    (uintptr_t)__builtin_return_address(0), 0)
@@ -458,7 +458,7 @@ ww_mutex_lock_wait_sig(struct ww_mutex *
 }
 
 /*
- * ww_mutex_lock_noctx(mutex)
+ * ww_mutex_lock_noctx(mutex, &owanted)
  *
  *	Acquire mutex without an acquire context.  Caller must not
  *	already hold the mutex.  Uninterruptible; never fails.
@@ -468,7 +468,7 @@ ww_mutex_lock_wait_sig(struct ww_mutex *
  *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
  */
 static void
-ww_mutex_lock_noctx(struct ww_mutex *mutex)
+ww_mutex_lock_noctx(struct ww_mutex *mutex, volatile void **owantedp)
 {
 
 	mutex_enter(&mutex->wwm_lock);
@@ -497,12 +497,12 @@ retry:	switch (mutex->wwm_state) {
 	}
 	KASSERT(mutex->wwm_state == WW_OWNED);
 	KASSERT(mutex->wwm_u.owner == curlwp);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, owantedp);
 	mutex_exit(&mutex->wwm_lock);
 }
 
 /*
- * ww_mutex_lock_noctx_sig(mutex)
+ * ww_mutex_lock_noctx_sig(mutex, &owanted)
  *
  *	Acquire mutex without an acquire context and return 0, or fail
  *	and return -EINTR if interrupted by a signal.  Caller must not
@@ -514,7 +514,7 @@ retry:	switch (mutex->wwm_state) {
  *	ww_mutex_lock_interruptible(..., NULL).
  */
 static int
-ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
+ww_mutex_lock_noctx_sig(struct ww_mutex *mutex, volatile void **owantedp)
 {
 	int ret;
 
@@ -552,7 +552,7 @@ retry:	switch (mutex->wwm_state) {
 	}
 	KASSERT(mutex->wwm_state == WW_OWNED);
 	KASSERT(mutex->wwm_u.owner == curlwp);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, owantedp);
 	ret = 0;
 out:	mutex_exit(&mutex->wwm_lock);
 	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
@@ -577,6 +577,7 @@ out:	mutex_exit(&mutex->wwm_lock);
 int
 ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
+	volatile void *owanted;
 	int ret;
 
 	/*
@@ -587,8 +588,8 @@ ww_mutex_lock(struct ww_mutex *mutex, st
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
-		WW_WANTLOCK(mutex);
-		ww_mutex_lock_noctx(mutex);
+		WW_WANTLOCK(mutex, &owanted);
+		ww_mutex_lock_noctx(mutex, &owanted);
 		ret = 0;
 		goto out;
 	}
@@ -605,12 +606,12 @@ ww_mutex_lock(struct ww_mutex *mutex, st
 	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
-		WW_WANTLOCK(mutex);
+		WW_WANTLOCK(mutex, &owanted);
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
 		goto locked;
 	case WW_OWNED:
-		WW_WANTLOCK(mutex);
+		WW_WANTLOCK(mutex, &owanted);
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ww_mutex_state_wait(mutex, WW_OWNED);
@@ -644,7 +645,7 @@ retry:	switch (mutex->wwm_state) {
 	 * We do not own it.  We can safely assert to LOCKDEBUG that we
 	 * want it.
 	 */
-	WW_WANTLOCK(mutex);
+	WW_WANTLOCK(mutex, &owanted);
 
 	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
 		/*
@@ -667,7 +668,7 @@ retry:	switch (mutex->wwm_state) {
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, &owanted);
 	ctx->wwx_acquired++;
 	ret = 0;
 out_unlock:
@@ -697,6 +698,7 @@ out:	KASSERTMSG((ret == 0 || ret == -EAL
 int
 ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
+	volatile void *owanted;
 	int ret;
 
 	/*
@@ -707,8 +709,8 @@ ww_mutex_lock_interruptible(struct ww_mu
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
-		WW_WANTLOCK(mutex);
-		ret = ww_mutex_lock_noctx_sig(mutex);
+		WW_WANTLOCK(mutex, &owanted);
+		ret = ww_mutex_lock_noctx_sig(mutex, &owanted);
 		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 		goto out;
 	}
@@ -725,12 +727,12 @@ ww_mutex_lock_interruptible(struct ww_mu
 	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
-		WW_WANTLOCK(mutex);
+		WW_WANTLOCK(mutex, &owanted);
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
 		goto locked;
 	case WW_OWNED:
-		WW_WANTLOCK(mutex);
+		WW_WANTLOCK(mutex, &owanted);
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
@@ -772,7 +774,7 @@ retry:	switch (mutex->wwm_state) {
 	 * We do not own it.  We can safely assert to LOCKDEBUG that we
 	 * want it.
 	 */
-	WW_WANTLOCK(mutex);
+	WW_WANTLOCK(mutex, &owanted);
 
 	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
 		/*
@@ -799,7 +801,7 @@ retry:	switch (mutex->wwm_state) {
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, &owanted);
 	ctx->wwx_acquired++;
 	ret = 0;
 out_unlock:
@@ -824,13 +826,14 @@ out:	KASSERTMSG((ret == 0 || ret == -EAL
 void
 ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
+	volatile void *owanted;
 
 	/* Caller must not try to lock against self here.  */
-	WW_WANTLOCK(mutex);
+	WW_WANTLOCK(mutex, &owanted);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
-		ww_mutex_lock_noctx(mutex);
+		ww_mutex_lock_noctx(mutex, &owanted);
 		return;
 	}
 
@@ -881,7 +884,7 @@ retry:	switch (mutex->wwm_state) {
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, &owanted);
 	ctx->wwx_acquired++;
 	mutex_exit(&mutex->wwm_lock);
 }
@@ -901,13 +904,14 @@ int
 ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
     struct ww_acquire_ctx *ctx)
 {
+	volatile void *owanted;
 	int ret;
 
-	WW_WANTLOCK(mutex);
+	WW_WANTLOCK(mutex, &owanted);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
-		ret = ww_mutex_lock_noctx_sig(mutex);
+		ret = ww_mutex_lock_noctx_sig(mutex, &owanted);
 		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 		goto out;
 	}
@@ -971,7 +975,7 @@ retry:	switch (mutex->wwm_state) {
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
-	WW_LOCKED(mutex);
+	WW_LOCKED(mutex, &owanted);
 	ctx->wwx_acquired++;
 	ret = 0;
 out_unlock:
@@ -995,8 +999,8 @@ ww_mutex_trylock(struct ww_mutex *mutex)
 	if (mutex->wwm_state == WW_UNLOCKED) {
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
-		WW_WANTLOCK(mutex);
-		WW_LOCKED(mutex);
+		WW_WANTLOCK(mutex, NULL);
+		WW_LOCKED(mutex, NULL);
 		ret = 1;
 	} else {
 		/*
diff -r 8103eaab0195 -r 6012a26811ba sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/kern/kern_lock.c	Sun Feb 22 23:25:23 2026 +0000
@@ -271,6 +271,7 @@ _kernel_lock(int nlocks)
 	u_int starttime;
 	int s;
 	struct lwp *l = curlwp;
+	volatile void *owanted;
 
 	_KERNEL_LOCK_ASSERT(nlocks > 0);
 
@@ -287,7 +288,7 @@ _kernel_lock(int nlocks)
 
 	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
 	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
-	    0);
+	    0, &owanted);
 
 	if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
 		atomic_store_relaxed(&kernel_lock_holder, curcpu());
@@ -295,7 +296,7 @@ _kernel_lock(int nlocks)
 		ci->ci_biglock_count = nlocks;
 		l->l_blcnt = nlocks;
 		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
-		    RETURN_ADDRESS, 0);
+		    RETURN_ADDRESS, 0, &owanted);
 		splx(s);
 		return;
 	}
@@ -319,9 +320,6 @@ _kernel_lock(int nlocks)
 	membar_producer();
 	owant = ci->ci_biglock_wanted;
 	atomic_store_relaxed(&ci->ci_biglock_wanted, l);
-#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
-	l->l_ld_wanted = __builtin_return_address(0);
-#endif
 
 	/*
 	 * Spin until we acquire the lock.  Once we have it, record the
@@ -351,7 +349,7 @@ _kernel_lock(int nlocks)
 	l->l_blcnt = nlocks;
 	LOCKSTAT_STOP_TIMER(lsflag, spintime);
 	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
-	    RETURN_ADDRESS, 0);
+	    RETURN_ADDRESS, 0, &owanted);
 	if (owant == NULL) {
 		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
 		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
diff -r 8103eaab0195 -r 6012a26811ba sys/kern/kern_mutex.c
--- a/sys/kern/kern_mutex.c	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/kern/kern_mutex.c	Sun Feb 22 23:25:23 2026 +0000
@@ -77,15 +77,15 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
  * Debugging support.
  */
 
-#define	MUTEX_WANTLOCK(mtx)					\
+#define	MUTEX_WANTLOCK(mtx, owantedp)				\
     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
-        (uintptr_t)__builtin_return_address(0), 0)
+        (uintptr_t)__builtin_return_address(0), 0, owantedp)
 #define	MUTEX_TESTLOCK(mtx)					\
     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
-        (uintptr_t)__builtin_return_address(0), -1)
-#define	MUTEX_LOCKED(mtx)					\
+        (uintptr_t)__builtin_return_address(0), -1, NULL)
+#define	MUTEX_LOCKED(mtx, owantedp)				\
     LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
-        (uintptr_t)__builtin_return_address(0), 0)
+        (uintptr_t)__builtin_return_address(0), 0, owantedp)
 #define	MUTEX_UNLOCKED(mtx)					\
     LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
         (uintptr_t)__builtin_return_address(0), 0)
@@ -454,6 +454,7 @@ mutex_vector_enter(kmutex_t *mtx)
 #ifdef MULTIPROCESSOR
 	u_int count;
 #endif
+	volatile void *owanted;
 	LOCKSTAT_COUNTER(spincnt);
 	LOCKSTAT_COUNTER(slpcnt);
 	LOCKSTAT_TIMER(spintime);
@@ -471,10 +472,10 @@ mutex_vector_enter(kmutex_t *mtx)
 #endif
 		KPREEMPT_ENABLE(curlwp);
 		MUTEX_SPIN_SPLRAISE(mtx);
-		MUTEX_WANTLOCK(mtx);
+		MUTEX_WANTLOCK(mtx, &owanted);
 #ifdef FULL
 		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
-			MUTEX_LOCKED(mtx);
+			MUTEX_LOCKED(mtx, &owanted);
 			return;
 		}
 #if !defined(MULTIPROCESSOR)
@@ -508,7 +509,7 @@ mutex_vector_enter(kmutex_t *mtx)
 		LOCKSTAT_EXIT(lsflag);
 #endif	/* !MULTIPROCESSOR */
 #endif	/* FULL */
-		MUTEX_LOCKED(mtx);
+		MUTEX_LOCKED(mtx, &owanted);
 		return;
 	}
 
@@ -517,7 +518,7 @@ mutex_vector_enter(kmutex_t *mtx)
 	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(owner));
 	MUTEX_ASSERT(mtx, curthread != 0);
 	MUTEX_ASSERT(mtx, !cpu_intr_p());
-	MUTEX_WANTLOCK(mtx);
+	MUTEX_WANTLOCK(mtx, &owanted);
 
 	if (__predict_true(panicstr == NULL)) {
 		KDASSERT(pserialize_not_in_read_section());
@@ -704,7 +705,7 @@ mutex_vector_enter(kmutex_t *mtx)
 	LOCKSTAT_EXIT(lsflag);
 
 	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
-	MUTEX_LOCKED(mtx);
+	MUTEX_LOCKED(mtx, &owanted);
 }
 
 /*
@@ -881,22 +882,22 @@ mutex_tryenter(kmutex_t *mtx)
 		MUTEX_SPIN_SPLRAISE(mtx);
 #ifdef FULL
 		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
-			MUTEX_WANTLOCK(mtx);
-			MUTEX_LOCKED(mtx);
+			MUTEX_WANTLOCK(mtx, NULL);
+			MUTEX_LOCKED(mtx, NULL);
 			return 1;
 		}
 		MUTEX_SPIN_SPLRESTORE(mtx);
 #else
-		MUTEX_WANTLOCK(mtx);
-		MUTEX_LOCKED(mtx);
+		MUTEX_WANTLOCK(mtx, NULL);
+		MUTEX_LOCKED(mtx, NULL);
 		return 1;
 #endif
 	} else {
 		curthread = (uintptr_t)curlwp;
 		MUTEX_ASSERT(mtx, curthread != 0);
 		if (MUTEX_ACQUIRE(mtx, curthread)) {
-			MUTEX_WANTLOCK(mtx);
-			MUTEX_LOCKED(mtx);
+			MUTEX_WANTLOCK(mtx, NULL);
+			MUTEX_LOCKED(mtx, NULL);
 			MUTEX_DASSERT(mtx,
 			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
 			return 1;
@@ -923,8 +924,9 @@ mutex_spin_retry(kmutex_t *mtx)
 #ifdef LOCKDEBUG
 	u_int spins = 0;
 #endif	/* LOCKDEBUG */
+	volatile void *owanted;
 
-	MUTEX_WANTLOCK(mtx);
+	MUTEX_WANTLOCK(mtx, &owanted);
 
 	LOCKSTAT_ENTER(lsflag);
 	LOCKSTAT_START_TIMER(lsflag, spintime);
@@ -948,7 +950,7 @@ mutex_spin_retry(kmutex_t *mtx)
 	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
 	LOCKSTAT_EXIT(lsflag);
 
-	MUTEX_LOCKED(mtx);
+	MUTEX_LOCKED(mtx, &owanted);
 #else	/* MULTIPROCESSOR */
 	MUTEX_ABORT(mtx, "locking against myself");
 #endif	/* MULTIPROCESSOR */
diff -r 8103eaab0195 -r 6012a26811ba sys/kern/kern_rwlock.c
--- a/sys/kern/kern_rwlock.c	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/kern/kern_rwlock.c	Sun Feb 22 23:25:23 2026 +0000
@@ -75,12 +75,12 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock.
 
 #define	RW_DEBUG_P(rw)		(((rw)->rw_owner & RW_NODEBUG) == 0)
 
-#define	RW_WANTLOCK(rw, op) \
+#define	RW_WANTLOCK(rw, op, owantedp) \
     LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw), \
-        (uintptr_t)__builtin_return_address(0), op == RW_READER);
-#define	RW_LOCKED(rw, op) \
+        (uintptr_t)__builtin_return_address(0), op == RW_READER, owantedp);
+#define	RW_LOCKED(rw, op, owantedp) \
     LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL, \
-        (uintptr_t)__builtin_return_address(0), op == RW_READER);
+        (uintptr_t)__builtin_return_address(0), op == RW_READER, owantedp);
 #define	RW_UNLOCKED(rw, op) \
     LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw), \
         (uintptr_t)__builtin_return_address(0), op == RW_READER);
@@ -286,6 +286,7 @@ rw_vector_enter(krwlock_t *rw, const krw
 	turnstile_t *ts;
 	int queue;
 	lwp_t *l;
+	volatile void *owanted;
 	LOCKSTAT_TIMER(slptime);
 	LOCKSTAT_TIMER(slpcnt);
 	LOCKSTAT_TIMER(spintime);
@@ -297,7 +298,7 @@ rw_vector_enter(krwlock_t *rw, const krw
 
 	RW_ASSERT(rw, !cpu_intr_p());
 	RW_ASSERT(rw, curthread != 0);
-	RW_WANTLOCK(rw, op);
+	RW_WANTLOCK(rw, op, &owanted);
 
 	if (__predict_true(panicstr == NULL)) {
 		KDASSERT(pserialize_not_in_read_section());
@@ -425,7 +426,7 @@ rw_vector_enter(krwlock_t *rw, const krw
 
 	RW_ASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
 	    (op == RW_READER && RW_COUNT(rw) != 0));
-	RW_LOCKED(rw, op);
+	RW_LOCKED(rw, op, &owanted);
 }
 
 /*
@@ -576,8 +577,8 @@ rw_vector_tryenter(krwlock_t *rw, const 
 		}
 	}
 
-	RW_WANTLOCK(rw, op);
-	RW_LOCKED(rw, op);
+	RW_WANTLOCK(rw, op, NULL);
+	RW_LOCKED(rw, op, NULL);
 	RW_ASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
 	    (op == RW_READER && RW_COUNT(rw) != 0));
 
@@ -617,7 +618,8 @@ rw_downgrade(krwlock_t *rw)
 			newown = (owner & RW_NODEBUG);
 			next = rw_cas(rw, owner, newown + RW_READ_INCR);
 			if (__predict_true(next == owner)) {
-				RW_LOCKED(rw, RW_READER);
+				RW_WANTLOCK(rw, RW_READER, NULL);
+				RW_LOCKED(rw, RW_READER, NULL);
 				RW_ASSERT(rw,
 				    (rw->rw_owner & RW_WRITE_LOCKED) == 0);
 				RW_ASSERT(rw, RW_COUNT(rw) != 0);
@@ -676,8 +678,8 @@ rw_downgrade(krwlock_t *rw)
 		}
 	}
 
-	RW_WANTLOCK(rw, RW_READER);
-	RW_LOCKED(rw, RW_READER);
+	RW_WANTLOCK(rw, RW_READER, NULL);
+	RW_LOCKED(rw, RW_READER, NULL);
 	RW_ASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
 	RW_ASSERT(rw, RW_COUNT(rw) != 0);
 }
@@ -714,8 +716,8 @@ rw_tryupgrade(krwlock_t *rw)
 	}
 
 	RW_UNLOCKED(rw, RW_READER);
-	RW_WANTLOCK(rw, RW_WRITER);
-	RW_LOCKED(rw, RW_WRITER);
+	RW_WANTLOCK(rw, RW_WRITER, NULL);
+	RW_LOCKED(rw, RW_WRITER, NULL);
 	RW_ASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
 	RW_ASSERT(rw, RW_OWNER(rw) == curthread);
 
diff -r 8103eaab0195 -r 6012a26811ba sys/sys/lockdebug.h
--- a/sys/sys/lockdebug.h	Sun Feb 22 11:40:19 2026 +0000
+++ b/sys/sys/lockdebug.h	Sun Feb 22 23:25:23 2026 +0000
@@ -82,10 +82,10 @@ void	lockdebug_mem_check(const char *, s
     lockdebug_alloc(__func__, __LINE__, lock, ops, addr)
 #define	LOCKDEBUG_FREE(dodebug, lock) \
     if (dodebug) lockdebug_free(__func__, __LINE__, lock)
-#define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) \
-    if (dodebug) lockdebug_wantlock(__func__, __LINE__, lock, where, s)
-#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s) \
-    if (dodebug) lockdebug_locked(__func__, __LINE__, lock, al, where, s)
+#define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s, owantedp) \
+    if (dodebug) { lockdebug_wantlock(__func__, __LINE__, lock, where, s); __USE(owantedp); }
+#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s, owantedp) \
+    if (dodebug) { lockdebug_locked(__func__, __LINE__, lock, al, where, s); __USE(owantedp); }
 #define	LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) \
     if (dodebug) lockdebug_unlocked(__func__, __LINE__, lock, where, s)
 #define	LOCKDEBUG_BARRIER(lock, slp) \
@@ -96,12 +96,31 @@ void	lockdebug_mem_check(const char *, s
 #else	/* LOCKDEBUG */
 
 #define	LOCKDEBUG_ALLOC(lock, ops, addr)		false
-#define	LOCKDEBUG_FREE(dodebug, lock)			/* nothing */
-#define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s)	/* nothing */
-#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s)	/* nothing */
-#define	LOCKDEBUG_UNLOCKED(dodebug, lock, where, s)	/* nothing */
-#define	LOCKDEBUG_BARRIER(lock, slp)			/* nothing */
-#define	LOCKDEBUG_MEM_CHECK(base, sz)			/* nothing */
+#define	LOCKDEBUG_FREE(dodebug, lock)			__nothing
+#if 0
+#define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s, owantedp)	__nothing
+#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s, owantedp)	__nothing
+#else
+#define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s, owantedp) do	      \
+{									      \
+	if ((owantedp) == NULL)						      \
+		break;							      \
+	*((owantedp) == NULL ? (volatile void **)NULL : (owantedp)) =	      \
+	    curlwp->l_ld_wanted;					      \
+	curlwp->l_ld_wanted = (lock);					      \
+} while (0)
+#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s, owantedp) do	      \
+{									      \
+	if ((owantedp) == NULL)						      \
+		break;							      \
+	curlwp->l_ld_wanted =						      \
+	    *((owantedp) == NULL ? (volatile void **)NULL : (owantedp));      \
+	*((owantedp) == NULL ? (volatile void **)NULL : (owantedp)) = NULL;   \
+} while (0)
+#endif
+#define	LOCKDEBUG_UNLOCKED(dodebug, lock, where, s)	__nothing
+#define	LOCKDEBUG_BARRIER(lock, slp)			__nothing
+#define	LOCKDEBUG_MEM_CHECK(base, sz)			__nothing
 
 #endif	/* LOCKDEBUG */
 
# HG changeset patch
# User Taylor R Campbell <riastradh%NetBSD.org@localhost>
# Date 1771813534 0
#      Mon Feb 23 02:25:34 2026 +0000
# Branch trunk
# Node ID 0ed7b826070d61341f63757571d7406fc8466522
# Parent  6012a26811baaef76ae6093b3032098c2d60caca
# EXP-Topic riastradh-prNNNNN-showalllocks
WIP: Make `show all locks' work without LOCKDEBUG.

diff -r 6012a26811ba -r 0ed7b826070d sys/kern/subr_lockdebug.c
--- a/sys/kern/subr_lockdebug.c	Sun Feb 22 23:25:23 2026 +0000
+++ b/sys/kern/subr_lockdebug.c	Mon Feb 23 02:25:34 2026 +0000
@@ -60,6 +60,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_lockdeb
 #include <machine/db_machdep.h>
 #include <ddb/db_interface.h>
 #include <ddb/db_access.h>
+#include <ddb/db_proc.h>
 #include <ddb/db_sym.h>
 #endif
 
@@ -863,7 +864,17 @@ lockdebug_lock_print(void *addr,
 }
 
 #ifdef _KERNEL
+
+static void
+lockdebug_show_trace(const void *ptr,
+    void (*pr)(const char *, ...) __printflike(1, 2))
+{
+
+	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
+}
+
 #ifdef LOCKDEBUG
+
 static void
 lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
     void (*pr)(const char *, ...) __printflike(1, 2))
@@ -880,14 +891,6 @@ lockdebug_show_one(lwp_t *l, lockdebug_t
 }
 
 static void
-lockdebug_show_trace(const void *ptr,
-    void (*pr)(const char *, ...) __printflike(1, 2))
-{
-
-	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
-}
-
-static void
 lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
     bool show_trace)
 {
@@ -957,15 +960,18 @@ lockdebug_show_all_locks_cpu(void (*pr)(
 		}
 	}
 }
-#endif /* _KERNEL */
+
 #endif	/* LOCKDEBUG */
 
+#endif /* _KERNEL */
+
 #ifdef _KERNEL
+/* XXX dedup with usr.sbin/crash.c lockdebug_show_all_locks */
 void
 lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
     const char *modif)
 {
-#ifdef LOCKDEBUG
+#if defined LOCKDEBUG
 	bool show_trace = false;
 	if (modif[0] == 't')
 		show_trace = true;
@@ -977,6 +983,78 @@ lockdebug_show_all_locks(void (*pr)(cons
 	(*pr)("[Locks tracked through CPUs]\n");
 	lockdebug_show_all_locks_cpu(pr, show_trace);
 	(*pr)("\n");
+#elif defined DDB
+	struct proc *p;
+	bool show_trace = false;
+
+	if (modif[0] == 't')
+		show_trace = true;
+
+	(*pr)("%-5s %5s %3s %18s %18s %18s %4s\n",
+	    "PID", "LID", "CPU", "STRUCT LWP *", "LOCK", "OWNER", "BITS");
+	for (p = db_proc_first(); p != NULL; p = db_proc_next(p)) {
+		pid_t pid;
+		struct lwp *l;
+
+		db_read_bytes((db_addr_t)(uintptr_t)&p->p_pid, sizeof(pid),
+		    (char *)&pid);
+
+		for (db_read_bytes(
+			(db_addr_t)(uintptr_t) &LIST_FIRST(&p->p_lwps),
+			sizeof(l), (char *)&l);
+		    l != NULL;
+		    db_read_bytes(
+			(db_addr_t)(uintptr_t)&LIST_NEXT(l, l_sibling),
+			sizeof(l), (char *)&l)) {
+			lwpid_t lid;
+			int stat, pflag;
+			struct cpu_info *ci;
+			int cpuno = -1;
+			volatile void *ld_wanted;
+			uintptr_t ownerword;
+
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_ld_wanted,
+			    sizeof(ld_wanted), (char *)&ld_wanted);
+			if (ld_wanted == NULL)
+				continue;
+
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_lid,
+			    sizeof(lid), (char *)&lid);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_stat,
+			    sizeof(stat), (char *)&stat);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_pflag,
+			    sizeof(pflag), (char *)&pflag);
+			const bool run = (stat == LSONPROC ||
+			    (pflag & LP_RUNNING) != 0);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_cpu,
+			    sizeof(ci), (char *)&ci);
+			if (ci != NULL) {
+				db_read_bytes(
+				    (db_addr_t)(uintptr_t)&ci->ci_index,
+				    sizeof(cpuno), (char *)&cpuno);
+			}
+			db_read_bytes((db_addr_t)(uintptr_t)ld_wanted,
+			    sizeof(ownerword), (char *)&ownerword);
+			(*pr)("%-5d%c%5d %3d %18lx %18lx %18lx %4x\n",
+			    pid, run ? '>' : ' ', lid, cpuno,
+			    (long)l, (long)ld_wanted,
+			    (long)(ownerword & ~(uintptr_t)ALIGNBYTES),
+			    (int)(ownerword & ALIGNBYTES));
+			if (show_trace) {
+				const struct lwp *owner =
+				    (const void *)(ownerword &
+					~(uintptr_t)ALIGNBYTES);
+
+				/*
+				 * XXX Skip this if the owner isn't a
+				 * pointer at all -- reader-held
+				 * rwlock, or spin mutex.
+				 */
+				if (owner != NULL)
+					lockdebug_show_trace(owner, pr);
+			}
+		}
+	}
 #else
 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
 #endif	/* LOCKDEBUG */
diff -r 6012a26811ba -r 0ed7b826070d usr.sbin/crash/crash.c
--- a/usr.sbin/crash/crash.c	Sun Feb 22 23:25:23 2026 +0000
+++ b/usr.sbin/crash/crash.c	Mon Feb 23 02:25:34 2026 +0000
@@ -85,13 +85,115 @@ static struct nlist nl[] = {
 };
 
 #ifdef LOCKDEBUG
+
 struct lockdebug;
 TAILQ_HEAD(, lockdebug) ld_all;
+static bool lockdebug_enabled;
+
+/* XXX dedup with sys/kern/subr_lockdebug.c lockdebug_show_trace */
+#include <sys/lockdebug.h>
+static void
+lockdebug_show_trace(const void *ptr,
+    void (*pr)(const char *, ...) __printflike(1, 2))
+{
+
+	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
+}
+
+/* XXX dedup with sys/kern/subr_lockdebug.c lockdebug_show_all_locks */
+#include <sys/cpu.h>
+#include <sys/lwp.h>
+#include <sys/proc.h>
+void
+lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1,2),
+    const char *modif)
+{
+	struct proc *p;
+	bool show_trace = false;
+
+	if (modif[0] == 't')
+		show_trace = true;
+
+	if (lockdebug_enabled) {
+		/* XXX implement me */
+		printf("This command only works in-kernel.\n");
+		return;
+	}
+
+	(*pr)("%-5s %5s %3s %18s %18s %18s %4s\n",
+	    "PID", "LID", "CPU", "STRUCT LWP *", "LOCK", "OWNER", "BITS");
+	for (p = db_proc_first(); p != NULL; p = db_proc_next(p)) {
+		pid_t pid;
+		struct lwp *l;
+
+		db_read_bytes((db_addr_t)(uintptr_t)&p->p_pid, sizeof(pid),
+		    (char *)&pid);
+
+		for (db_read_bytes(
+			(db_addr_t)(uintptr_t) &LIST_FIRST(&p->p_lwps),
+			sizeof(l), (char *)&l);
+		    l != NULL;
+		    db_read_bytes(
+			(db_addr_t)(uintptr_t)&LIST_NEXT(l, l_sibling),
+			sizeof(l), (char *)&l)) {
+			lwpid_t lid;
+			int stat, pflag;
+			struct cpu_info *ci;
+			int cpuno = -1;
+			volatile void *ld_wanted;
+			uintptr_t ownerword;
+
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_ld_wanted,
+			    sizeof(ld_wanted), (char *)&ld_wanted);
+			if (ld_wanted == NULL)
+				continue;
+
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_lid,
+			    sizeof(lid), (char *)&lid);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_stat,
+			    sizeof(stat), (char *)&stat);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_pflag,
+			    sizeof(pflag), (char *)&pflag);
+			const bool run = (stat == LSONPROC ||
+			    (pflag & LP_RUNNING) != 0);
+			db_read_bytes((db_addr_t)(uintptr_t)&l->l_cpu,
+			    sizeof(ci), (char *)&ci);
+			if (ci != NULL) {
+				db_read_bytes(
+				    (db_addr_t)(uintptr_t)&ci->ci_index,
+				    sizeof(cpuno), (char *)&cpuno);
+			}
+			db_read_bytes((db_addr_t)(uintptr_t)ld_wanted,
+			    sizeof(ownerword), (char *)&ownerword);
+			(*pr)("%-5d%c%5d %3d %18lx %18lx %18lx %4x\n",
+			    pid, run ? '>' : ' ', lid, cpuno,
+			    (long)l, (long)ld_wanted,
+			    (long)(ownerword & ~(uintptr_t)ALIGNBYTES),
+			    (int)(ownerword & ALIGNBYTES));
+			if (show_trace) {
+				const struct lwp *owner =
+				    (const void *)(ownerword &
+					~(uintptr_t)ALIGNBYTES);
+
+				/*
+				 * XXX Skip this if the owner isn't a
+				 * pointer at all -- reader-held
+				 * rwlock, or spin mutex.
+				 */
+				if (owner != NULL)
+					lockdebug_show_trace(owner, pr);
+			}
+		}
+	}
+}
+
 #else
+
 void lockdebug_lock_print(void);
 void lockdebug_lock_print(void) {
 	warnx("No lockdebug support compiled in");
 }
+
 #endif
 
 static void
@@ -439,8 +541,12 @@ main(int argc, char **argv)
 	}
 #ifdef LOCKDEBUG
 	if ((size_t)kvm_read(kd, nl[X_LOCKDEBUG].n_value, &ld_all,
-	    sizeof(ld_all)) != sizeof(ld_all))
+	    sizeof(ld_all)) == sizeof(ld_all)) {
+		lockdebug_enabled = true;
+	} else {
 		printf("Kernel compiled without options LOCKDEBUG.\n");
+		lockdebug_enabled = false;
+	}
 #endif
 
 	/*



Home | Main Index | Thread Index | Old Index