Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/drm2 Adapt ww_mutex to use LOCKDEBUG.
details: https://anonhg.NetBSD.org/src/rev/ad69ef5714a7
branches: trunk
changeset: 338401:ad69ef5714a7
user: riastradh <riastradh%NetBSD.org@localhost>
date: Thu May 21 21:55:55 2015 +0000
description:
Adapt ww_mutex to use LOCKDEBUG.
Should help track down PR 49862.
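
For context, a small illustrative sketch (not part of this commit) of the LOCKDEBUG wiring pattern the change applies to ww_mutex, shown on a hypothetical lock type. All foo_* names are invented for illustration; the LOCKDEBUG_* hooks, the lockops_t/lo_dump registration, and the "interlock not at offset 0" layout trick mirror what the diff below does for struct ww_mutex.

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/mutex.h>
#include <sys/systm.h>

struct foo_lock {
	bool		fl_locked;	/* hypothetical lock state */
	/*
	 * As in the ww_mutex change: keep the interlock away from
	 * offset 0 so the foo_lock and its kmutex have distinct
	 * addresses for LOCKDEBUG purposes.
	 */
	kmutex_t	fl_interlock;
	kcondvar_t	fl_cv;
#ifdef LOCKDEBUG
	bool		fl_debug;	/* true if LOCKDEBUG tracks this lock */
#endif
};

#ifdef LOCKDEBUG
static void
foo_lock_dump(volatile void *cookie)
{
	volatile struct foo_lock *fl = cookie;

	/* Called by LOCKDEBUG (e.g. from ddb) to describe the lock. */
	printf_nolog("%-13s: %s\n", "state",
	    fl->fl_locked ? "locked" : "unlocked");
}

static lockops_t foo_lock_lockops = {
	.lo_name = "Hypothetical foo lock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = foo_lock_dump,
};
#endif

static void
foo_lock_init(struct foo_lock *fl)
{

	fl->fl_locked = false;
	mutex_init(&fl->fl_interlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fl->fl_cv, "foolock");
#ifdef LOCKDEBUG
	/* Register with LOCKDEBUG; remember whether it is tracked. */
	fl->fl_debug = LOCKDEBUG_ALLOC(fl, &foo_lock_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}

static void
foo_lock_destroy(struct foo_lock *fl)
{

	KASSERT(!fl->fl_locked);
#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(fl->fl_debug, fl);
#endif
	cv_destroy(&fl->fl_cv);
	mutex_destroy(&fl->fl_interlock);
}

static void
foo_lock_acquire(struct foo_lock *fl)
{

	/* Tell LOCKDEBUG we intend to take the lock before sleeping. */
	LOCKDEBUG_WANTLOCK(fl->fl_debug, fl,
	    (uintptr_t)__builtin_return_address(0), 0);
	mutex_enter(&fl->fl_interlock);
	while (fl->fl_locked)
		cv_wait(&fl->fl_cv, &fl->fl_interlock);
	fl->fl_locked = true;
	/* Record the acquisition once the lock is actually held. */
	LOCKDEBUG_LOCKED(fl->fl_debug, fl, NULL,
	    (uintptr_t)__builtin_return_address(0), 0);
	mutex_exit(&fl->fl_interlock);
}

static void
foo_lock_release(struct foo_lock *fl)
{

	/* Record the release before giving the lock up. */
	LOCKDEBUG_UNLOCKED(fl->fl_debug, fl,
	    (uintptr_t)__builtin_return_address(0), 0);
	mutex_enter(&fl->fl_interlock);
	fl->fl_locked = false;
	cv_broadcast(&fl->fl_cv);
	mutex_exit(&fl->fl_interlock);
}

The diff below follows the same ordering for ww_mutex: WW_WANTLOCK before waiting (deferred when the caller may already hold the mutex via its acquire context, in which case EALREADY is returned instead), WW_LOCKED once the mutex is actually owned, and LOCKDEBUG_ALLOC/LOCKDEBUG_FREE bracketing ww_mutex_init and ww_mutex_destroy.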
diffstat:
sys/external/bsd/drm2/include/linux/ww_mutex.h | 11 +-
sys/external/bsd/drm2/linux/linux_ww_mutex.c | 164 +++++++++++++++++++++---
2 files changed, 152 insertions(+), 23 deletions(-)
diffs (truncated from 427 to 300 lines):
diff -r e7d62fb888fc -r ad69ef5714a7 sys/external/bsd/drm2/include/linux/ww_mutex.h
--- a/sys/external/bsd/drm2/include/linux/ww_mutex.h Thu May 21 20:50:57 2015 +0000
+++ b/sys/external/bsd/drm2/include/linux/ww_mutex.h Thu May 21 21:55:55 2015 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: ww_mutex.h,v 1.10 2015/01/08 23:35:47 riastradh Exp $ */
+/* $NetBSD: ww_mutex.h,v 1.11 2015/05/21 21:55:55 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -56,7 +56,6 @@
};
struct ww_mutex {
- kmutex_t wwm_lock;
enum ww_mutex_state {
WW_UNLOCKED, /* nobody owns it */
WW_OWNED, /* owned by a lwp without a context */
@@ -67,9 +66,17 @@
struct lwp *owner;
struct ww_acquire_ctx *ctx;
} wwm_u;
+ /*
+ * XXX wwm_lock must *not* be first, so that the ww_mutex has a
+ * different address from the kmutex for LOCKDEBUG purposes.
+ */
+ kmutex_t wwm_lock;
struct ww_class *wwm_class;
struct rb_tree wwm_waiters;
kcondvar_t wwm_cv;
+#ifdef LOCKDEBUG
+ bool wwm_debug;
+#endif
};
/* XXX Make the nm output a little more greppable... */
diff -r e7d62fb888fc -r ad69ef5714a7 sys/external/bsd/drm2/linux/linux_ww_mutex.c
--- a/sys/external/bsd/drm2/linux/linux_ww_mutex.c Thu May 21 20:50:57 2015 +0000
+++ b/sys/external/bsd/drm2/linux/linux_ww_mutex.c Thu May 21 21:55:55 2015 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $ */
+/* $NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,17 +30,28 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
+#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <linux/ww_mutex.h>
+#define WW_WANTLOCK(WW) \
+ LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW), \
+ (uintptr_t)__builtin_return_address(0), 0)
+#define WW_LOCKED(WW) \
+ LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL, \
+ (uintptr_t)__builtin_return_address(0), 0)
+#define WW_UNLOCKED(WW) \
+ LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), \
+ (uintptr_t)__builtin_return_address(0), 0)
+
static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
@@ -109,6 +120,53 @@
ctx->wwx_owner = NULL;
}
+#ifdef LOCKDEBUG
+static void
+ww_dump(volatile void *cookie)
+{
+ volatile struct ww_mutex *mutex = cookie;
+
+ printf_nolog("%-13s: ", "state");
+ switch (mutex->wwm_state) {
+ case WW_UNLOCKED:
+ printf_nolog("unlocked\n");
+ break;
+ case WW_OWNED:
+ printf_nolog("owned by lwp\n");
+ printf_nolog("%-13s: %p\n", "owner", mutex->wwm_u.owner);
+ printf_nolog("%-13s: %s\n", "waiters",
+ cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+ ? "yes" : "no");
+ break;
+ case WW_CTX:
+ printf_nolog("owned via ctx\n");
+ printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+ printf_nolog("%-13s: %p\n", "lwp",
+ mutex->wwm_u.ctx->wwx_owner);
+ printf_nolog("%-13s: %s\n", "waiters",
+ cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+ ? "yes" : "no");
+ break;
+ case WW_WANTOWN:
+ printf_nolog("owned via ctx\n");
+ printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+ printf_nolog("%-13s: %p\n", "lwp",
+ mutex->wwm_u.ctx->wwx_owner);
+ printf_nolog("%-13s: %s\n", "waiters", "yes (noctx)");
+ break;
+ default:
+ printf_nolog("unknown\n");
+ break;
+ }
+}
+
+static lockops_t ww_lockops = {
+ .lo_name = "Wait/wound mutex",
+ .lo_type = LOCKOPS_SLEEP,
+ .lo_dump = ww_dump,
+};
+#endif
+
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{
@@ -122,12 +180,21 @@
mutex->wwm_class = class;
rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
cv_init(&mutex->wwm_cv, "linuxwwm");
+#ifdef LOCKDEBUG
+ mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
+ (uintptr_t)__builtin_return_address(0));
+#endif
}
void
ww_mutex_destroy(struct ww_mutex *mutex)
{
+ KASSERT(mutex->wwm_state == WW_UNLOCKED);
+
+#ifdef LOCKDEBUG
+ LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
+#endif
cv_destroy(&mutex->wwm_cv);
#if 0
rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
@@ -267,6 +334,7 @@
case WW_UNLOCKED:
mutex->wwm_state = WW_OWNED;
mutex->wwm_u.owner = curlwp;
+ WW_LOCKED(mutex);
break;
case WW_OWNED:
KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -301,6 +369,7 @@
case WW_UNLOCKED:
mutex->wwm_state = WW_OWNED;
mutex->wwm_u.owner = curlwp;
+ WW_LOCKED(mutex);
break;
case WW_OWNED:
KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -335,9 +404,15 @@
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
+ /*
+ * We do not WW_WANTLOCK at the beginning because we may
+ * correctly already hold it, if we have a context, in which
+ * case we must return EALREADY to the caller.
+ */
ASSERT_SLEEPABLE();
if (ctx == NULL) {
+ WW_WANTLOCK(mutex);
ww_mutex_lock_noctx(mutex);
return 0;
}
@@ -355,10 +430,13 @@
mutex_enter(&mutex->wwm_lock);
retry: switch (mutex->wwm_state) {
case WW_UNLOCKED:
+ WW_WANTLOCK(mutex);
mutex->wwm_state = WW_CTX;
mutex->wwm_u.ctx = ctx;
+ WW_LOCKED(mutex);
goto locked;
case WW_OWNED:
+ WW_WANTLOCK(mutex);
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ww_mutex_state_wait(mutex, WW_OWNED);
@@ -372,10 +450,12 @@
panic("wait/wound mutex %p in bad state: %d",
mutex, (int)mutex->wwm_state);
}
+
KASSERT(mutex->wwm_state == WW_CTX);
KASSERT(mutex->wwm_u.ctx != NULL);
KASSERT((mutex->wwm_u.ctx == ctx) ||
(mutex->wwm_u.ctx->wwx_owner != curlwp));
+
if (mutex->wwm_u.ctx == ctx) {
/*
* We already own it. Yes, this can happen correctly
@@ -384,7 +464,15 @@
*/
mutex_exit(&mutex->wwm_lock);
return -EALREADY;
- } else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
+ }
+
+ /*
+ * We do not own it. We can safely assert to LOCKDEBUG that we
+ * want it.
+ */
+ WW_WANTLOCK(mutex);
+
+ if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
/*
* Owned by a higher-priority party. Tell the caller
* to unlock everything and start over.
@@ -394,14 +482,14 @@
ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
mutex_exit(&mutex->wwm_lock);
return -EDEADLK;
- } else {
- /*
- * Owned by a lower-priority party. Ask that party to
- * wake us when it is done or it realizes it needs to
- * back off.
- */
- ww_mutex_lock_wait(mutex, ctx);
}
+
+ /*
+ * Owned by a lower-priority party. Ask that party to wake us
+ * when it is done or it realizes it needs to back off.
+ */
+ ww_mutex_lock_wait(mutex, ctx);
+
locked: ctx->wwx_acquired++;
KASSERT((mutex->wwm_state == WW_CTX) ||
(mutex->wwm_state == WW_WANTOWN));
@@ -415,10 +503,17 @@
{
int ret;
+ /*
+ * We do not WW_WANTLOCK at the beginning because we may
+ * correctly already hold it, if we have a context, in which
+ * case we must return EALREADY to the caller.
+ */
ASSERT_SLEEPABLE();
- if (ctx == NULL)
+ if (ctx == NULL) {
+ WW_WANTLOCK(mutex);
return ww_mutex_lock_noctx_sig(mutex);
+ }
KASSERTMSG((ctx->wwx_owner == curlwp),
"ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
@@ -433,10 +528,13 @@
mutex_enter(&mutex->wwm_lock);
retry: switch (mutex->wwm_state) {
case WW_UNLOCKED:
+ WW_WANTLOCK(mutex);
mutex->wwm_state = WW_CTX;
mutex->wwm_u.ctx = ctx;
+ WW_LOCKED(mutex);
goto locked;
case WW_OWNED:
+ WW_WANTLOCK(mutex);
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
@@ -454,10 +552,12 @@
panic("wait/wound mutex %p in bad state: %d",
mutex, (int)mutex->wwm_state);
}
+
KASSERT(mutex->wwm_state == WW_CTX);
KASSERT(mutex->wwm_u.ctx != NULL);
KASSERT((mutex->wwm_u.ctx == ctx) ||
(mutex->wwm_u.ctx->wwx_owner != curlwp));
+
if (mutex->wwm_u.ctx == ctx) {
/*
* We already own it. Yes, this can happen correctly
@@ -466,7 +566,15 @@
*/
mutex_exit(&mutex->wwm_lock);
return -EALREADY;
- } else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
+ }