tech-kern archive
Re: Modules loading modules?
On Wed, 28 Jul 2010, Andrew Doran wrote:

> > > > it seems to me the root problem is that module_mutex is held while
> > > > calling into the module startup routines.
> > > > thus, the right solution is to remove this requirement.
> > >
> > > Yes, that's what is needed.
> >
> > I'm far from convinced that's a good idea. First, it will probably
> > make the module code a nightmare -- what happens when you have multiple
> > interleaved loads, some of which fail at some point in their dependency
> > stack, and let's just throw in a manual modunload to mix things up
> > further. Second, and pretty much related to number one, it goes against
> > one of the most fundamental principles of robust code: atomic actions.
> > That is, atomicity: the atomic behaviour is relied upon to give "all or
> > nothing" semantics upon load and unload, like transactions in a database.
> > Admittedly some modules (not of my making) are sloppy about this and so
> > break that bit of the interface contract.
> >
> > If modload-from-modcmd is found necessary, it sounds more like a case
> > for the infamous recursive lock.
>
> Recursive lock is the way to go. I think the same lock should also cover
> all device configuration activities (i.e. autoconf) and any other
> heavy lifting where we have chunks of the system coming and going.
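To make the quoted "all or nothing" contract concrete, here is a
minimal sketch (module name and helper functions are hypothetical) of
a modcmd that unwinds partial work on failure, so that a failed
MODULE_CMD_INIT leaves no trace behind:

#include <sys/param.h>
#include <sys/module.h>

MODULE(MODULE_CLASS_DRIVER, example, NULL);

static int
example_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = example_alloc_resources();	/* hypothetical */
		if (error != 0)
			return error;
		error = example_attach();		/* hypothetical */
		if (error != 0) {
			/* Unwind the first step before failing the load */
			example_free_resources();
			return error;
		}
		return 0;
	case MODULE_CMD_FINI:
		example_detach();
		example_free_resources();
		return 0;
	default:
		return ENOTTY;
	}
}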
Well, folks, here is a first pass at recursive locks! The attached diffs
are against -current as of a few minutes ago.
Some caveats:
1. This has only been compile-tested for x86 (amd64 & i386).
2. Other architectures have not even been compiled yet. "It should
work" but you never know.
3. HPPA is an exceptional exception here, since it is the only in-tree
architecture I found that does not use __HAVE_SIMPLE_MUTEXES.
4. There is only one known use case for this so far: modules loading
other modules from within their xxx_modcmd() routine. The specific
use case we have involves loading the acpicpu driver/module, which
eventually results in an attempt to load acpiverbose (see the
sketch below).
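A minimal sketch of that use case (driver and module names are
hypothetical, patterned on the acpicpu/acpiverbose pair): the module
framework calls our modcmd with module_lock held, and module_autoload()
acquires module_lock again, so the same LWP re-enters the lock:

#include <sys/param.h>
#include <sys/module.h>

MODULE(MODULE_CLASS_DRIVER, mydrv, NULL);

static int
mydrv_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Re-enters module_lock, already held by our caller */
		(void)module_autoload("mydrvverbose", MODULE_CLASS_MISC);
		return 0;
	case MODULE_CMD_FINI:
		return 0;
	default:
		return ENOTTY;
	}
}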
It would be really nice if the community could:
A. Compile-test on additional architectures, especially HPPA
B. Test to see that existing mutex operations still work correctly
C. Exercise the known use case if possible
D. Identify additional use cases
There is one place in the code (marked with a great big XXX) where I am
simply unsure if I need to use a CAS-type operation for updating the
recursion count on the recursive mutex. I _think_ it is safe to simply
increment the field, but I am totally willing for any of you
experts to tell me why this might not be safe. :)
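For what it's worth, a reasoning sketch (not part of the diff) of why
a plain increment should suffice: mtx_recurse is only ever modified by
the LWP that already owns the mutex, and an adaptive mutex initialized
at IPL_NONE cannot be re-entered from interrupt context, so no
concurrent update of the field is possible. An assertion can make that
invariant explicit:

	if (__predict_false(MUTEX_RECURSIVE_P(mtx))) {
		/* Only the owning LWP ever touches mtx_recurse */
		MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
		mtx->mtx_recurse++;	/* no CAS needed under this invariant */
	}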
I will be updating one of my home machines (amd64) to use this code in
the next few days. I don't plan on making any commits until I've had
some positive testing feedback, and hopefully some expert commentary on
the XXX. (Some additional "consensus" that this is the "right thing to
do" would also be appreciated!)
-------------------------------------------------------------------------
| Paul Goyette | PGP Key fingerprint: | E-mail addresses: |
| Customer Service | FA29 0E3B 35AF E8AE 6651 | paul at whooppee.com |
| Network Engineer | 0786 F758 55DE 53BA 7731 | pgoyette at juniper.net |
| Kernel Developer | | pgoyette at netbsd.org |
-------------------------------------------------------------------------
Index: sys/arch/alpha/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- sys/arch/alpha/include/mutex.h 28 Apr 2008 20:23:11 -0000 1.4
+++ sys/arch/alpha/include/mutex.h 31 Jul 2010 22:34:30 -0000
@@ -43,7 +43,11 @@ struct kmutex {
struct kmutex {
union {
- volatile uintptr_t mtxa_owner;
+ struct {
+ volatile uintptr_t mtxa_owner;
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
struct {
volatile uint8_t mtxs_flags;
ipl_cookie_t mtxs_ipl;
@@ -53,7 +57,8 @@ struct kmutex {
__cpu_simple_lock_t mtx_lock;
};
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_flags u.s.mtxs_flags
#define mtx_ipl u.s.mtxs_ipl
Index: sys/arch/arm/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/mutex.h,v
retrieving revision 1.10
diff -u -p -r1.10 mutex.h
--- sys/arch/arm/include/mutex.h 28 Apr 2008 20:23:14 -0000 1.10
+++ sys/arch/arm/include/mutex.h 31 Jul 2010 22:34:31 -0000
@@ -57,7 +57,11 @@ struct kmutex {
struct kmutex {
union {
/* Adaptive mutex */
- volatile uintptr_t mtxa_owner; /* 0-3 */
+ struct {
+ volatile uintptr_t mtxa_owner; /* 0-3 */
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
/* Spin mutex */
struct {
@@ -69,7 +73,8 @@ struct kmutex {
} u;
};
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/hppa/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/include/mutex.h,v
retrieving revision 1.9
diff -u -p -r1.9 mutex.h
--- sys/arch/hppa/include/mutex.h 28 Apr 2008 20:23:23 -0000 1.9
+++ sys/arch/hppa/include/mutex.h 31 Jul 2010 22:34:32 -0000
@@ -60,6 +60,10 @@ struct kmutex {
/* For LOCKDEBUG */
uint8_t mtxs_dodebug; /* 25 */
+
+ /* For recursive locks */
+ uint8_t mtxs_recurse; /* 26 */
+ uint8_t mtxs_recursive; /* 27 */
} s;
#endif
uint8_t mtxu_pad[32]; /* 0 - 32 */
@@ -73,6 +77,7 @@ struct kmutex {
#define mtx_lock u.s.mtxu_lock
#define mtx_owner u.s.mtxs_owner
+#define mtx_recurse u.s.mtxs_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_waiters u.s.mtxs_waiters
#define mtx_dodebug u.s.mtxs_dodebug
@@ -129,6 +134,12 @@ MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx,
}
static inline void
+MUTEX_INITIALIZE_RECURSIVE(kmutex_t *mtx)
+{
+ mtx->mtx_recursive = 1;
+}
+
+static inline void
MUTEX_DESTROY(kmutex_t *mtx)
{
mtx->mtx_owner = 0xffffffff;
@@ -147,6 +158,12 @@ MUTEX_SPIN_P(volatile kmutex_t *mtx)
}
static inline int
+MUTEX_RECURSIVE_P(volatile kmutex_t *mtx)
+{
+ return mtx->mtx_recursive != 0;
+}
+
+static inline int
MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
{
return mtx->mtx_owner != MUTEX_SPIN_FLAG;
Index: sys/arch/ia64/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/ia64/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- sys/arch/ia64/include/mutex.h 20 Jul 2009 04:41:37 -0000 1.4
+++ sys/arch/ia64/include/mutex.h 31 Jul 2010 22:34:32 -0000
@@ -36,7 +36,7 @@
struct kmutex {
uintptr_t mtx_pad1;
- uint32_t mtx_pad2[2];
+ uint32_t mtx_pad2[3];
};
#else
@@ -45,6 +45,8 @@ struct kmutex {
volatile uintptr_t mtx_owner;
ipl_cookie_t mtx_ipl;
__cpu_simple_lock_t mtx_lock;
+ volatile uint8_t mtx_recurse;
+ volatile uint8_t mtx_unused[3];
};
Index: sys/arch/m68k/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- sys/arch/m68k/include/mutex.h 28 Apr 2008 20:23:26 -0000 1.7
+++ sys/arch/m68k/include/mutex.h 31 Jul 2010 22:34:33 -0000
@@ -45,17 +45,23 @@ struct kmutex {
struct kmutex {
union {
/* Adaptive mutex */
- volatile uintptr_t mtxu_owner; /* 0-3 */
+ struct {
+ volatile uintptr_t mtxa_owner; /* 0-3 */
+ volatile uint8_t mtxa_recurse; /* 4 */
+ volatile uint8_t mtxa_unused[3]; /* 5-7 */
+ } a;
+ /* Spin */
struct {
ipl_cookie_t mtxs_ipl; /* 0-1 */
- __cpu_simple_lock_t mtxs_lock; /* 2 */
- uint8_t mtxs_unused; /* 3 */
+ __cpu_simple_lock_t mtxs_lock; /* 2 */
+ uint8_t mtxs_unused[5]; /* 3-7 */
} s;
} u;
};
-#define mtx_owner u.mtxu_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/mips/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/mutex.h,v
retrieving revision 1.6
diff -u -p -r1.6 mutex.h
--- sys/arch/mips/include/mutex.h 28 Apr 2008 20:23:28 -0000 1.6
+++ sys/arch/mips/include/mutex.h 31 Jul 2010 22:34:34 -0000
@@ -49,6 +49,7 @@ struct kmutex {
volatile uintptr_t mtx_owner;
ipl_cookie_t mtx_ipl;
__cpu_simple_lock_t mtx_lock;
+ volatile uint8_t mtx_recurse;
};
#define __HAVE_SIMPLE_MUTEXES 1
Index: sys/arch/powerpc/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- sys/arch/powerpc/include/mutex.h 28 Apr 2008 20:23:32 -0000 1.4
+++ sys/arch/powerpc/include/mutex.h 31 Jul 2010 22:34:34 -0000
@@ -43,11 +43,13 @@ struct kmutex {
volatile uintptr_t mtxm_owner;
ipl_cookie_t mtxm_ipl;
__cpu_simple_lock_t mtxm_lock;
+ volatile uint8_t mtxm_recurse;
+ volatile uint8_t mtxm_unused[3];
} m;
#endif
struct {
uintptr_t mtxp_a;
- uint32_t mtxp_b[2];
+ uint32_t mtxp_b[3];
} p;
} u;
};
@@ -55,6 +57,7 @@ struct kmutex {
#ifdef __MUTEX_PRIVATE
#define mtx_owner u.m.mtxm_owner
+#define mtx_recurse u.m.mtxm_recurse
#define mtx_ipl u.m.mtxm_ipl
#define mtx_lock u.m.mtxm_lock
Index: sys/arch/sh3/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/include/mutex.h,v
retrieving revision 1.9
diff -u -p -r1.9 mutex.h
--- sys/arch/sh3/include/mutex.h 28 Apr 2008 20:23:35 -0000 1.9
+++ sys/arch/sh3/include/mutex.h 31 Jul 2010 22:34:35 -0000
@@ -43,19 +43,24 @@ struct kmutex {
struct kmutex {
union {
/* Adaptive mutex */
- volatile uintptr_t mtxa_owner; /* 0-3 */
+ struct {
+ volatile uintptr_t mtxa_owner; /* 0-3 */
+ volatile uint8_t mtxa_recurse; /* 4 */
+ volatile uint8_t mtxa_unused[3]; /* 5-7 */
+ } a;
/* Spin mutex */
struct {
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
- volatile uint8_t mtxs_unused;
+ volatile uint8_t mtxs_unused[5];
} s;
} u;
};
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/sparc/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/include/mutex.h,v
retrieving revision 1.10
diff -u -p -r1.10 mutex.h
--- sys/arch/sparc/include/mutex.h 28 Apr 2008 20:23:36 -0000 1.10
+++ sys/arch/sparc/include/mutex.h 31 Jul 2010 22:34:36 -0000
@@ -38,13 +38,18 @@
struct kmutex {
union {
- volatile uintptr_t mtxa_owner;
+ struct {
+ volatile uintptr_t mtxa_owner;
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
+
#ifdef __MUTEX_PRIVATE
struct {
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
- volatile uint8_t mtxs_unused;
+ volatile uint8_t mtxs_unused[5];
} s;
#endif
} u;
@@ -52,7 +57,8 @@ struct kmutex {
#ifdef __MUTEX_PRIVATE
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/sparc64/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- sys/arch/sparc64/include/mutex.h 28 Apr 2008 20:23:37 -0000 1.4
+++ sys/arch/sparc64/include/mutex.h 31 Jul 2010 22:34:36 -0000
@@ -34,13 +34,18 @@
struct kmutex {
union {
- volatile uintptr_t mtxa_owner;
+ struct {
+ volatile uintptr_t mtxa_owner;
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
+
#ifdef __MUTEX_PRIVATE
struct {
uint8_t mtxs_unused;
__cpu_simple_lock_t mtxs_lock;
ipl_cookie_t mtxs_ipl;
- uint8_t mtxs_dummy;
+ uint8_t mtxs_dummy[5];
} s;
#endif
} u;
@@ -48,7 +53,8 @@ struct kmutex {
#ifdef __MUTEX_PRIVATE
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/usermode/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/usermode/include/mutex.h,v
retrieving revision 1.3
diff -u -p -r1.3 mutex.h
--- sys/arch/usermode/include/mutex.h 28 Apr 2008 20:23:39 -0000 1.3
+++ sys/arch/usermode/include/mutex.h 31 Jul 2010 22:34:37 -0000
@@ -34,13 +34,18 @@
struct kmutex {
union {
- volatile uintptr_t mtxa_owner;
+ struct {
+ volatile uintptr_t mtxa_owner;
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
+
#ifdef __MUTEX_PRIVATE
struct {
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
- volatile uint8_t mtxs_unused;
+ volatile uint8_t mtxs_unused[5];
} s;
#endif
} u;
@@ -48,7 +53,8 @@ struct kmutex {
#ifdef __MUTEX_PRIVATE
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/vax/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/include/mutex.h,v
retrieving revision 1.12
diff -u -p -r1.12 mutex.h
--- sys/arch/vax/include/mutex.h 28 Apr 2008 20:23:39 -0000 1.12
+++ sys/arch/vax/include/mutex.h 31 Jul 2010 22:34:38 -0000
@@ -44,6 +44,7 @@
struct kmutex {
uintptr_t mtx_pad1;
+ uint8_t mtx_pad2[4];
};
#else /* __MUTEX_PRIVATE */
@@ -51,19 +52,24 @@ struct kmutex {
struct kmutex {
union {
/* Adaptive mutex */
- volatile uintptr_t mtxa_owner; /* 0-3 */
+ struct {
+ volatile uintptr_t mtxa_owner; /* 0-3 */
+ volatile uint8_t mtxa_recurse; /* 4 */
+ volatile uint8_t mtxa_unused[3]; /* 5-7 */
+ } a;
/* Spin mutex */
struct {
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
- volatile uint8_t mtxs_unused;
+ volatile uint8_t mtxs_unused[5];
} s;
} u;
};
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/arch/x86/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/mutex.h,v
retrieving revision 1.6
diff -u -p -r1.6 mutex.h
--- sys/arch/x86/include/mutex.h 24 Apr 2009 17:49:51 -0000 1.6
+++ sys/arch/x86/include/mutex.h 31 Jul 2010 22:34:39 -0000
@@ -34,13 +34,17 @@
struct kmutex {
union {
- volatile uintptr_t mtxa_owner;
+ struct {
+ volatile uintptr_t mtxa_owner;
+ volatile uint8_t mtxa_recurse;
+ volatile uint8_t mtxa_unused[3];
+ } a;
#ifdef __MUTEX_PRIVATE
struct {
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
- volatile uint8_t mtxs_unused;
+ volatile uint8_t mtxs_unused[5];
} s;
#endif
} u;
@@ -48,7 +52,8 @@ struct kmutex {
#ifdef __MUTEX_PRIVATE
-#define mtx_owner u.mtxa_owner
+#define mtx_owner u.a.mtxa_owner
+#define mtx_recurse u.a.mtxa_recurse
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
Index: sys/sys/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/sys/mutex.h,v
retrieving revision 1.20
diff -u -p -r1.20 mutex.h
--- sys/sys/mutex.h 8 Feb 2010 09:54:27 -0000 1.20
+++ sys/sys/mutex.h 31 Jul 2010 22:34:39 -0000
@@ -51,6 +51,7 @@
* be able to access the following members:
*
* uintptr_t mtx_owner
+ * uint8_t mtx_recurse
* ipl_cookie_t mtx_ipl
* __cpu_simple_lock_t mtx_lock
*
@@ -82,6 +83,9 @@
* MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
* Initialize an adaptive mutex.
*
+ * MUTEX_INITIALIZE_RECURSIVE(mtx)
+ * Finish initialization of a recursive mutex.
+ *
* MUTEX_DESTROY(mtx)
* Tear down a mutex.
*
@@ -91,6 +95,9 @@
* MUTEX_SPIN_P(mtx)
* Evaluates to true if the mutex is a spin mutex.
*
+ * MUTEX_RECURSIVE_P(mtx)
+ * Evaluates to true if the mutex is a recursive mutex.
+ *
* MUTEX_OWNER(owner)
* Returns the owner of the adaptive mutex (LWP address).
*
@@ -160,6 +167,7 @@ typedef struct kmutex kmutex_t;
#define MUTEX_BIT_SPIN 0x01
#define MUTEX_BIT_WAITERS 0x02
+#define MUTEX_BIT_RECURSIVE 0x08
#if defined(LOCKDEBUG)
#define MUTEX_BIT_NODEBUG 0x04 /* LOCKDEBUG disabled */
@@ -192,6 +200,7 @@ void mutex_wakeup(kmutex_t *);
#ifdef _KERNEL
+void mutex_init_recursive(kmutex_t *);
void mutex_init(kmutex_t *, kmutex_type_t, int);
void mutex_destroy(kmutex_t *);
Index: sys/kern/kern_mutex.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_mutex.c,v
retrieving revision 1.49
diff -u -p -r1.49 kern_mutex.c
--- sys/kern/kern_mutex.c 8 Feb 2010 09:54:27 -0000 1.49
+++ sys/kern/kern_mutex.c 31 Jul 2010 22:34:40 -0000
@@ -170,6 +170,11 @@ do { \
__cpu_simple_lock_init(&(mtx)->mtx_lock); \
} while (/* CONSTCOND */ 0)
+#define MUTEX_INITIALIZE_RECURSIVE(mtx) \
+do { \
+ (mtx)->mtx_owner |= MUTEX_BIT_RECURSIVE; \
+} while (/* CONSTCOND */ 0)
+
#define MUTEX_DESTROY(mtx) \
do { \
(mtx)->mtx_owner = MUTEX_THREAD; \
@@ -179,6 +184,8 @@ do { \
(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define MUTEX_ADAPTIVE_P(mtx) \
(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)
+#define MUTEX_RECURSIVE_P(mtx) \
+ (((mtx)->mtx_owner & MUTEX_BIT_RECURSIVE) != 0)
#define MUTEX_DEBUG_P(mtx) (((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
@@ -286,6 +293,8 @@ mutex_dump(volatile void *cookie)
printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
(long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
MUTEX_SPIN_P(mtx));
+ if (MUTEX_RECURSIVE_P(mtx))
+ printf_nolog("recursion lvl : %16d\n", mtx->mtx_recurse);
}
/*
@@ -304,6 +313,21 @@ mutex_abort(kmutex_t *mtx, const char *f
}
/*
+ * mutex_init_recursive:
+ *
+ * Initialize a recursive mutex for use. These are just like
+ * adaptive mutexes, except that
+ * 1. They must be initialized at IPL_NONE
+ * 2. They may be acquired recursively
+ */
+void
+mutex_init_recursive(kmutex_t *mtx)
+{
+ mutex_init(mtx, MUTEX_DEFAULT, IPL_NONE);
+ MUTEX_INITIALIZE_RECURSIVE(mtx);
+}
+
+/*
* mutex_init:
*
* Initialize a mutex for use. Note that adaptive mutexes are in
@@ -532,8 +556,23 @@ mutex_vector_enter(kmutex_t *mtx)
if (__predict_false(panicstr != NULL))
return;
- if (__predict_false(MUTEX_OWNER(owner) == curthread))
+ if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
+ /*
+ * Handle recursive mutexes
+ *
+ * XXX Does this need to be done atomically? The
+ * XXX curthread already owns the mutex and it cannot
+ * XXX possibly be running on another CPU, so there
+ * XXX really should not be any chance of conflicting
+ * XXX updates.
+ */
+ if (__predict_false(MUTEX_RECURSIVE_P(mtx))) {
+ mtx->mtx_recurse++;
+ MUTEX_ASSERT(mtx, mtx->mtx_recurse != 0);
+ break;
+ }
MUTEX_ABORT(mtx, "locking against myself");
+ }
#ifdef MULTIPROCESSOR
/*
@@ -740,6 +779,17 @@ mutex_vector_exit(kmutex_t *mtx)
curthread = (uintptr_t)curlwp;
MUTEX_DASSERT(mtx, curthread != 0);
MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
+
+ /*
+ * For recursive mutexes, a non-zero recursion count means this is
+ * a nested release: decrement it and return; curthread still owns it.
+ */
+ if (__predict_false(MUTEX_RECURSIVE_P(mtx)) &&
+     mtx->mtx_recurse != 0) {
+ mtx->mtx_recurse--;
+ return;
+ }
+
MUTEX_UNLOCKED(mtx);
#ifdef LOCKDEBUG
Index: sys/arch/amd64/amd64/genassym.cf
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/genassym.cf,v
retrieving revision 1.45
diff -u -p -r1.45 genassym.cf
--- sys/arch/amd64/amd64/genassym.cf 7 Jul 2010 01:14:52 -0000 1.45
+++ sys/arch/amd64/amd64/genassym.cf 31 Jul 2010 22:34:41 -0000
@@ -342,7 +342,8 @@ define PSL_MBO PSL_MBO
define MTX_IPL offsetof(struct kmutex, u.s.mtxs_ipl)
define MTX_LOCK offsetof(struct kmutex, u.s.mtxs_lock)
-define MTX_OWNER offsetof(struct kmutex, u.mtxa_owner)
+define MTX_OWNER offsetof(struct kmutex, u.a.mtxa_owner)
+define MTX_RECURSE offsetof(struct kmutex, u.a.mtxa_recurse)
define RW_OWNER offsetof(struct krwlock, rw_owner)
define RW_WRITE_LOCKED RW_WRITE_LOCKED
Index: sys/rump/librump/rumpkern/locks.c
===================================================================
RCS file: /cvsroot/src/sys/rump/librump/rumpkern/locks.c,v
retrieving revision 1.42
diff -u -p -r1.42 locks.c
--- sys/rump/librump/rumpkern/locks.c 9 Jun 2010 07:54:13 -0000 1.42
+++ sys/rump/librump/rumpkern/locks.c 31 Jul 2010 22:34:42 -0000
@@ -64,6 +64,15 @@ mutex_init(kmutex_t *mtx, kmutex_type_t
}
void
+mutex_init_recursive(kmutex_t *mtx)
+{
+
+ CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
+
+ rumpuser_mutex_recursive_init((struct rumpuser_mtx **)mtx);
+}
+
+void
mutex_destroy(kmutex_t *mtx)
{
Index: sys/kern/kern_module.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_module.c,v
retrieving revision 1.70
diff -u -p -r1.70 kern_module.c
--- sys/kern/kern_module.c 26 Jun 2010 07:23:57 -0000 1.70
+++ sys/kern/kern_module.c 31 Jul 2010 22:34:42 -0000
@@ -332,7 +332,7 @@ module_init(void)
if (module_map == NULL) {
module_map = kernel_map;
}
- mutex_init(&module_lock, MUTEX_DEFAULT, IPL_NONE);
+ mutex_init_recursive(&module_lock);
cv_init(&module_thread_cv, "modunload");
mutex_init(&module_thread_lock, MUTEX_DEFAULT, IPL_NONE);
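For reviewers, an illustrative (not compiled) fragment of the net
behaviour the patch intends:

	kmutex_t m;

	mutex_init_recursive(&m);	/* adaptive, IPL_NONE, recursion OK */
	mutex_enter(&m);	/* normal acquisition, mtx_recurse == 0 */
	mutex_enter(&m);	/* same LWP: mtx_recurse -> 1, no abort */
	mutex_exit(&m);		/* nested release: mtx_recurse -> 0, still held */
	mutex_exit(&m);		/* final release; other LWPs may now acquire */
	mutex_destroy(&m);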