Source-Changes archive


Re: CVS commit: [vmlocking] src/sys



> On Mon, Jul 30, 2007 at 11:29:09AM +0900, YAMAMOTO Takashi wrote:
> 
> > > Trap free() of areas that contain undestroyed locks. Not a major problem
> > > but it helps to catch bugs.
> > 
> > how about making lockdebug_lookup use the hash as well, and kill lock id?
> 
> I wanted to avoid that so that LOCKDEBUG would not be so slow. Then again,
> LOCKDEBUG doesn't really need to be quick; it only needs to _not_ make the
> machine unusable.
> 
> I think this is a good idea. lockdebug_lookup() could use a different
> hashing method that takes the lower-order bits into account - I'll
> investigate that.
> 
> Andrew

is the attached patch ok for you?
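
(for illustration only, not part of the patch: a lockdebug_lookup()
hash that folds the lower-order bits in, as discussed above, might
have looked roughly like the sketch below.  PGSHIFT and LD_MLISTS are
taken from the old hash scheme.  the patch instead drops the hash and
keys a red-black tree on the lock address.)

	/* sketch: mix the low-order bits with the page number instead
	 * of hashing on the page number alone */
	static inline u_int
	ld_hash(volatile void *addr)
	{
		uintptr_t a = (uintptr_t)addr;

		/* locks are at least word aligned, so the lowest few
		 * bits carry no information */
		return (u_int)((a >> 3) ^ (a >> PGSHIFT)) & (LD_MLISTS - 1);
	}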

YAMAMOTO Takashi
Index: arch/alpha/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/include/mutex.h,v
retrieving revision 1.2
diff -u -p -r1.2 mutex.h
--- arch/alpha/include/mutex.h  9 Feb 2007 21:55:01 -0000       1.2
+++ arch/alpha/include/mutex.h  20 Nov 2007 14:40:52 -0000
@@ -44,7 +44,6 @@
 struct kmutex {
        uintptr_t       mtx_pad1;
        uint32_t        mtx_pad2;
-       uint32_t        mtx_pad3;
 };
 
 #else  /* __MUTEX_PRIVATE */
@@ -59,7 +58,6 @@ struct kmutex {
                } s;
        } u;
        __cpu_simple_lock_t     mtx_lock;
-       volatile uint32_t       mtx_id;
 };
 
 #define        mtx_owner                       u.mtxa_owner
Index: arch/alpha/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/alpha/include/rwlock.h 9 Feb 2007 21:55:01 -0000       1.2
+++ arch/alpha/include/rwlock.h 20 Nov 2007 14:40:52 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/arm/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- arch/arm/include/mutex.h    9 Mar 2007 19:21:58 -0000       1.7
+++ arch/arm/include/mutex.h    20 Nov 2007 14:40:52 -0000
@@ -58,7 +58,6 @@
 
 struct kmutex {
        uintptr_t       mtx_pad1;
-       uint32_t        mtx_pad2;
 };
 
 #else  /* __MUTEX_PRIVATE */
@@ -76,7 +75,6 @@ struct kmutex {
                        volatile uint8_t        mtxs_unused;
                } s;
        } u;
-       volatile uint32_t       mtx_id;                 /* 4-7 */
 };
 
 #define        mtx_owner               u.mtxa_owner
Index: arch/arm/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/arm/include/rwlock.h   9 Mar 2007 20:08:26 -0000       1.2
+++ arch/arm/include/rwlock.h   20 Nov 2007 14:40:57 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/hppa/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- arch/hppa/include/mutex.h   17 Oct 2007 19:54:41 -0000      1.7
+++ arch/hppa/include/mutex.h   20 Nov 2007 14:40:57 -0000
@@ -66,7 +66,7 @@ struct kmutex {
                        volatile uint8_t        mtxs_waiters;   /* 24 */
 
                        /* For LOCKDEBUG */
-                       uint8_t                 mtxs_id[3];     /* 25-27 */
+                       uint8_t                 mtxs_dodebug;   /* 25 */
                } s;
 #endif
                uint8_t                 mtxu_pad[32];   /* 0 - 32 */
@@ -82,7 +82,7 @@ struct kmutex {
 #define        mtx_owner       u.s.mtxs_owner
 #define        mtx_ipl         u.s.mtxs_ipl
 #define        mtx_waiters     u.s.mtxs_waiters
-#define        mtx_id          u.s.mtxs_id
+#define        mtx_dodebug     u.s.mtxs_dodebug
 
 /* Magic constants for mtx_owner */
 #define        MUTEX_ADAPTIVE_UNOWNED          0xffffff00
@@ -119,22 +119,18 @@ MUTEX_HAS_WAITERS(volatile kmutex_t *mtx
 }
 
 static inline void
-MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, u_int id, int ipl)
+MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, bool dodebug, int ipl)
 {
        mtx->mtx_ipl = makeiplcookie(ipl);
-       mtx->mtx_id[0] = (uint8_t)id;
-       mtx->mtx_id[1] = (uint8_t)(id >> 8);
-       mtx->mtx_id[2] = (uint8_t)(id >> 16);
+       mtx->mtx_dodebug = dodebug;
        mtx->mtx_owner = MUTEX_SPIN_FLAG;
        __cpu_simple_lock_init(&mtx->mtx_lock);
 }
 
 static inline void
-MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, u_int id)
+MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, bool dodebug)
 {
-       mtx->mtx_id[0] = (uint8_t)id;
-       mtx->mtx_id[1] = (uint8_t)(id >> 8);
-       mtx->mtx_id[2] = (uint8_t)(id >> 16);
+       mtx->mtx_dodebug = dodebug;
        mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
        __cpu_simple_lock_init(&mtx->mtx_lock);
 }
@@ -143,17 +139,12 @@ static inline void
 MUTEX_DESTROY(kmutex_t *mtx)
 {
        mtx->mtx_owner = 0xffffffff;
-       mtx->mtx_id[0] = 0xff;
-       mtx->mtx_id[1] = 0xff;
-       mtx->mtx_id[2] = 0xff;
 }
 
-static inline u_int
-MUTEX_GETID(kmutex_t *mtx)
+static inline bool
+MUTEX_DEBUG_P(kmutex_t *mtx)
 {
-       return (u_int)mtx->mtx_id[0] |
-           ((u_int)mtx->mtx_id[1] << 8) |
-           ((u_int)mtx->mtx_id[2] << 16);
+       return mtx->mtx_dodebug != 0;
 }
 
 static inline int
Index: arch/hppa/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/hppa/include/rwlock.h  9 Feb 2007 21:55:04 -0000       1.2
+++ arch/hppa/include/rwlock.h  20 Nov 2007 14:40:57 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/m68k/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- arch/m68k/include/mutex.h   11 Mar 2007 05:22:25 -0000      1.4
+++ arch/m68k/include/mutex.h   20 Nov 2007 14:41:04 -0000
@@ -43,7 +43,6 @@
 
 struct kmutex {
        uintptr_t       mtx_pad1;
-       uint32_t        mtx_pad2;
 };
 
 #else  /* __MUTEX_PRIVATE */
@@ -61,7 +60,6 @@ struct kmutex {
                        uint8_t                 mtxs_unused;    /* 3 */
                } s;
        } u;
-       volatile uint32_t       mtx_id;                 /* 4-7 */
 };
 
 #define        mtx_owner               u.mtxu_owner
Index: arch/m68k/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/m68k/include/rwlock.h  9 Feb 2007 21:55:05 -0000       1.2
+++ arch/m68k/include/rwlock.h  20 Nov 2007 14:41:04 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/mips/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/mutex.h,v
retrieving revision 1.2
diff -u -p -r1.2 mutex.h
--- arch/mips/include/mutex.h   9 Feb 2007 21:55:06 -0000       1.2
+++ arch/mips/include/mutex.h   20 Nov 2007 14:41:04 -0000
@@ -43,7 +43,7 @@
 
 struct kmutex {
        uintptr_t       mtx_pad1;
-       uint32_t        mtx_pad2[3];
+       uint32_t        mtx_pad2[2];
 };
 
 #else  /* __MUTEX_PRIVATE */
@@ -56,7 +56,6 @@ struct kmutex {
        volatile uintptr_t      mtx_owner;
        ipl_cookie_t            mtx_ipl;
        __cpu_simple_lock_t     mtx_lock;
-       volatile uint32_t       mtx_id;
 };
 
 #define        __HAVE_SIMPLE_MUTEXES           1
Index: arch/mips/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/mips/include/rwlock.h  9 Feb 2007 21:55:06 -0000       1.2
+++ arch/mips/include/rwlock.h  20 Nov 2007 14:41:04 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/powerpc/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/include/mutex.h,v
retrieving revision 1.2
diff -u -p -r1.2 mutex.h
--- arch/powerpc/include/mutex.h        9 Feb 2007 21:55:10 -0000       1.2
+++ arch/powerpc/include/mutex.h        20 Nov 2007 14:41:04 -0000
@@ -50,12 +50,11 @@ struct kmutex {
                        volatile uintptr_t      mtxm_owner;
                        ipl_cookie_t            mtxm_ipl;
                        __cpu_simple_lock_t     mtxm_lock;
-                       volatile uint32_t       mtxm_id;
                } m;
 #endif
                struct {
                        uintptr_t               mtxp_a;
-                       uint32_t                mtxp_b[3];
+                       uint32_t                mtxp_b[2];
                } p;
        } u;
 };
@@ -65,7 +64,6 @@ struct kmutex {
 #define        mtx_owner       u.m.mtxm_owner
 #define        mtx_ipl         u.m.mtxm_ipl
 #define        mtx_lock        u.m.mtxm_lock
-#define        mtx_id          u.m.mtxm_id
 
 #define        __HAVE_SIMPLE_MUTEXES           1
 #define        __HAVE_MUTEX_STUBS              1
Index: arch/powerpc/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/powerpc/include/rwlock.h       9 Feb 2007 21:55:11 -0000       1.2
+++ arch/powerpc/include/rwlock.h       20 Nov 2007 14:41:04 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/sh3/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/include/mutex.h,v
retrieving revision 1.6
diff -u -p -r1.6 mutex.h
--- arch/sh3/include/mutex.h    14 Mar 2007 01:14:25 -0000      1.6
+++ arch/sh3/include/mutex.h    20 Nov 2007 14:41:08 -0000
@@ -43,7 +43,6 @@
 
 struct kmutex {
        uintptr_t       mtx_pad1;
-       uint32_t        mtx_pad2;
 };
 
 #else  /* __MUTEX_PRIVATE */
@@ -61,7 +60,6 @@ struct kmutex {
                        volatile uint8_t        mtxs_unused;
                } s;
        } u;
-       volatile uint32_t       mtx_id;                 /* 4-7 */
 };
 
 #define        mtx_owner               u.mtxa_owner
Index: arch/sh3/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/sh3/include/rwlock.h   9 Feb 2007 21:55:12 -0000       1.2
+++ arch/sh3/include/rwlock.h   20 Nov 2007 14:41:08 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/sparc/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/include/mutex.h,v
retrieving revision 1.5
diff -u -p -r1.5 mutex.h
--- arch/sparc/include/mutex.h  17 Oct 2007 19:57:13 -0000      1.5
+++ arch/sparc/include/mutex.h  20 Nov 2007 14:41:08 -0000
@@ -104,7 +104,9 @@ struct kmutex {
                } s;
        } u;
        __cpu_simple_lock_t     mtx_lock;                       /* 4 */
-       uint8_t                 mtx_idtype[3];                  /* 5-7 */
+       uint8_t                 mtx_dodebug;                    /* 5 */
+       uint8_t                 mtx_isspin;                     /* 6 */
+       uint8_t                 mtx_pad[1];                     /* 7 */
 };
 
 #define        __HAVE_MUTEX_STUBS      1
@@ -145,22 +147,20 @@ MUTEX_HAS_WAITERS(volatile kmutex_t *mtx
 }
 
 static inline void
-MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, u_int id, int ipl)
+MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, bool dodebug, int ipl)
 {
-       mtx->mtx_idtype[0] = (uint8_t)id;
-       mtx->mtx_idtype[1] = (uint8_t)(id >> 8);
-       mtx->mtx_idtype[2] = (uint8_t)((id >> 16) | 0x80);
+       mtx->mtx_dodebug = dodebug;
+       mtx->mtx_isspin = 1;
        mtx->mtx_ipl = makeiplcookie(ipl);
        mtx->mtx_interlock = __SIMPLELOCK_LOCKED;
        __cpu_simple_lock_init(&mtx->mtx_lock);
 }
 
 static inline void
-MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, u_int id)
+MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, bool dodebug)
 {
-       mtx->mtx_idtype[0] = (uint8_t)id;
-       mtx->mtx_idtype[1] = (uint8_t)(id >> 8);
-       mtx->mtx_idtype[2] = (uint8_t)(id >> 16);
+       mtx->mtx_dodebug = dodebug;
+       mtx->mtx_isspin = 0;
        __cpu_simple_lock_init(&mtx->mtx_lock);
 }
 
@@ -168,29 +168,24 @@ static inline void
 MUTEX_DESTROY(kmutex_t *mtx)
 {
        mtx->mtx_owner = (uintptr_t)-1L;
-       mtx->mtx_idtype[0] = 0xff;
-       mtx->mtx_idtype[1] = 0xff;
-       mtx->mtx_idtype[2] = 0xff;
 }
 
-static inline u_int
-MUTEX_GETID(kmutex_t *mtx)
+static inline bool
+MUTEX_DEBUG_P(kmutex_t *mtx)
 {
-       return (u_int)mtx->mtx_idtype[0] |
-           ((u_int)mtx->mtx_idtype[1] << 8) |
-           (((u_int)mtx->mtx_idtype[2] & 0x7f) << 16);
+       return mtx->mtx_dodebug != 0;
 }
 
 static inline int
 MUTEX_SPIN_P(volatile kmutex_t *mtx)
 {
-       return mtx->mtx_idtype[2] & 0x80;
+       return mtx->mtx_isspin != 0;
 }
 
 static inline int
 MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
 {
-       return (mtx->mtx_idtype[2] & 0x80) == 0;
+       return mtx->mtx_isspin == 0;
 }
 
 static inline int
Index: arch/sparc/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/sparc/include/rwlock.h 9 Feb 2007 21:55:12 -0000       1.2
+++ arch/sparc/include/rwlock.h 20 Nov 2007 14:41:08 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/sparc64/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/include/mutex.h,v
retrieving revision 1.2
diff -u -p -r1.2 mutex.h
--- arch/sparc64/include/mutex.h        9 Feb 2007 21:55:12 -0000       1.2
+++ arch/sparc64/include/mutex.h        20 Nov 2007 14:41:08 -0000
@@ -51,7 +51,6 @@ struct kmutex {
                } s;
 #endif
        } u;
-       volatile uint32_t       mtx_id;
 };
 
 #ifdef __MUTEX_PRIVATE
Index: arch/sparc64/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/sparc64/include/rwlock.h       9 Feb 2007 21:55:12 -0000       1.2
+++ arch/sparc64/include/rwlock.h       20 Nov 2007 14:41:08 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: arch/vax/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/include/mutex.h,v
retrieving revision 1.8
diff -u -p -r1.8 mutex.h
--- arch/vax/include/mutex.h    6 Apr 2007 17:48:06 -0000       1.8
+++ arch/vax/include/mutex.h    20 Nov 2007 14:41:08 -0000
@@ -106,7 +106,7 @@ struct kmutex {
                        uint8_t                 s_dummyhi;      /* 3 */
                } u_s;
        } mtx_u;
-       uint32_t                        mtx_id;                 /* 4-7 */
+       uint32_t                        mtx_flags;              /* 4-7 */
 };
 #define        mtx_owner       mtx_u.u_owner
 #define        mtx_lock        mtx_u.u_s.s_lock
@@ -147,18 +147,18 @@ MUTEX_CLEAR_WAITERS(volatile kmutex_t *m
 }
 
 static inline void
-MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, u_int id, int ipl)
+MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, bool dodebug, int ipl)
 {
-       mtx->mtx_id = (id << 1) | 1;
+       mtx->mtx_flags = (dodebug << 1) | 1;
        mtx->mtx_owner = 0x80000000;
        mtx->mtx_ipl = makeiplcookie(ipl);
        mtx->mtx_lock = 0;
 }
 
 static inline void
-MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, u_int id)
+MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, bool dodebug)
 {
-       mtx->mtx_id = id << 1;
+       mtx->mtx_flags = (dodebug << 1);
        mtx->mtx_ipl = makeiplcookie(-1);
        mtx->mtx_owner = 0;
 }
@@ -167,25 +167,25 @@ static inline void
 MUTEX_DESTROY(kmutex_t *mtx)
 {
        mtx->mtx_owner = (uintptr_t)-1L;
-       mtx->mtx_id = 0xdeadface << 1;
+       mtx->mtx_flags = 0xdeadface << 1;
 }
 
-static inline u_int
-MUTEX_GETID(volatile kmutex_t *mtx)
+static inline bool
+MUTEX_DEBUG_P(volatile kmutex_t *mtx)
 {
-       return mtx->mtx_id >> 1;
+       return mtx->mtx_flags >> 1;
 }
 
 static inline bool
 MUTEX_SPIN_P(volatile kmutex_t *mtx)
 {
-       return (mtx->mtx_id & 1) != 0;
+       return (mtx->mtx_flags & 1) != 0;
 }
 
 static inline bool
 MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
 {
-       return (mtx->mtx_id & 1) == 0;
+       return (mtx->mtx_flags & 1) == 0;
 }
 
 static inline bool
Index: arch/vax/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/vax/include/rwlock.h   17 Feb 2007 05:34:07 -0000      1.2
+++ arch/vax/include/rwlock.h   20 Nov 2007 14:41:08 -0000
@@ -42,7 +42,7 @@
 struct krwlock {
        volatile uintptr_t      rw_owner;
        __cpu_simple_lock_t     rw_lock;
-       unsigned int            rw_id : 24;
+       unsigned int            rw_dodebug : 24;
 };
 
 #ifdef __RWLOCK_PRIVATE
@@ -95,9 +95,9 @@ RW_SET_WAITERS(krwlock_t *rw, uintptr_t 
  *             used in the LOCKDEBUG case.
  */
 static inline void
-RW_SETID(krwlock_t *rw, u_int id)
+RW_SETDEBUG(krwlock_t *rw, bool dodebug)
 {
-       rw->rw_id = id;
+       rw->rw_dodebug = dodebug;
 }
 
 /*
@@ -105,10 +105,10 @@ RW_SETID(krwlock_t *rw, u_int id)
  *             Get the debugging ID for the lock, an integer.  Only
  *             used in the LOCKDEBUG case.
  */
-static inline u_int
-RW_GETID(krwlock_t *rw)
+static inline bool
+RW_DEBUG_P(krwlock_t *rw)
 {
-       return rw->rw_id;
+       return rw->rw_dodebug;
 }
 
 #endif /* __RWLOCK_PRIVATE */
Index: arch/x86/include/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/mutex.h,v
retrieving revision 1.2
diff -u -p -r1.2 mutex.h
--- arch/x86/include/mutex.h    9 Feb 2007 21:55:14 -0000       1.2
+++ arch/x86/include/mutex.h    20 Nov 2007 14:41:08 -0000
@@ -51,7 +51,6 @@ struct kmutex {
                } s;
 #endif
        } u;
-       volatile uint32_t       mtx_id;
 };
 
 #ifdef __MUTEX_PRIVATE
Index: arch/x86/include/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- arch/x86/include/rwlock.h   9 Feb 2007 21:55:14 -0000       1.2
+++ arch/x86/include/rwlock.h   20 Nov 2007 14:41:08 -0000
@@ -41,7 +41,6 @@
 
 struct krwlock {
        volatile uintptr_t      rw_owner;
-       uint32_t                rw_id;
 };
 
 #ifdef __RWLOCK_PRIVATE
Index: kern/kern_lock.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_lock.c,v
retrieving revision 1.126
diff -u -p -r1.126 kern_lock.c
--- kern/kern_lock.c    13 Nov 2007 22:14:35 -0000      1.126
+++ kern/kern_lock.c    20 Nov 2007 14:41:12 -0000
@@ -104,7 +104,7 @@ void        lock_printf(const char *fmt, ...)
 static int acquire(struct lock **, int *, int, int, int, uintptr_t);
 
 int    lock_debug_syslog = 0;  /* defaults to printf, but can be patched */
-int    kernel_lock_id;
+bool   kernel_lock_dodebug;
 __cpu_simple_lock_t kernel_lock;
 
 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
@@ -705,8 +705,7 @@ assert_sleepable(struct simplelock *inte
  */
 
 #define        _KERNEL_LOCK_ABORT(msg)                                         \
-    LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops,   \
-        __func__, msg)
+    LOCKDEBUG_ABORT(&kernel_lock, &_kernel_lock_ops, __func__, msg)
 
 #ifdef LOCKDEBUG
 #define        _KERNEL_LOCK_ASSERT(cond)                                       \
@@ -734,7 +733,7 @@ kernel_lock_init(void)
 {
 
        __cpu_simple_lock_init(&kernel_lock);
-       kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops,
+       kernel_lock_dodebug = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops,
            RETURN_ADDRESS);
 }
 
@@ -782,13 +781,15 @@ _kernel_lock(int nlocks, struct lwp *l)
        }
 
        _KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
-       LOCKDEBUG_WANTLOCK(kernel_lock_id, RETURN_ADDRESS, 0);
+       LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, &kernel_lock, RETURN_ADDRESS,
+           0);
 
        s = splvm();
        if (__cpu_simple_lock_try(&kernel_lock)) {
                ci->ci_biglock_count = nlocks;
                l->l_blcnt = nlocks;
-               LOCKDEBUG_LOCKED(kernel_lock_id, RETURN_ADDRESS, 0);
+               LOCKDEBUG_LOCKED(kernel_lock_dodebug, &kernel_lock,
+                   RETURN_ADDRESS, 0);
                splx(s);
                return;
        }
@@ -825,7 +826,7 @@ _kernel_lock(int nlocks, struct lwp *l)
        ci->ci_biglock_count = nlocks;
        l->l_blcnt = nlocks;
        LOCKSTAT_STOP_TIMER(lsflag, spintime);
-       LOCKDEBUG_LOCKED(kernel_lock_id, RETURN_ADDRESS, 0);
+       LOCKDEBUG_LOCKED(kernel_lock_dodebug, &kernel_lock, RETURN_ADDRESS, 0);
        splx(s);
 
        /*
@@ -877,7 +878,8 @@ _kernel_unlock(int nlocks, struct lwp *l
        l->l_blcnt -= nlocks;
        if (ci->ci_biglock_count == nlocks) {
                s = splvm();
-               LOCKDEBUG_UNLOCKED(kernel_lock_id, RETURN_ADDRESS, 0);
+               LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, &kernel_lock,
+                   RETURN_ADDRESS, 0);
                ci->ci_biglock_count = 0;
                __cpu_simple_unlock(&kernel_lock);
                splx(s);
Index: kern/kern_mutex.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_mutex.c,v
retrieving revision 1.22
diff -u -p -r1.22 kern_mutex.c
--- kern/kern_mutex.c   7 Nov 2007 00:23:22 -0000       1.22
+++ kern/kern_mutex.c   20 Nov 2007 14:41:12 -0000
@@ -78,13 +78,13 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
  */
 
 #define        MUTEX_WANTLOCK(mtx)                                     \
-    LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),                       \
+    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),              \
         (uintptr_t)__builtin_return_address(0), 0)
 #define        MUTEX_LOCKED(mtx)                                       \
-    LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),                         \
+    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),                        \
         (uintptr_t)__builtin_return_address(0), 0)
 #define        MUTEX_UNLOCKED(mtx)                                     \
-    LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),                       \
+    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),              \
         (uintptr_t)__builtin_return_address(0), 0)
 #define        MUTEX_ABORT(mtx, msg)                                   \
     mutex_abort(mtx, __func__, msg)
@@ -154,28 +154,27 @@ do {                                                                   \
 
 #define        MUTEX_OWNER(owner)                                              \
        (owner & MUTEX_THREAD)
-#define        MUTEX_OWNED(owner)                                              \
-       (owner != 0)
 #define        MUTEX_HAS_WAITERS(mtx)                                          \
        (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)
 
-#define        MUTEX_INITIALIZE_ADAPTIVE(mtx, id)                              \
+#define        MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)                         \
 do {                                                                   \
-       (mtx)->mtx_id = (id);                                           \
+       if (dodebug)                                                    \
+               (mtx)->mtx_owner |= MUTEX_BIT_DEBUG;                    \
 } while (/* CONSTCOND */ 0);
 
-#define        MUTEX_INITIALIZE_SPIN(mtx, id, ipl)                             \
+#define        MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)                        \
 do {                                                                   \
        (mtx)->mtx_owner = MUTEX_BIT_SPIN;                              \
+       if (dodebug)                                                    \
+               (mtx)->mtx_owner |= MUTEX_BIT_DEBUG;                    \
        (mtx)->mtx_ipl = makeiplcookie((ipl));                          \
-       (mtx)->mtx_id = (id);                                           \
        __cpu_simple_lock_init(&(mtx)->mtx_lock);                       \
 } while (/* CONSTCOND */ 0)
 
 #define        MUTEX_DESTROY(mtx)                                              \
 do {                                                                   \
        (mtx)->mtx_owner = MUTEX_THREAD;                                \
-       (mtx)->mtx_id = -1;                                             \
 } while (/* CONSTCOND */ 0);
 
 #define        MUTEX_SPIN_P(mtx)               \
@@ -183,13 +182,25 @@ do {                                                                   \
 #define        MUTEX_ADAPTIVE_P(mtx)           \
     (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)
 
-#define        MUTEX_GETID(mtx)                ((mtx)->mtx_id)
+#define        MUTEX_DEBUG_P(mtx)      (((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
+#if defined(LOCKDEBUG)
+#define        MUTEX_OWNED(owner)              (((owner) & ~MUTEX_BIT_DEBUG) != 0)
+#define        MUTEX_INHERITDEBUG(new, old)    (new) |= (old) & MUTEX_BIT_DEBUG
+#else /* defined(LOCKDEBUG) */
+#define        MUTEX_OWNED(owner)              ((owner) != 0)
+#define        MUTEX_INHERITDEBUG(new, old)    /* nothing */
+#endif /* defined(LOCKDEBUG) */
 
 static inline int
 MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
 {
        int rv;
-       rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
+       uintptr_t old = 0;
+       uintptr_t new = curthread;
+
+       MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
+       MUTEX_INHERITDEBUG(new, old);
+       rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
        MUTEX_RECEIVE(mtx);
        return rv;
 }
@@ -206,8 +217,12 @@ MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr
 static inline void
 MUTEX_RELEASE(kmutex_t *mtx)
 {
+       uintptr_t new;
+
        MUTEX_GIVE(mtx);
-       mtx->mtx_owner = 0;
+       new = 0;
+       MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
+       mtx->mtx_owner = new;
 }
 
 static inline void
@@ -291,7 +306,7 @@ void
 mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
 {
 
-       LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
+       LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
            &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
        /* NOTREACHED */
 }
@@ -308,7 +323,7 @@ mutex_abort(kmutex_t *mtx, const char *f
 void
 mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
 {
-       u_int id;
+       bool dodebug;
 
        memset(mtx, 0, sizeof(*mtx));
 
@@ -333,19 +348,19 @@ mutex_init(kmutex_t *mtx, kmutex_type_t 
 
        switch (type) {
        case MUTEX_NODEBUG:
-               id = LOCKDEBUG_ALLOC(mtx, NULL,
+               dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
                    (uintptr_t)__builtin_return_address(0));
-               MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
+               MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
                break;
        case MUTEX_ADAPTIVE:
-               id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
+               dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
                    (uintptr_t)__builtin_return_address(0));
-               MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
+               MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
                break;
        case MUTEX_SPIN:
-               id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
+               dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
                    (uintptr_t)__builtin_return_address(0));
-               MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
+               MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
                break;
        default:
                panic("mutex_init: impossible type");
@@ -369,7 +384,7 @@ mutex_destroy(kmutex_t *mtx)
                MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
        }
 
-       LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
+       LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
        MUTEX_DESTROY(mtx);
 }
 
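(aside, on why MUTEX_ACQUIRE()/MUTEX_RELEASE() now go through
MUTEX_INHERITDEBUG(): with the debug flag stored in mtx_owner, an
unowned adaptive mutex is no longer all-zero under LOCKDEBUG.
a rough trace, assuming a LOCKDEBUG kernel:

	kmutex_t m;

	mutex_init(&m, MUTEX_ADAPTIVE, IPL_NONE);
	/* m.mtx_owner == MUTEX_BIT_DEBUG: unowned, debug bit set */

	/* MUTEX_ACQUIRE() CASes from MUTEX_BIT_DEBUG to
	 * (curthread | MUTEX_BIT_DEBUG); the pre-patch CAS from 0
	 * would always fail on such a mutex. */
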
Index: kern/kern_rwlock.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.11
diff -u -p -r1.11 kern_rwlock.c
--- kern/kern_rwlock.c  11 Oct 2007 19:45:25 -0000      1.11
+++ kern/kern_rwlock.c  20 Nov 2007 14:41:12 -0000
@@ -69,13 +69,13 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock.
 #if defined(LOCKDEBUG)
 
 #define        RW_WANTLOCK(rw, op)                                             \
-       LOCKDEBUG_WANTLOCK(RW_GETID(rw),                                \
+       LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),                        \
            (uintptr_t)__builtin_return_address(0), op == RW_READER);
 #define        RW_LOCKED(rw, op)                                               \
-       LOCKDEBUG_LOCKED(RW_GETID(rw),                                  \
+       LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw),                          \
            (uintptr_t)__builtin_return_address(0), op == RW_READER);
 #define        RW_UNLOCKED(rw, op)                                             \
-       LOCKDEBUG_UNLOCKED(RW_GETID(rw),                                \
+       LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),                        \
            (uintptr_t)__builtin_return_address(0), op == RW_READER);
 #define        RW_DASSERT(rw, cond)                                            \
 do {                                                                   \
@@ -114,10 +114,23 @@ do {                                                                   \
  * For platforms that use 'simple' RW locks.
  */
 #ifdef __HAVE_SIMPLE_RW_LOCKS
-#define        RW_ACQUIRE(rw, old, new)        RW_CAS(&(rw)->rw_owner, old, new)
-#define        RW_RELEASE(rw, old, new)        RW_CAS(&(rw)->rw_owner, old, new)
-#define        RW_SETID(rw, id)                ((rw)->rw_id = id)
-#define        RW_GETID(rw)                    ((rw)->rw_id)
+#define        RW_ACQUIRE(rw, old, new)        RW_CAS1(&(rw)->rw_owner, old, new)
+#define        RW_RELEASE(rw, old, new)        RW_CAS1(&(rw)->rw_owner, old, new)
+#define        RW_SETDEBUG(rw, on)             ((rw)->rw_owner |= (on) ? RW_DEBUG : 0)
+#define        RW_DEBUG_P(rw)                  (((rw)->rw_owner & RW_DEBUG) != 0)
+#if defined(LOCKDEBUG)
+#define        RW_INHERITDEBUG(new, old)       (new) |= (old) & RW_DEBUG
+#else /* defined(LOCKDEBUG) */
+#define        RW_INHERITDEBUG(new, old)       /* nothing */
+#endif /* defined(LOCKDEBUG) */
+
+static inline int
+RW_CAS1(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new)
+{
+
+       RW_INHERITDEBUG(new, old);
+       return RW_CAS(ptr, old, new);
+}
 
 static inline int
 RW_SET_WAITERS(krwlock_t *rw, uintptr_t need, uintptr_t set)
@@ -190,7 +203,7 @@ rw_abort(krwlock_t *rw, const char *func
        if (panicstr != NULL)
                return;
 
-       LOCKDEBUG_ABORT(RW_GETID(rw), rw, &rwlock_lockops, func, msg);
+       LOCKDEBUG_ABORT(rw, &rwlock_lockops, func, msg);
 }
 
 /*
@@ -201,13 +214,13 @@ rw_abort(krwlock_t *rw, const char *func
 void
 rw_init(krwlock_t *rw)
 {
-       u_int id;
+       bool dodebug;
 
        memset(rw, 0, sizeof(*rw));
 
-       id = LOCKDEBUG_ALLOC(rw, &rwlock_lockops,
+       dodebug = LOCKDEBUG_ALLOC(rw, &rwlock_lockops,
            (uintptr_t)__builtin_return_address(0));
-       RW_SETID(rw, id);
+       RW_SETDEBUG(rw, dodebug);
 }
 
 /*
@@ -219,8 +232,8 @@ void
 rw_destroy(krwlock_t *rw)
 {
 
-       LOCKDEBUG_FREE(rw, RW_GETID(rw));
-       RW_ASSERT(rw, rw->rw_owner == 0);
+       RW_ASSERT(rw, (rw->rw_owner & ~RW_DEBUG) == 0);
+       LOCKDEBUG_FREE(RW_DEBUG_P(rw), rw);
 }
 
 /*
@@ -441,7 +454,7 @@ rw_vector_exit(krwlock_t *rw)
                        new = rcnt << RW_READ_COUNT_SHIFT;
                        if (wcnt != 0)
                                new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
-                               
+                       
                        RW_GIVE(rw);
                        if (!RW_RELEASE(rw, owner, new)) {
                                /* Oops, try again. */
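
(the rwlock side keeps the same invariant: RW_DEBUG lives in rw_owner,
every acquire/release goes through RW_CAS1() so the bit is carried
over, and rw_destroy() now masks it off before asserting that the lock
is unowned.  a rough trace, assuming a LOCKDEBUG kernel:

	krwlock_t rw;

	rw_init(&rw);			/* rw_owner == RW_DEBUG */
	rw_enter(&rw, RW_WRITER);
	/* rw_owner == curlwp | RW_WRITE_LOCKED | RW_DEBUG */
	rw_exit(&rw);			/* rw_owner is RW_DEBUG again, not 0 */
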
Index: kern/subr_lockdebug.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_lockdebug.c,v
retrieving revision 1.15
diff -u -p -r1.15 subr_lockdebug.c
--- kern/subr_lockdebug.c       12 Nov 2007 06:14:57 -0000      1.15
+++ kern/subr_lockdebug.c       20 Nov 2007 14:41:12 -0000
@@ -56,6 +56,8 @@ __KERNEL_RCSID(0, "$NetBSD: subr_lockdeb
 #include <sys/sleepq.h>
 #include <sys/cpu.h>
 
+#include <lib/libkern/rb.h>
+
 #ifdef LOCKDEBUG
 
 #define        LD_BATCH_SHIFT  9
@@ -66,10 +68,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_lockdeb
 
 #define        LD_LOCKED       0x01
 #define        LD_SLEEPER      0x02
-#define        LD_MLOCKS       8
-#define        LD_MLISTS       8192
-
-#define        LD_NOID         (LD_MAX_LOCKS + 1)
 
 typedef union lockdebuglk {
        struct {
@@ -83,16 +81,15 @@ typedef union lockdebuglk {
 #define        lk_oldspl       ul.lku_oldspl
 
 typedef struct lockdebug {
+       struct rb_node  ld_rb_node;     /* must be the first member */
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
-       _TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;
        volatile void   *ld_lock;
        lockops_t       *ld_lockops;
        struct lwp      *ld_lwp;
        uintptr_t       ld_locked;
        uintptr_t       ld_unlocked;
        uintptr_t       ld_initaddr;
-       u_int           ld_id;
        uint16_t        ld_shares;
        uint16_t        ld_cpu;
        uint8_t         ld_flags;
@@ -103,12 +100,11 @@ typedef struct lockdebug {
 
 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
 
+lockdebuglk_t          ld_tree_lk;
 lockdebuglk_t          ld_sleeper_lk;
 lockdebuglk_t          ld_spinner_lk;
 lockdebuglk_t          ld_free_lk;
-lockdebuglk_t          ld_mem_lk[LD_MLOCKS];
 
-lockdebuglist_t                ld_mem_list[LD_MLISTS];
 lockdebuglist_t                ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
 lockdebuglist_t                ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
 lockdebuglist_t                ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
@@ -126,6 +122,38 @@ static void        lockdebug_abort1(lockdebug_t
 static void    lockdebug_more(void);
 static void    lockdebug_init(void);
 
+static signed int
+ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
+{
+       const lockdebug_t *ld1 = (const void *)n1;
+       const lockdebug_t *ld2 = (const void *)n2;
+       intptr_t diff = (intptr_t)ld1->ld_lock - (intptr_t)ld2->ld_lock;
+       if (diff < 0)
+               return -1;
+       else if (diff > 0)
+               return 1;
+       return 0;
+}
+
+static signed int
+ld_rb_compare_key(const struct rb_node *n, const void *key)
+{
+       const lockdebug_t *ld = (const void *)n;
+       intptr_t diff = (intptr_t)ld->ld_lock - (intptr_t)key;
+       if (diff < 0)
+               return -1;
+       else if (diff > 0)
+               return 1;
+       return 0;
+}
+
+static struct rb_tree ld_rb_tree;
+
+static const struct rb_tree_ops ld_rb_tree_ops = {
+       .rb_compare_nodes = ld_rb_compare_nodes,
+       .rb_compare_key = ld_rb_compare_key,
+};
+
 static inline void
 lockdebug_lock(lockdebuglk_t *lk)
 {
@@ -146,38 +174,21 @@ lockdebug_unlock(lockdebuglk_t *lk)
        splx(s);
 }
 
-static inline void
-lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
-{
-       u_int hash;
-
-       hash = (uintptr_t)addr >> PGSHIFT;
-       *lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
-       *head = &ld_mem_list[hash & (LD_MLISTS - 1)];
-       lockdebug_lock(*lk);
-}
-
 /*
  * lockdebug_lookup:
  *
- *     Find a lockdebug structure by ID and return it locked.
+ *     Find a lockdebug structure by a pointer to a lock and return it locked.
  */
 static inline lockdebug_t *
-lockdebug_lookup(u_int id, lockdebuglk_t **lk)
+lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
 {
-       lockdebug_t *base, *ld;
-
-       if (id == LD_NOID)
-               return NULL;
-
-       if (id == 0 || id >= LD_MAX_LOCKS)
-               panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);
-
-       base = ld_table[id >> LD_BATCH_SHIFT];
-       ld = base + (id & LD_BATCH_MASK);
+       lockdebug_t *ld;
 
-       if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
-               panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);
+       lockdebug_lock(&ld_tree_lk);
+       ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
+       lockdebug_unlock(&ld_tree_lk);
+       if (ld == NULL)
+               panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
 
        if ((ld->ld_flags & LD_SLEEPER) != 0)
                *lk = &ld_sleeper_lk;
@@ -200,24 +211,21 @@ lockdebug_init(void)
        lockdebug_t *ld;
        int i;
 
+       __cpu_simple_lock_init(&ld_tree_lk.lk_lock);
        __cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
        __cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
        __cpu_simple_lock_init(&ld_free_lk.lk_lock);
 
+       rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
+
        ld = ld_prime;
        ld_table[0] = ld;
        for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
-               ld->ld_id = i;
                TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
        }
        ld_freeptr = 1;
        ld_nfree = LD_BATCH - 1;
-
-       for (i = 0; i < LD_MLOCKS; i++)
-               __cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
-       for (i = 0; i < LD_MLISTS; i++)
-               TAILQ_INIT(&ld_mem_list[i]);
 }
 
 /*
@@ -226,7 +234,7 @@ lockdebug_init(void)
  *     A lock is being initialized, so allocate an associated debug
  *     structure.
  */
-u_int
+bool
 lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
 {
 #if 0
@@ -237,7 +245,7 @@ lockdebug_alloc(volatile void *lock, loc
        lockdebug_t *ld;
 
        if (lo == NULL || panicstr != NULL)
-               return LD_NOID;
+               return false;
        if (ld_freeptr == 0)
                lockdebug_init();
 
@@ -258,7 +266,7 @@ lockdebug_alloc(volatile void *lock, loc
                if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
                        ci->ci_lkdebug_recurse--;
                        lockdebug_unlock(&ld_free_lk);
-                       return LD_NOID;
+                       return false;
                }
                lockdebug_more();
        } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
@@ -266,7 +274,7 @@ lockdebug_alloc(volatile void *lock, loc
 
        if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
                lockdebug_unlock(&ld_free_lk);
-               return LD_NOID;
+               return false;
        }
 
        TAILQ_REMOVE(&ld_free, ld, ld_chain);
@@ -291,6 +299,10 @@ lockdebug_alloc(volatile void *lock, loc
        ld->ld_lwp = NULL;
        ld->ld_initaddr = initaddr;
 
+       lockdebug_lock(&ld_tree_lk);
+       rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
+       lockdebug_unlock(&ld_tree_lk);
+
        if (lo->lo_sleeplock) {
                ld->ld_flags = LD_SLEEPER;
                lockdebug_unlock(&ld_sleeper_lk);
@@ -299,14 +311,7 @@ lockdebug_alloc(volatile void *lock, loc
                lockdebug_unlock(&ld_spinner_lk);
        }
 
-#if 0
-       /* Insert into address hash. */
-       lockdebug_mhash(lock, &lk, &head);
-       TAILQ_INSERT_HEAD(head, ld, ld_mchain);
-       lockdebug_unlock(lk);
-#endif
-
-       return ld->ld_id;
+       return true;
 }
 
 /*
@@ -315,44 +320,33 @@ lockdebug_alloc(volatile void *lock, loc
  *     A lock is being destroyed, so release debugging resources.
  */
 void
-lockdebug_free(volatile void *lock, u_int id)
+lockdebug_free(volatile void *lock)
 {
-#if 0
-       lockdebuglist_t *head;
-#endif
        lockdebug_t *ld;
        lockdebuglk_t *lk;
 
        if (panicstr != NULL)
                return;
 
-       if ((ld = lockdebug_lookup(id, &lk)) == NULL)
-               return;
-
-       if (ld->ld_lock != lock) {
+       ld = lockdebug_lookup(lock, &lk);
+       if (ld == NULL) {
                panic("lockdebug_free: destroying uninitialized lock %p"
-                   "(ld_id=%d ld_lock=%p)", lock, id, ld->ld_lock);
+                   "(ld_lock=%p)", lock, ld->ld_lock);
                lockdebug_abort1(ld, lk, __func__, "lock record follows",
                    true);
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
                lockdebug_abort1(ld, lk, __func__, "is locked", true);
-
+       lockdebug_lock(&ld_tree_lk);
+       rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
+       lockdebug_unlock(&ld_tree_lk);
        ld->ld_lock = NULL;
-
        lockdebug_unlock(lk);
 
        lockdebug_lock(&ld_free_lk);
        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
        ld_nfree++;
        lockdebug_unlock(&ld_free_lk);
-
-#if 0
-       /* Remove from address hash. */
-       lockdebug_mhash(lock, &lk, &head);
-       TAILQ_REMOVE(head, ld, ld_mchain);
-       lockdebug_unlock(lk);
-#endif
 }
 
 /*
@@ -394,7 +388,6 @@ lockdebug_more(void)
                        ld_nomore = true;
 
                for (i = base; i < m; i++, ld++) {
-                       ld->ld_id = i;
                        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                        TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
                }
@@ -410,7 +403,7 @@ lockdebug_more(void)
  *     Process the preamble to a lock acquire.
  */
 void
-lockdebug_wantlock(u_int id, uintptr_t where, int shared)
+lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
 {
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
@@ -423,7 +416,7 @@ lockdebug_wantlock(u_int id, uintptr_t w
        if (panicstr != NULL)
                return;
 
-       if ((ld = lockdebug_lookup(id, &lk)) == NULL)
+       if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;
 
        if ((ld->ld_flags & LD_LOCKED) != 0) {
@@ -461,7 +454,7 @@ lockdebug_wantlock(u_int id, uintptr_t w
  *     Process a lock acquire operation.
  */
 void
-lockdebug_locked(u_int id, uintptr_t where, int shared)
+lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
 {
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
@@ -470,7 +463,7 @@ lockdebug_locked(u_int id, uintptr_t whe
        if (panicstr != NULL)
                return;
 
-       if ((ld = lockdebug_lookup(id, &lk)) == NULL)
+       if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;
 
        if (shared) {
@@ -506,7 +499,7 @@ lockdebug_locked(u_int id, uintptr_t whe
  *     Process a lock release operation.
  */
 void
-lockdebug_unlocked(u_int id, uintptr_t where, int shared)
+lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
 {
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
@@ -515,7 +508,7 @@ lockdebug_unlocked(u_int id, uintptr_t w
        if (panicstr != NULL)
                return;
 
-       if ((ld = lockdebug_lookup(id, &lk)) == NULL)
+       if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;
 
        if (shared) {
@@ -611,29 +604,33 @@ lockdebug_barrier(volatile void *spinloc
  * lockdebug_mem_check:
  *
  *     Check for in-use locks within a memory region that is
- *     being freed.  We only check for active locks within the
- *     first page of the allocation.
+ *     being freed.
  */
 void
 lockdebug_mem_check(const char *func, void *base, size_t sz)
 {
 #if 0
-       lockdebuglist_t *head;
-       lockdebuglk_t *lk;
        lockdebug_t *ld;
-       uintptr_t sa, ea, la;
+       lockdebuglk_t *lk;
+       uintptr_t lock;
 
-       sa = (uintptr_t)base;
-       ea = sa + sz;
+       lockdebug_lock(&ld_tree_lk);
+       ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
+       lockdebug_unlock(&ld_tree_lk);
+       if (ld == NULL)
+               return;
+       
+       if ((ld->ld_flags & LD_SLEEPER) != 0)
+               lk = &ld_sleeper_lk;
+       else
+               lk = &ld_spinner_lk;
 
-       lockdebug_mhash(base, &lk, &head);
-       TAILQ_FOREACH(ld, head, ld_mchain) {
-               la = (uintptr_t)ld->ld_lock;
-               if (la >= sa && la < ea) {
-                       lockdebug_abort1(ld, lk, func,
-                           "allocation contains active lock", !cold);
-                       return;
-               }
+       lockdebug_lock(lk);
+       lock = (uintptr_t)ld->ld_lock;
+       if ((uintptr_t)base <= lock && lock < (uintptr_t)base + sz) {
+               lockdebug_abort1(ld, lk, func,
+                   "allocation contains active lock", !cold);
+               return;
        }
        lockdebug_unlock(lk);
 #endif
@@ -726,14 +723,14 @@ lockdebug_lock_print(void *addr, void (*
  *     An error has been trapped - dump lock info and call panic().
  */
 void
-lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
-               const char *func, const char *msg)
+lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
+               const char *msg)
 {
 #ifdef LOCKDEBUG
        lockdebug_t *ld;
        lockdebuglk_t *lk;
 
-       if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
+       if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
                lockdebug_abort1(ld, lk, func, msg, true);
                /* NOTREACHED */
        }
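
(note on the "must be the first member" comment on ld_rb_node: the
lookup side casts the struct rb_node pointer returned by
rb_tree_find_node() straight to lockdebug_t *:

	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));

with ld_rb_node first, that cast is a zero-offset conversion; moving
the member would break every lookup.)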
Index: sys/lockdebug.h
===================================================================
RCS file: /cvsroot/src/sys/sys/lockdebug.h,v
retrieving revision 1.5
diff -u -p -r1.5 lockdebug.h
--- sys/lockdebug.h     11 Oct 2007 19:45:26 -0000      1.5
+++ sys/lockdebug.h     20 Nov 2007 14:41:16 -0000
@@ -53,41 +53,45 @@ typedef     struct lockops {
        void            (*lo_dump)(volatile void *);
 } lockops_t;
 
-#define        LOCKDEBUG_ABORT(id, l, o, f, m) lockdebug_abort(id, l, o, f, m)
+#define        LOCKDEBUG_ABORT(l, o, f, m)     lockdebug_abort(l, o, f, m)
 
-void   lockdebug_abort(u_int, volatile void *, lockops_t *,
+void   lockdebug_abort(volatile void *, lockops_t *,
                        const char *, const char *);
 
 void   lockdebug_lock_print(void *, void (*)(const char *, ...));
 
 #ifdef LOCKDEBUG
 
-u_int  lockdebug_alloc(volatile void *, lockops_t *, uintptr_t);
-void   lockdebug_free(volatile void *, u_int);
-void   lockdebug_wantlock(u_int, uintptr_t, int);
-void   lockdebug_locked(u_int, uintptr_t, int);
-void   lockdebug_unlocked(u_int, uintptr_t, int);
+bool   lockdebug_alloc(volatile void *, lockops_t *, uintptr_t);
+void   lockdebug_free(volatile void *);
+void   lockdebug_wantlock(volatile void *, uintptr_t, int);
+void   lockdebug_locked(volatile void *, uintptr_t, int);
+void   lockdebug_unlocked(volatile void *, uintptr_t, int);
 void   lockdebug_barrier(volatile void *, int);
 void   lockdebug_mem_check(const char *, void *, size_t);
 
 #define        LOCKDEBUG_ALLOC(lock, ops, addr)        lockdebug_alloc(lock, ops, addr)
-#define        LOCKDEBUG_FREE(lock, id)                lockdebug_free(lock, id)
-#define        LOCKDEBUG_WANTLOCK(id, where, s)        lockdebug_wantlock(id, where, s)
-#define        LOCKDEBUG_LOCKED(id, where, s)          lockdebug_locked(id, where, s)
-#define        LOCKDEBUG_UNLOCKED(id, where, s)        lockdebug_unlocked(id, where, s)
-#define        LOCKDEBUG_BARRIER(lk, slp)              lockdebug_barrier(lk, slp)
+#define        LOCKDEBUG_FREE(dodebug, lock) \
+    if (dodebug) lockdebug_free(lock)
+#define        LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) \
+    if (dodebug) lockdebug_wantlock(lock, where, s)
+#define        LOCKDEBUG_LOCKED(dodebug, lock, where, s) \
+    if (dodebug) lockdebug_locked(lock, where, s)
+#define        LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) \
+    if (dodebug) lockdebug_unlocked(lock, where, s)
+#define        LOCKDEBUG_BARRIER(lock, slp)            lockdebug_barrier(lock, slp)
 #define        LOCKDEBUG_MEM_CHECK(base, sz)   \
     lockdebug_mem_check(__FUNCTION__, base, sz)
 
 #else  /* LOCKDEBUG */
 
-#define        LOCKDEBUG_ALLOC(lock, ops, addr)        0
-#define        LOCKDEBUG_FREE(lock, id)                /* nothing */
-#define        LOCKDEBUG_WANTLOCK(id, where, s)        /* nothing */
-#define        LOCKDEBUG_LOCKED(id, where, s)          /* nothing */
-#define        LOCKDEBUG_UNLOCKED(id, where, s)        /* nothing */
-#define        LOCKDEBUG_BARRIER(lk, slp)              /* nothing */
-#define        LOCKDEBUG_MEM_CHECK(base, sz)           /* nothing */
+#define        LOCKDEBUG_ALLOC(lock, ops, addr)                false
+#define        LOCKDEBUG_FREE(dodebug, lock)                   /* nothing */
+#define        LOCKDEBUG_WANTLOCK(dodebug, lock, where, s)     /* nothing */
+#define        LOCKDEBUG_LOCKED(dodebug, lock, where, s)       /* nothing */
+#define        LOCKDEBUG_UNLOCKED(dodebug, lock, where, s)     /* nothing */
+#define        LOCKDEBUG_BARRIER(lock, slp)                    /* nothing */
+#define        LOCKDEBUG_MEM_CHECK(base, sz)                   /* nothing */
 
 #endif /* LOCKDEBUG */
 
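(one thing to watch with the new call-site macros: under LOCKDEBUG
they expand to a bare "if (dodebug) ..." statement, so an unbraced
caller could trip over the classic dangling-else:

	if (cond)
		LOCKDEBUG_FREE(dodebug, lock);	/* if (dodebug) ... */
	else
		foo();		/* binds to the macro's if, not to cond! */

the callers in this patch all use the macros as plain statements, so
this is only a caveat for future users; a "do { } while (0)" wrapper
would close the hole.)
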
Index: sys/mutex.h
===================================================================
RCS file: /cvsroot/src/sys/sys/mutex.h,v
retrieving revision 1.11
diff -u -p -r1.11 mutex.h
--- sys/mutex.h 19 Oct 2007 12:16:48 -0000      1.11
+++ sys/mutex.h 20 Nov 2007 14:41:16 -0000
@@ -83,10 +83,10 @@
  *
  * Otherwise, the following must be defined:
  *
- *     MUTEX_INITIALIZE_SPIN(mtx, id, minipl)
+ *     MUTEX_INITIALIZE_SPIN(mtx, dodebug, minipl)
  *             Initialize a spin mutex.
  *
- *     MUTEX_INITIALIZE_ADAPTIVE(mtx, id)
+ *     MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
  *             Initialize an adaptive mutex.
  *
  *     MUTEX_DESTROY(mtx)
@@ -123,9 +123,9 @@
  *             Release the lock and clear the "has waiters" indication.
  *             Must be interrupt atomic, need not be MP safe.
  *
- *     MUTEX_GETID(rw)
- *             Get the debugging ID for the mutex, an integer.  Only
- *             used in the LOCKDEBUG case.
+ *     MUTEX_DEBUG_P(mtx)
+ *             Evaluates to true if the mutex is initialized with
+ *             dodebug==true.  Only used in the LOCKDEBUG case.
  *
  * Machine dependent code may optionally provide stubs for the following
  * functions to implement the easy (unlocked / no waiters) cases.  If
@@ -172,6 +172,7 @@ typedef struct kmutex kmutex_t;
 
 #define        MUTEX_BIT_SPIN                  0x01
 #define        MUTEX_BIT_WAITERS               0x02
+#define        MUTEX_BIT_DEBUG                 0x04
 
 #define        MUTEX_SPIN_IPL(mtx)             ((mtx)->mtx_ipl)
 #define        MUTEX_SPIN_OLDSPL(ci)           ((ci)->ci_mtx_oldspl)
Index: sys/rwlock.h
===================================================================
RCS file: /cvsroot/src/sys/sys/rwlock.h,v
retrieving revision 1.2
diff -u -p -r1.2 rwlock.h
--- sys/rwlock.h        9 Feb 2007 21:55:37 -0000       1.2
+++ sys/rwlock.h        20 Nov 2007 14:41:16 -0000
@@ -99,13 +99,13 @@
  *             condition becomes false, abort the operation.  Must
  *             be MP/interrupt atomic.
  *
- *     RW_SETID(rw, id)
- *             Set the debugging ID for the lock, an integer.  Only
- *             used in the LOCKDEBUG case.
+ *     RW_SETDEBUG(rw, dodebug)
+ *             Set the debugging boolean flag.  The flag is only used in
+ *             the LOCKDEBUG case.  Used by rw_init to initialize a lock.
  *
- *     RW_GETID(rw)
- *             Get the debugging ID for the lock, an integer.  Only
- *             used in the LOCKDEBUG case.
+ *     RW_DEBUG_P(rw)
+ *             Evaluate to true if the lock is initialized by RW_SETDEBUG
+ *             with dodebug==true.  Only used in the LOCKDEBUG case.
  *
  * Architectures may optionally provide stubs for the following functions to
  * implement the easy (unlocked, no waiters) cases.  If these stubs are
@@ -140,7 +140,7 @@ typedef struct krwlock krwlock_t;
 #define        RW_HAS_WAITERS          0x01UL  /* lock has waiters */
 #define        RW_WRITE_WANTED         0x02UL  /* >= 1 waiter is a writer */
 #define        RW_WRITE_LOCKED         0x04UL  /* lock is currently write locked */
-#define        RW_UNUSED               0x08UL  /* currently unused */
+#define        RW_DEBUG                0x08UL  /* LOCKDEBUG enabled */
 
 #define        RW_READ_COUNT_SHIFT     4
 #define        RW_READ_INCR            (1UL << RW_READ_COUNT_SHIFT)

