Source-Changes-HG archive


[src/trunk]: src/sys Improve the LOCKDEBUG code:



details:   https://anonhg.NetBSD.org/src/rev/ad2f6d921551
branches:  trunk
changeset: 474954:ad2f6d921551
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Tue Jul 27 21:29:15 1999 +0000

description:
Improve the LOCKDEBUG code:
- Now compatible with MULTIPROCESSOR (requires other changes not yet
  committed, but which will be committed later today).
- In addition to tracking simple locks, track exclusive spin locks.
- Count spin locks the way we count sleep locks, except that the count
  lives in the cpu_info for this CPU (sleep locks are counted in the
  proc).
- Lock debug lists are now TAILQs, so as to make the locking order
  more obvious when dumping the list (a small illustration follows
  this list).

Also, some suggestions from Bill Sommerfeld:
- SIMPLELOCK_LOCKED and SIMPLELOCK_UNLOCKED constants, which may be
  defined in <machine/lock.h> (default to 1 and 0, respectively).  This
  makes it easier to support architectures which use test-and-clear
  rather than test-and-set.
- Add __attribute__((__aligned__)) to the `lock_data' member of the
  simplelock structure.  This makes it easier to support architectures
  which can only perform atomic operations on very-well-aligned memory
  locations.  NOTE: This changes the size of struct simplelock, and
  will cause a version bump.  (Both suggestions are sketched below.)
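
Roughly, the two suggestions amount to something like the following
(a hedged sketch of the idea, not the exact committed text):

    /*
     * Default to test-and-set semantics; a port whose atomic
     * primitive is test-and-clear can invert these in
     * <machine/lock.h>.
     */
    #ifndef SIMPLELOCK_LOCKED
    #define SIMPLELOCK_LOCKED       1
    #endif
    #ifndef SIMPLELOCK_UNLOCKED
    #define SIMPLELOCK_UNLOCKED     0
    #endif

    struct simplelock {
            /*
             * With no alignment argument, GCC aligns the lock word
             * to the largest useful alignment for the machine; this
             * keeps atomic operations legal on architectures that
             * require well-aligned operands.
             */
            volatile int lock_data __attribute__((__aligned__));
    };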

diffstat:

 sys/kern/kern_lock.c |  339 +++++++++++++++++++++++++++++++++++++-------------
 sys/sys/lock.h       |   84 +++++++----
 2 files changed, 302 insertions(+), 121 deletions(-)

diffs (truncated from 706 to 300 lines):

diff -r aba56a0a7ad7 -r ad2f6d921551 sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c      Tue Jul 27 17:55:00 1999 +0000
+++ b/sys/kern/kern_lock.c      Tue Jul 27 21:29:15 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lock.c,v 1.20 1999/07/26 23:02:53 thorpej Exp $   */
+/*     $NetBSD: kern_lock.c,v 1.21 1999/07/27 21:29:16 thorpej Exp $   */
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -79,6 +79,7 @@
  *     @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
  */
 
+#include "opt_multiprocessor.h"
 #include "opt_lockdebug.h"
 #include "opt_ddb.h"
 
@@ -93,11 +94,25 @@
  * Locks provide shared/exclusive synchronization.
  */
 
-#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
-#define COUNT(p, x) if (p) (p)->p_locks += (x)
+#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
+#if defined(MULTIPROCESSOR) /* { */
+#define        COUNT_CPU(cpu_id, x)                                            \
+       /* atomic_add_ulong(&curcpu()->ci_spin_locks, (x)) */
+#else
+u_long spin_locks;
+#define        COUNT_CPU(cpu_id, x)    spin_locks += (x)
+#endif /* MULTIPROCESSOR */ /* } */
+
+#define        COUNT(lkp, p, cpu_id, x)                                        \
+do {                                                                   \
+       if ((lkp)->lk_flags & LK_SPIN)                                  \
+               COUNT_CPU((cpu_id), (x));                               \
+       else                                                            \
+               (p)->p_locks += (x);                                    \
+} while (0)
 #else
 #define COUNT(p, x)
-#endif
+#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
 
 /*
  * Acquire a resource.
@@ -150,6 +165,53 @@
        (((lkp)->lk_flags & LK_SPIN) != 0 ?                             \
         ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))
 
+#if defined(LOCKDEBUG) /* { */
+#if defined(MULTIPROCESSOR) /* { */
+struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
+
+#define        SPINLOCK_LIST_LOCK()    cpu_simple_lock(&spinlock_list_slock)
+
+#define        SPINLOCK_LIST_UNLOCK()  cpu_simple_unlock(&spinlock_list_slock)
+#else
+#define        SPINLOCK_LIST_LOCK()    /* nothing */
+
+#define        SPINLOCK_LIST_UNLOCK()  /* nothing */
+#endif /* MULTIPROCESSOR */ /* } */
+
+TAILQ_HEAD(, lock) spinlock_list =
+    TAILQ_HEAD_INITIALIZER(spinlock_list);
+
+#define        HAVEIT(lkp)                                                     \
+do {                                                                   \
+       if ((lkp)->lk_flags & LK_SPIN) {                                \
+               int s = splhigh();                                      \
+               SPINLOCK_LIST_LOCK();                                   \
+               /* XXX Cast away volatile. */                           \
+               TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
+                   lk_list);                                           \
+               SPINLOCK_LIST_UNLOCK();                                 \
+               splx(s);                                                \
+       }                                                               \
+} while (0)
+
+#define        DONTHAVEIT(lkp)                                                 \
+do {                                                                   \
+       if ((lkp)->lk_flags & LK_SPIN) {                                \
+               int s = splhigh();                                      \
+               SPINLOCK_LIST_LOCK();                                   \
+               /* XXX Cast away volatile. */                           \
+               TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),      \
+                   lk_list);                                           \
+               SPINLOCK_LIST_UNLOCK();                                 \
+               splx(s);                                                \
+       }                                                               \
+} while (0)
+#else
+#define        HAVEIT(lkp)             /* nothing */
+
+#define        DONTHAVEIT(lkp)         /* nothing */
+#endif /* LOCKDEBUG */ /* } */
+
 /*
  * Initialize a lock; required before use.
  */
@@ -219,27 +281,27 @@
                simple_unlock(interlkp);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
 
-#ifdef DIAGNOSTIC
+#ifdef DIAGNOSTIC /* { */
        /*
         * Don't allow spins on sleep locks and don't allow sleeps
         * on spin locks.
         */
        if ((flags ^ lkp->lk_flags) & LK_SPIN)
                panic("lockmgr: sleep/spin mismatch\n");
-#endif
+#endif /* } */
 
        if (extflags & LK_SPIN)
                pid = LK_KERNPROC;
        else {
-#ifdef DIAGNOSTIC
+#ifdef DIAGNOSTIC /* { */
                if (p == NULL)
                        panic("lockmgr: no context");
-#endif
+#endif /* } */
                pid = p->p_pid;
        }
        cpu_id = 0;                     /* XXX cpu_number() XXX */
 
-#ifdef DIAGNOSTIC
+#ifdef DIAGNOSTIC /* { */
        /*
         * Once a lock has drained, the LK_DRAINING flag is set and an
         * exclusive lock is returned. The only valid operation thereafter
@@ -262,7 +324,7 @@
                if ((flags & LK_REENABLE) == 0)
                        lkp->lk_flags |= LK_DRAINED;
        }
-#endif /* DIAGNOSTIC */
+#endif /* DIAGNOSTIC */ /* } */
 
        switch (flags & LK_TYPE_MASK) {
 
@@ -284,7 +346,7 @@
                        if (error)
                                break;
                        lkp->lk_sharecount++;
-                       COUNT(p, 1);
+                       COUNT(lkp, p, cpu_id, 1);
                        break;
                }
                /*
@@ -292,7 +354,7 @@
                 * An alternative would be to fail with EDEADLK.
                 */
                lkp->lk_sharecount++;
-               COUNT(p, 1);
+               COUNT(lkp, p, cpu_id, 1);
                /* fall into downgrade */
 
        case LK_DOWNGRADE:
@@ -304,6 +366,7 @@
                lkp->lk_recurselevel = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+               DONTHAVEIT(lkp);
                if (lkp->lk_waitcount)
                        wakeup_one((void *)lkp);
                break;
@@ -316,7 +379,7 @@
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        lkp->lk_sharecount--;
-                       COUNT(p, -1);
+                       COUNT(lkp, p, cpu_id, -1);
                        error = EBUSY;
                        break;
                }
@@ -334,7 +397,7 @@
                if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
                        panic("lockmgr: upgrade exclusive lock");
                lkp->lk_sharecount--;
-               COUNT(p, -1);
+               COUNT(lkp, p, cpu_id, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
@@ -357,12 +420,13 @@
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        SETHOLDER(lkp, pid, cpu_id);
+                       HAVEIT(lkp);
                        if (lkp->lk_exclusivecount != 0)
                                panic("lockmgr: non-zero exclusive count");
                        lkp->lk_exclusivecount = 1;
                        if (extflags & LK_SETRECURSE)
                                lkp->lk_recurselevel = 1;
-                       COUNT(p, 1);
+                       COUNT(lkp, p, cpu_id, 1);
                        break;
                }
                /*
@@ -391,7 +455,7 @@
                        if (extflags & LK_SETRECURSE &&
                            lkp->lk_recurselevel == 0)
                                lkp->lk_recurselevel = lkp->lk_exclusivecount;
-                       COUNT(p, 1);
+                       COUNT(lkp, p, cpu_id, 1);
                        break;
                }
                /*
@@ -421,12 +485,13 @@
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                SETHOLDER(lkp, pid, cpu_id);
+               HAVEIT(lkp);
                if (lkp->lk_exclusivecount != 0)
                        panic("lockmgr: non-zero exclusive count");
                lkp->lk_exclusivecount = 1;
                if (extflags & LK_SETRECURSE)
                        lkp->lk_recurselevel = 1;
-               COUNT(p, 1);
+               COUNT(lkp, p, cpu_id, 1);
                break;
 
        case LK_RELEASE:
@@ -446,14 +511,15 @@
                        if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
                                lkp->lk_recurselevel = 0;
                        lkp->lk_exclusivecount--;
-                       COUNT(p, -1);
+                       COUNT(lkp, p, cpu_id, -1);
                        if (lkp->lk_exclusivecount == 0) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+                               DONTHAVEIT(lkp);
                        }
                } else if (lkp->lk_sharecount != 0) {
                        lkp->lk_sharecount--;
-                       COUNT(p, -1);
+                       COUNT(lkp, p, cpu_id, -1);
                }
                if (lkp->lk_waitcount)
                        wakeup_one((void *)lkp);
@@ -505,11 +571,12 @@
                }
                lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
                SETHOLDER(lkp, pid, cpu_id);
+               HAVEIT(lkp);
                lkp->lk_exclusivecount = 1;
                /* XXX unlikely that we'd want this */
                if (extflags & LK_SETRECURSE)
                        lkp->lk_recurselevel = 1;
-               COUNT(p, 1);
+               COUNT(lkp, p, cpu_id, 1);
                break;
 
        default:
@@ -553,9 +620,50 @@
                printf(" with %d pending", lkp->lk_waitcount);
 }
 
-#if defined(LOCKDEBUG) && !defined(MULTIPROCESSOR)
-LIST_HEAD(slocklist, simplelock) slockdebuglist;
+#if defined(LOCKDEBUG) /* { */
+TAILQ_HEAD(, simplelock) simplelock_list =
+    TAILQ_HEAD_INITIALIZER(simplelock_list);
+
+#if defined(MULTIPROCESSOR) /* { */
+struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
+
+#define        SLOCK_LIST_LOCK()                                               \
+       cpu_simple_lock(&simplelock_list_slock)
+
+#define        SLOCK_LIST_UNLOCK()                                             \
+       cpu_simple_unlock(&simplelock_list_slock)
+
+#define        SLOCK_COUNT(x)                                                  \
+       /* atomic_add_ulong(&curcpu()->ci_simple_locks, (x)) */
+#else
+u_long simple_locks;
+
+#define        SLOCK_LIST_LOCK()       /* nothing */
+
+#define        SLOCK_LIST_UNLOCK()     /* nothing */
+
+#define        SLOCK_COUNT(x)          simple_locks += (x)
+#endif /* MULTIPROCESSOR */ /* } */
+
+#ifdef DDB /* { */
 int simple_lock_debugger = 0;
+#define        SLOCK_DEBUGGER()        if (simple_lock_debugger) Debugger()
+#else
+#define        SLOCK_DEBUGGER()        /* nothing */
+#endif /* } */
+
+#define        SLOCK_WHERE(str, alp, id, l)                                    \
+do {                                                                   \
+       printf(str);                                                    \
+       printf("currently at: %s:%d\n", (id), (l));                     \
+       if ((alp)->lock_file != NULL)                                   \


