Source-Changes-HG archive


[src/trunk]: src/sys/kern Move kernel_lock release/switch/reacquire from ltsl...



details:   https://anonhg.NetBSD.org/src/rev/aa47257eae86
branches:  trunk
changeset: 496464:aa47257eae86
user:      sommerfeld <sommerfeld%NetBSD.org@localhost>
date:      Thu Aug 24 02:37:27 2000 +0000

description:
Move kernel_lock release/switch/reacquire from ltsleep() to
mi_switch(), so we don't botch the locking around preempt() or
yield().
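
For readers unfamiliar with the big-lock hand-off this change relocates, here is a
minimal user-space sketch of the pattern, not NetBSD code: a recursively held lock is
released completely before the thread gives up the CPU and is reacquired to the same
depth when it resumes, which is what kernel_lock_release_all() and
kernel_lock_acquire_count() do around the switch.  The names big_lock,
big_lock_release_all(), big_lock_acquire_count(), and yield_with_big_lock() are
hypothetical pthread-based stand-ins, and sched_yield() stands in for cpu_switch().

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t big_lock;
static int big_lock_depth;		/* recursion depth of the current holder */

static void
big_lock_acquire(void)
{

	pthread_mutex_lock(&big_lock);
	big_lock_depth++;
}

static int
big_lock_release_all(void)
{
	int count = big_lock_depth;

	/* Unwind every recursive hold before giving up the CPU. */
	while (big_lock_depth > 0) {
		big_lock_depth--;
		pthread_mutex_unlock(&big_lock);
	}
	return count;
}

static void
big_lock_acquire_count(int count)
{

	/* Restore the previous recursion depth after resuming. */
	while (count-- > 0)
		big_lock_acquire();
}

/*
 * Analogue of the mi_switch() hand-off: drop the big lock entirely,
 * let another thread run, then take it back to the old depth.
 */
static void
yield_with_big_lock(void)
{
	int hold_count;

	hold_count = big_lock_release_all();	/* before yielding the CPU */
	sched_yield();				/* stand-in for cpu_switch() */
	big_lock_acquire_count(hold_count);	/* after being chosen to run */
}

int
main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&big_lock, &attr);

	big_lock_acquire();
	big_lock_acquire();		/* recursive hold, depth 2 */
	yield_with_big_lock();		/* depth drops to 0, then back to 2 */
	printf("depth after yield: %d\n", big_lock_depth);
	big_lock_release_all();
	return 0;
}

Built with cc -pthread, this should print a depth of 2 after the yield, showing the
recursion level restored once the thread is running again.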

diffstat:

 sys/kern/kern_synch.c |  69 ++++++++++++++++++++------------------------------
 1 files changed, 28 insertions(+), 41 deletions(-)

diffs (133 lines):

diff -r 9b028b33dd1d -r aa47257eae86 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Thu Aug 24 02:03:54 2000 +0000
+++ b/sys/kern/kern_synch.c     Thu Aug 24 02:37:27 2000 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.84 2000/08/22 17:28:29 thorpej Exp $  */
+/*     $NetBSD: kern_synch.c,v 1.85 2000/08/24 02:37:27 sommerfeld Exp $       */
 
 /*-
  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -402,9 +402,6 @@
        int sig, s;
        int catch = priority & PCATCH;
        int relock = (priority & PNORELOCK) == 0;
-#if defined(MULTIPROCESSOR)
-       int dobiglock, held_count;
-#endif
 
        /*
         * XXXSMP
@@ -429,9 +426,6 @@
                return (0);
        }
 
-#if defined(MULTIPROCESSOR)
-       dobiglock = (p->p_flag & P_BIGLOCK) != 0;
-#endif
 
 #ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
@@ -491,22 +485,11 @@
                        if (p->p_wchan != NULL)
                                unsleep(p);
                        p->p_stat = SONPROC;
-#if defined(MULTIPROCESSOR)
-                       /*
-                        * We're going to skip the unlock, so
-                        * we don't need to relock after resume.
-                        */
-                       dobiglock = 0;
-#endif
                        SCHED_UNLOCK(s);
                        goto resume;
                }
                if (p->p_wchan == NULL) {
                        catch = 0;
-#if defined(MULTIPROCESSOR)
-                       /* See above. */
-                       dobiglock = 0;
-#endif
                        SCHED_UNLOCK(s);
                        goto resume;
                }
@@ -515,18 +498,6 @@
        p->p_stat = SSLEEP;
        p->p_stats->p_ru.ru_nvcsw++;
 
-#if defined(MULTIPROCESSOR)
-       if (dobiglock) {
-               /*
-                * Release the kernel_lock, as we are about to
-                * yield the CPU.  The scheduler_slock is still
-                * held until cpu_switch() selects a new process
-                * and removes it from the run queue.
-                */
-               held_count = kernel_lock_release_all();
-       }
-#endif
-
        SCHED_ASSERT_LOCKED();
        mi_switch(p);
 
@@ -543,17 +514,6 @@
        KDASSERT(p->p_cpu == curcpu());
        p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
 
-#if defined(MULTIPROCESSOR)
-       if (dobiglock) {
-               /*
-                * Reacquire the kernel_lock now.  We do this after
-                * we've released sched_lock to avoid deadlock,
-                * and before we reacquire the interlock.
-                */
-               kernel_lock_acquire_count(LK_EXCLUSIVE|LK_CANRECURSE,
-                   held_count);
-       }
-#endif
        p->p_flag &= ~P_SINTR;
        if (p->p_flag & P_TIMEOUT) {
                p->p_flag &= ~P_TIMEOUT;
@@ -845,9 +805,24 @@
        struct rlimit *rlim;
        long s, u;
        struct timeval tv;
+#if defined(MULTIPROCESSOR)
+       int hold_count;
+#endif
 
        SCHED_ASSERT_LOCKED();
 
+#if defined(MULTIPROCESSOR)
+       if (p->p_flag & P_BIGLOCK) {
+               /*
+                * Release the kernel_lock, as we are about to
+                * yield the CPU.  The scheduler_slock is still
+                * held until cpu_switch() selects a new process
+                * and removes it from the run queue.
+                */
+               hold_count = kernel_lock_release_all();
+       }
+#endif
+
        KDASSERT(p->p_cpu != NULL);
        KDASSERT(p->p_cpu == curcpu());
 
@@ -925,6 +900,18 @@
        KDASSERT(p->p_cpu != NULL);
        KDASSERT(p->p_cpu == curcpu());
        microtime(&p->p_cpu->ci_schedstate.spc_runtime);
+
+#if defined(MULTIPROCESSOR)
+       if (p->p_flag & P_BIGLOCK) {
+               /*
+                * Reacquire the kernel_lock now.  We do this after
+                * we've released sched_lock to avoid deadlock,
+                * and before we reacquire the interlock.
+                */
+               kernel_lock_acquire_count(LK_EXCLUSIVE|LK_CANRECURSE,
+                   hold_count);
+       }
+#endif
 }
 
 /*
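
One observation on the resulting structure, hedged since it goes slightly beyond what
the diff itself states: preempt() and yield() reach mi_switch() without passing through
ltsleep(), so while the kernel_lock hand-off lived in ltsleep() those paths could switch
away with the lock still held, which appears to be the botched locking the description
refers to.  With the release and reacquire in mi_switch(), every path that gives up the
CPU performs the hand-off, and the hold_count local survives across cpu_switch() on the
process's own kernel stack until the process is chosen to run again.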


