Source-Changes-HG archive


[src/trunk]: src/sys/kern nextlwp(): fix a couple of locking bugs including o...



details:   https://anonhg.NetBSD.org/src/rev/cb13e3549089
branches:  trunk
changeset: 744911:cb13e3549089
user:      ad <ad%NetBSD.org@localhost>
date:      Sun Feb 16 21:31:19 2020 +0000

description:
nextlwp(): fix a couple of locking bugs including one I introduced yesterday,
and add comments around same.

diffstat:

 sys/kern/kern_synch.c |  28 +++++++++++++++++++---------
 1 files changed, 19 insertions(+), 9 deletions(-)

diffs (70 lines):

diff -r 2d0db1860afc -r cb13e3549089 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Sun Feb 16 20:32:29 2020 +0000
+++ b/sys/kern/kern_synch.c     Sun Feb 16 21:31:19 2020 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $      */
+/*     $NetBSD: kern_synch.c,v 1.340 2020/02/16 21:31:19 ad Exp $      */
 
 /*-
- * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
+ * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
  *    The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.340 2020/02/16 21:31:19 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -476,23 +476,34 @@
         * Let sched_nextlwp() select the LWP to run the CPU next.
         * If no LWP is runnable, select the idle LWP.
         * 
-        * Note that spc_lwplock might not necessary be held, and
-        * new thread would be unlocked after setting the LWP-lock.
+        * On arrival here LWPs on a run queue are locked by spc_mutex which
+        * is currently held.  Idle LWPs are always locked by spc_lwplock,
+        * which may or may not be held here.  On exit from this code block,
+        * in all cases newl is locked by spc_lwplock.
         */
        newl = sched_nextlwp();
        if (newl != NULL) {
                sched_dequeue(newl);
                KASSERT(lwp_locked(newl, spc->spc_mutex));
                KASSERT(newl->l_cpu == ci);
-               lwp_setlock(newl, spc->spc_lwplock);
+               newl->l_stat = LSONPROC;
+               newl->l_pflag |= LP_RUNNING;
+               spc->spc_curpriority = lwp_eprio(newl);
                spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
+               lwp_setlock(newl, spc->spc_lwplock);
        } else {
+               /*
+                * Updates to newl here are unlocked, but newl is the idle
+                * LWP and thus sheltered from outside interference, so no
+                * harm is going to come of it.
+                */
                newl = ci->ci_data.cpu_idlelwp;
+               newl->l_stat = LSONPROC;
+               newl->l_pflag |= LP_RUNNING;
+               spc->spc_curpriority = PRI_IDLE;
                spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
                    SPCF_IDLE;
        }
-       newl->l_stat = LSONPROC;
-       newl->l_pflag |= LP_RUNNING;
 
        /*
         * Only clear want_resched if there are no pending (slow) software
@@ -502,7 +513,6 @@
         * the release of spc_mutex becomes globally visible.
         */
        ci->ci_want_resched = ci->ci_data.cpu_softints;
-       spc->spc_curpriority = lwp_eprio(newl);
 
        return newl;
 }
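
The comment added by this change describes a lock handoff: while a runnable
LWP sits on a run queue it is guarded by spc_mutex, and lwp_setlock() switches
its guardian to spc_lwplock only after its fields have been updated.  The
user-space sketch below models that idea with POSIX threads and C11 atomics;
it is only an illustration under those assumptions, and the names
(guarded_lwp, runq_mutex, cpu_lwplock, lwp_handoff) are hypothetical
stand-ins, not NetBSD code.

    /* Hypothetical sketch of a lock handoff; not NetBSD code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct guarded_lwp {
            _Atomic(pthread_mutex_t *) l_lock;  /* mutex currently guarding this LWP */
            int l_stat;                         /* protected by whichever mutex *l_lock names */
    };

    static pthread_mutex_t runq_mutex = PTHREAD_MUTEX_INITIALIZER;   /* plays the role of spc_mutex */
    static pthread_mutex_t cpu_lwplock = PTHREAD_MUTEX_INITIALIZER;  /* plays the role of spc_lwplock */

    /*
     * Caller holds the LWP's current guardian (runq_mutex here).  The state
     * is written first and the guardian pointer is switched last, so a
     * thread that later locks the LWP through cpu_lwplock sees fully
     * updated fields.
     */
    static void
    lwp_handoff(struct guarded_lwp *l, int newstat, pthread_mutex_t *newlock)
    {
            l->l_stat = newstat;
            atomic_store_explicit(&l->l_lock, newlock, memory_order_release);
    }

    int
    main(void)
    {
            struct guarded_lwp l = { .l_stat = 0 };

            atomic_init(&l.l_lock, &runq_mutex);

            pthread_mutex_lock(&runq_mutex);    /* own the LWP via the run-queue lock */
            lwp_handoff(&l, 1, &cpu_lwplock);   /* LWP is now owned via cpu_lwplock */
            pthread_mutex_unlock(&runq_mutex);

            printf("l_stat=%d guardian=%s\n", l.l_stat,
                atomic_load(&l.l_lock) == &cpu_lwplock ? "cpu_lwplock" : "runq_mutex");
            return 0;
    }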


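The pre-existing comment above the ci_want_resched assignment is about
ordering: the stores to the LWP must become globally visible before the
release of spc_mutex does.  As a generic illustration of that kind of
publish-before-release ordering (not the actual NetBSD barrier code), the
C11 release/acquire pairing below shows why a reader that observes the
releasing store also observes the data written before it; the names
payload, published, producer and consumer are hypothetical.

    /* Generic C11 release/acquire sketch; names are hypothetical, not NetBSD code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;             /* ordinary data, written before the release */
    static atomic_int published;    /* plays the role of the lock release */

    static void *
    producer(void *arg)
    {
            (void)arg;
            payload = 42;                                           /* plain store */
            atomic_store_explicit(&published, 1, memory_order_release);
            return NULL;
    }

    static void *
    consumer(void *arg)
    {
            (void)arg;
            /* Spin until the releasing store is visible. */
            while (atomic_load_explicit(&published, memory_order_acquire) == 0)
                    ;
            /* The acquire load that saw the release guarantees payload == 42 here. */
            printf("payload=%d\n", payload);
            return NULL;
    }

    int
    main(void)
    {
            pthread_t p, c;

            pthread_create(&c, NULL, consumer, NULL);
            pthread_create(&p, NULL, producer, NULL);
            pthread_join(p, NULL);
            pthread_join(c, NULL);
            return 0;
    }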
