Source-Changes-HG archive


[src/trunk]: src/sys/kern - Don't give up kpriority boost in preempt(). That...



details:   https://anonhg.NetBSD.org/src/rev/6d9928cccbf6
branches:  trunk
changeset: 846539:6d9928cccbf6
user:      ad <ad%NetBSD.org@localhost>
date:      Thu Nov 21 20:51:05 2019 +0000

description:
- Don't give up kpriority boost in preempt().  That's unfair and bad for
  interactive response.  It should only be dropped on final return to user.
- Clear l_dopreempt with atomics and add some comments around concurrency.
- Hold proc_lock over the lightning bolt and loadavg calc, no reason not to.
- cpu_did_preempt() is useless - don't call it.  Will remove soon.
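
For illustration, a minimal user-space sketch (hypothetical names, not the
kernel code) of the behavioural difference described in the first item: a
voluntary yield still drops the kernel-priority boost, while an involuntary
preemption now keeps it until the LWP finally returns to user space.

/*
 * Hypothetical model of the change, not sys/kern code: yield() ditches
 * the boost, preempt() keeps it for better interactive response.
 */
#include <stdbool.h>
#include <stdio.h>

struct lwp_model {
	bool kpriority;		/* boosted while running in the kernel */
};

static void
voluntary_yield(struct lwp_model *l)
{
	/* Voluntary - ditch kpriority boost (unchanged behaviour). */
	l->kpriority = false;
}

static void
involuntary_preempt(struct lwp_model *l)
{
	/* Involuntary - keep kpriority boost; it is only dropped on the
	 * final return to user. */
	(void)l;
}

int
main(void)
{
	struct lwp_model l = { .kpriority = true };

	involuntary_preempt(&l);
	printf("after preempt: boost %s\n", l.kpriority ? "kept" : "lost");
	voluntary_yield(&l);
	printf("after yield:   boost %s\n", l.kpriority ? "kept" : "lost");
	return 0;
}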

diffstat:

 sys/kern/kern_synch.c |  38 +++++++++++++++++++-------------------
 1 files changed, 19 insertions(+), 19 deletions(-)

diffs (120 lines):

diff -r 330d623b4496 -r 6d9928cccbf6 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Thu Nov 21 19:57:23 2019 +0000
+++ b/sys/kern/kern_synch.c     Thu Nov 21 20:51:05 2019 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: kern_synch.c,v 1.324 2019/10/03 22:48:44 kamil Exp $   */
+/*     $NetBSD: kern_synch.c,v 1.325 2019/11/21 20:51:05 ad Exp $      */
 
 /*-
- * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
+ * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
  *    The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.324 2019/10/03 22:48:44 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.325 2019/11/21 20:51:05 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -272,6 +272,7 @@
        lwp_lock(l);
        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
        KASSERT(l->l_stat == LSONPROC);
+       /* Voluntary - ditch kpriority boost. */
        l->l_kpriority = false;
        (void)mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
@@ -290,7 +291,7 @@
        lwp_lock(l);
        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
        KASSERT(l->l_stat == LSONPROC);
-       l->l_kpriority = false;
+       /* Involuntary - keep kpriority boost. */
        l->l_pflag |= LP_PREEMPTING;
        (void)mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
@@ -324,12 +325,12 @@
                         * been blocked", since we're going to
                         * context switch.
                         */
-                       l->l_dopreempt = 0;
+                       atomic_swap_uint(&l->l_dopreempt, 0);
                        return true;
                }
                if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
                        /* Can't preempt idle loop, don't count as failure. */
-                       l->l_dopreempt = 0;
+                       atomic_swap_uint(&l->l_dopreempt, 0);
                        return true;
                }
                if (__predict_false(l->l_nopreempt != 0)) {
@@ -342,7 +343,7 @@
                }
                if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
                        /* Can't preempt soft interrupts yet. */
-                       l->l_dopreempt = 0;
+                       atomic_swap_uint(&l->l_dopreempt, 0);
                        failed = (uintptr_t)&is_softint;
                        break;
                }
@@ -484,8 +485,11 @@
        }
 
        /*
-        * Only clear want_resched if there are no pending (slow)
-        * software interrupts.
+        * Only clear want_resched if there are no pending (slow) software
+        * interrupts.  We can do this without an atomic, because no new
+        * LWPs can appear in the queue due to our hold on spc_mutex, and
+        * the update to ci_want_resched will become globally visible before
+        * the release of spc_mutex becomes globally visible.
         */
        ci->ci_want_resched = ci->ci_data.cpu_softints;
        spc->spc_flags &= ~SPCF_SWITCHCLEAR;
@@ -606,10 +610,11 @@
        }
 
        /*
-        * Preemption related tasks.  Must be done with the current
-        * CPU locked.
+        * Preemption related tasks.  Must be done holding spc_mutex.  Clear
+        * l_dopreempt without an atomic - it's only ever set non-zero by
+        * sched_resched_cpu() which also holds spc_mutex, and only ever
+        * cleared by the LWP itself (us) with atomics when not under lock.
         */
-       cpu_did_resched(l);
        l->l_dopreempt = 0;
        if (__predict_false(l->l_pfailaddr != 0)) {
                LOCKSTAT_FLAG(lsflag);
@@ -830,12 +835,6 @@
         */
        ci->ci_data.cpu_onproc = newl;
 
-       /*
-        * Preemption related tasks.  Must be done with the current
-        * CPU locked.
-        */
-       cpu_did_resched(l);
-
        /* Unlock the run queue. */
        spc_unlock(ci);
 
@@ -1215,7 +1214,6 @@
                        psignal(p, sig);
                }
        }
-       mutex_exit(proc_lock);
 
        /* Load average calculation. */
        if (__predict_false(lavg_count == 0)) {
@@ -1229,4 +1227,6 @@
 
        /* Lightning bolt. */
        cv_broadcast(&lbolt);
+
+       mutex_exit(proc_lock);
 }
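
The l_dopreempt handling in the diff follows a simple ownership rule:
sched_resched_cpu() sets the flag while holding spc_mutex, the LWP clears it
with atomic_swap_uint() when it is not under that lock, and mi_switch() may
clear it with a plain store because it does hold spc_mutex.  A rough
user-space analogue of that pattern, with C11 <stdatomic.h> and a pthread
mutex standing in for the kernel primitives (all names below are made up for
the sketch, not kernel APIs):

/*
 * Sketch only: the flag is set under a lock, cleared with an atomic
 * exchange when unlocked, and cleared with a relaxed store while the
 * same lock is held.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t spc_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint dopreempt;

static void
request_preemption(void)	/* cf. sched_resched_cpu() */
{
	pthread_mutex_lock(&spc_mutex);
	atomic_store(&dopreempt, 1);
	pthread_mutex_unlock(&spc_mutex);
}

static void
clear_unlocked(void)		/* cf. the early-exit paths in kpreempt() */
{
	/* Not holding spc_mutex: clear with an atomic swap. */
	(void)atomic_exchange(&dopreempt, 0);
}

static void
clear_locked(void)		/* cf. mi_switch() */
{
	/* Holding spc_mutex: the kernel uses a plain store here. */
	pthread_mutex_lock(&spc_mutex);
	atomic_store_explicit(&dopreempt, 0, memory_order_relaxed);
	pthread_mutex_unlock(&spc_mutex);
}

int
main(void)
{
	request_preemption();
	clear_unlocked();
	request_preemption();
	clear_locked();
	printf("dopreempt = %u\n", atomic_load(&dopreempt));
	return 0;
}

The sketch only shows the locking discipline; it does not try to reproduce
the visibility argument the new comment in mi_switch() makes about
ci_want_resched becoming globally visible before the release of spc_mutex.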


