Source-Changes-HG archive


[src/trunk]: src/sys Make it possible to call mi_switch() and immediately switch to another CPU



details:   https://anonhg.NetBSD.org/src/rev/20a676d394b4
branches:  trunk
changeset: 967371:20a676d394b4
user:      ad <ad%NetBSD.org@localhost>
date:      Fri Dec 06 21:36:10 2019 +0000

description:
Make it possible to call mi_switch() and immediately switch to another CPU.
This seems to take about 3us on my Intel system.  Two changes required:

- Have the caller of mi_switch() be responsible for calling spc_lock(); see
  the caller-side sketch below.
- Avoid using l->l_cpu in mi_switch().

While here:

- Add a couple of calls to membar_enter().
- Have the idle LWP set itself to LSIDL, to match softint_thread().
- Remove the unused return value from mi_switch().
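
A caller-side sketch of the new convention (condensed from the hunks
below; this is a restatement, not code from the changeset itself, and
error handling is omitted):

	/* Before: mi_switch() locked the run queue itself. */
	lwp_lock(l);
	(void)mi_switch(l);	/* returned 1 if another LWP ran; unused */

	/*
	 * After: the caller takes the run-queue lock via spc_lock()
	 * first, and mi_switch() returns void.  As before, mi_switch()
	 * returns with the LWP unlocked.
	 */
	lwp_lock(l);
	spc_lock(l->l_cpu);
	mi_switch(l);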

diffstat:

 sys/kern/kern_exec.c    |   5 +-
 sys/kern/kern_exit.c    |   5 +-
 sys/kern/kern_idle.c    |   6 ++-
 sys/kern/kern_lwp.c     |   5 +-
 sys/kern/kern_sig.c     |   5 +-
 sys/kern/kern_sleepq.c  |   5 +-
 sys/kern/kern_softint.c |   5 +-
 sys/kern/kern_synch.c   |  78 +++++++++++++++++++++++++-----------------------
 sys/sys/sched.h         |   4 +-
 9 files changed, 64 insertions(+), 54 deletions(-)

diffs (truncated from 394 to 300 lines):

diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c      Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_exec.c      Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $       */
+/*     $NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $       */
 
 /*-
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_exec.h"
 #include "opt_execfmt.h"
@@ -1363,6 +1363,7 @@
                mutex_exit(p->p_lock);
                mutex_exit(proc_lock);
                lwp_lock(l);
+               spc_lock(l->l_cpu);
                mi_switch(l);
                ksiginfo_queue_drain(&kq);
                KERNEL_LOCK(l->l_biglocks, l);
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_exit.c
--- a/sys/kern/kern_exit.c      Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_exit.c      Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_exit.c,v 1.277 2019/10/03 22:48:44 kamil Exp $    */
+/*     $NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $       */
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.277 2019/10/03 22:48:44 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -245,6 +245,7 @@
                lwp_unlock(l);
                mutex_exit(p->p_lock);
                lwp_lock(l);
+               spc_lock(l->l_cpu);
                mi_switch(l);
                KERNEL_LOCK(l->l_biglocks, l);
                mutex_enter(p->p_lock);
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_idle.c
--- a/sys/kern/kern_idle.c      Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_idle.c      Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_idle.c,v 1.27 2019/12/01 15:34:46 ad Exp $        */
+/*     $NetBSD: kern_idle.c,v 1.28 2019/12/06 21:36:10 ad Exp $        */
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.27 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.28 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -93,6 +93,8 @@
                }
                KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
                lwp_lock(l);
+               l->l_stat = LSIDL;
+               spc_lock(l->l_cpu);
                mi_switch(l);
                KASSERT(curlwp == l);
                KASSERT(l->l_stat == LSONPROC);
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_lwp.c       Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $ */
+/*     $NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $        */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -209,7 +209,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1602,6 +1602,7 @@
                        lwp_unlock(l);
                        mutex_exit(p->p_lock);
                        lwp_lock(l);
+                       spc_lock(l->l_cpu);
                        mi_switch(l);
                }
 
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_sig.c
--- a/sys/kern/kern_sig.c       Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_sig.c       Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_sig.c,v 1.380 2019/11/21 18:17:36 ad Exp $        */
+/*     $NetBSD: kern_sig.c,v 1.381 2019/12/06 21:36:10 ad Exp $        */
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.380 2019/11/21 18:17:36 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.381 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ptrace.h"
 #include "opt_dtrace.h"
@@ -1771,6 +1771,7 @@
 
        mutex_exit(p->p_lock);
        lwp_lock(l);
+       spc_lock(l->l_cpu);
        mi_switch(l);
        KERNEL_LOCK(biglocks, l);
 }
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_sleepq.c
--- a/sys/kern/kern_sleepq.c    Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_sleepq.c    Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_sleepq.c,v 1.53 2019/11/23 19:42:52 ad Exp $      */
+/*     $NetBSD: kern_sleepq.c,v 1.54 2019/12/06 21:36:10 ad Exp $      */
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.53 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.54 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -260,6 +260,7 @@
                if (timo) {
                        callout_schedule(&l->l_timeout_ch, timo);
                }
+               spc_lock(l->l_cpu);
                mi_switch(l);
 
                /* The LWP and sleep queue are now unlocked. */
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_softint.c
--- a/sys/kern/kern_softint.c   Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_softint.c   Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_softint.c,v 1.54 2019/12/06 18:15:57 ad Exp $     */
+/*     $NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $     */
 
 /*-
  * Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.54 2019/12/06 18:15:57 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -729,6 +729,7 @@
 
                lwp_lock(l);
                l->l_stat = LSIDL;
+               spc_lock(l->l_cpu);
                mi_switch(l);
        }
 }
diff -r c69892989a1b -r 20a676d394b4 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Fri Dec 06 21:07:07 2019 +0000
+++ b/sys/kern/kern_synch.c     Fri Dec 06 21:36:10 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $       */
+/*     $NetBSD: kern_synch.c,v 1.329 2019/12/06 21:36:10 ad Exp $      */
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.329 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -269,11 +269,14 @@
 
        KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
        lwp_lock(l);
+
        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
        KASSERT(l->l_stat == LSONPROC);
+
        /* Voluntary - ditch kpriority boost. */
        l->l_kpriority = false;
-       (void)mi_switch(l);
+       spc_lock(l->l_cpu);
+       mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
 }
 
@@ -288,11 +291,14 @@
 
        KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
        lwp_lock(l);
+
        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
        KASSERT(l->l_stat == LSONPROC);
+
        /* Involuntary - keep kpriority boost. */
        l->l_pflag |= LP_PREEMPTING;
-       (void)mi_switch(l);
+       spc_lock(l->l_cpu);
+       mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
 }
 
@@ -372,7 +378,9 @@
                        kpreempt_ev_immed.ev_count++;
                }
                lwp_lock(l);
+               /* Involuntary - keep kpriority boost. */
                l->l_pflag |= LP_PREEMPTING;
+               spc_lock(l->l_cpu);
                mi_switch(l);
                l->l_nopreempt++;
                splx(s);
@@ -501,20 +509,22 @@
 /*
  * The machine independent parts of context switch.
  *
- * Returns 1 if another LWP was actually run.
+ * NOTE: do not use l->l_cpu in this routine.  The caller may have enqueued
+ * itself onto another CPU's run queue, so l->l_cpu may point elsewhere.
  */
-int
+void
 mi_switch(lwp_t *l)
 {
        struct cpu_info *ci;
        struct schedstate_percpu *spc;
        struct lwp *newl;
-       int retval, oldspl;
+       int oldspl;
        struct bintime bt;
        bool returning;
 
        KASSERT(lwp_locked(l, NULL));
        KASSERT(kpreempt_disabled());
+       KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
        LOCKDEBUG_BARRIER(l->l_mutex, 1);
 
        kstack_check_magic(l);
@@ -523,8 +533,8 @@
 
        KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
        KASSERT((l->l_pflag & LP_RUNNING) != 0);
-       KASSERT(l->l_cpu == curcpu());
-       ci = l->l_cpu;
+       KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
+       ci = curcpu();
        spc = &ci->ci_schedstate;
        returning = false;
        newl = NULL;
@@ -555,31 +565,24 @@
        }
 #endif /* !__HAVE_FAST_SOFTINTS */
 
-       /* Lock the runqueue */
-       KASSERT(l->l_stat != LSRUN);
-       mutex_spin_enter(spc->spc_mutex);
-
        /*
         * If on the CPU and we have gotten this far, then we must yield.
         */
        if (l->l_stat == LSONPROC && l != newl) {
                KASSERT(lwp_locked(l, spc->spc_lwplock));
-               if ((l->l_flag & LW_IDLE) == 0) {
-                       l->l_stat = LSRUN;
-                       lwp_setlock(l, spc->spc_mutex);
-                       sched_enqueue(l);
-                       /*
-                        * Handle migration.  Note that "migrating LWP" may
-                        * be reset here, if interrupt/preemption happens
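
As an aside, the kern_idle.c and kern_softint.c hunks above now share
the same parking sequence before switching away; a condensed
restatement of those hunks (not additional code from the changeset):

	/*
	 * Idle and soft-interrupt LWPs park themselves identically:
	 * drop to LSIDL, take the run-queue lock, then switch.
	 */
	lwp_lock(l);
	l->l_stat = LSIDL;
	spc_lock(l->l_cpu);
	mi_switch(l);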


