Source-Changes-HG archive


[src/trunk]: src/sys Minor scheduler cleanup:



details:   https://anonhg.NetBSD.org/src/rev/8479e3b0550b
branches:  trunk
changeset: 461344:8479e3b0550b
user:      ad <ad%NetBSD.org@localhost>
date:      Sat Nov 23 19:42:52 2019 +0000

description:
Minor scheduler cleanup:

- Adapt to the cpu_need_resched() changes, avoiding lost and duplicate IPIs
  and ASTs.  sched_resched_cpu() and sched_resched_lwp() contain the logic
  for this (a toy model of the idea follows this list).
- Change the handling of LSIDL so that the locking scheme matches the
  intended design.
- Further reduce lock contention and false sharing.
- Numerous small bug fixes, including some corrections for SCHED_FIFO/RT.
- Use setrunnable() in more places, and merge cut-and-pasted code (see the
  before/after sketch below).
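
The first item deserves a concrete illustration.  sched_resched_cpu() itself
is not shown in the truncated diff below, so the following is only a toy,
self-contained model of the general technique the description names: collapse
concurrent reschedule requests so that only the first posts an IPI, while a
duplicate request arriving with one already pending sends nothing, and
clearing the bit when the CPU acts ensures no request is lost.  All names in
this model are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One "reschedule pending" bit per (simulated) CPU. */
    static atomic_bool resched_pending;

    /*
     * Request a reschedule.  Only the false -> true transition sends
     * the (simulated) IPI, so concurrent requests collapse into one
     * interrupt instead of a duplicate.
     */
    static void
    toy_need_resched(void)
    {
            if (!atomic_exchange(&resched_pending, true))
                    printf("IPI sent\n");
            else
                    printf("IPI suppressed: one already pending\n");
    }

    int
    main(void)
    {
            toy_need_resched();                      /* sent */
            toy_need_resched();                      /* suppressed */
            atomic_store(&resched_pending, false);   /* CPU handled it */
            toy_need_resched();                      /* sent again */
            return 0;
    }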

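The setrunnable() consolidation is visible in several hunks below
(kern_exec.c, kern_fork.c, kern_kthread.c).  A minimal before/after sketch of
the pattern, paraphrased from those hunks with the surrounding process-state
bookkeeping stripped:

    /* Before: each call site open-coded the LSIDL -> LSRUN transition. */
    lwp_lock(l2);
    KASSERT(l2->l_stat == LSIDL);
    l2->l_stat = LSRUN;
    sched_enqueue(l2, false);
    lwp_unlock(l2);

    /* After: one helper owns the transition, the enqueue, and the unlock. */
    lwp_lock(l2);
    KASSERT(l2->l_stat == LSIDL);
    setrunnable(l2);
    /* LWP now unlocked */
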
diffstat:

 sys/compat/linux/common/linux_sched.c |   34 +----
 sys/kern/kern_exec.c                  |   15 +-
 sys/kern/kern_fork.c                  |   16 +-
 sys/kern/kern_idle.c                  |   19 +-
 sys/kern/kern_kthread.c               |   21 +-
 sys/kern/kern_lwp.c                   |   68 ++++++---
 sys/kern/kern_runq.c                  |  237 +++++++++++++++++++++++++++------
 sys/kern/kern_sleepq.c                |    9 +-
 sys/kern/kern_softint.c               |   19 +-
 sys/kern/kern_synch.c                 |  109 +++++++++------
 sys/kern/sched_4bsd.c                 |   26 ++-
 sys/kern/sys_aio.c                    |   11 +-
 sys/kern/sys_lwp.c                    |   37 +----
 sys/rump/librump/rumpkern/scheduler.c |   27 +++-
 sys/sys/cpu.h                         |   25 +--
 sys/sys/lwp.h                         |    5 +-
 sys/sys/sched.h                       |    9 +-
 17 files changed, 425 insertions(+), 262 deletions(-)

diffs (truncated from 1464 to 300 lines):

diff -r 0d4a8e253469 -r 8479e3b0550b sys/compat/linux/common/linux_sched.c
--- a/sys/compat/linux/common/linux_sched.c     Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/compat/linux/common/linux_sched.c     Sat Nov 23 19:42:52 2019 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: linux_sched.c,v 1.72 2019/10/03 22:16:53 kamil Exp $   */
+/*     $NetBSD: linux_sched.c,v 1.73 2019/11/23 19:42:52 ad Exp $      */
 
 /*-
- * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * Copyright (c) 1999, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.72 2019/10/03 22:16:53 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.73 2019/11/23 19:42:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/mount.h>
@@ -180,7 +180,6 @@
        struct lwp *l2;
        struct linux_emuldata *led;
        void *parent_tidptr, *tls, *child_tidptr;
-       struct schedstate_percpu *spc;
        vaddr_t uaddr;
        lwpid_t lid;
        int flags, tnprocs, error;
@@ -248,31 +247,8 @@
                }
        }
 
-       /*
-        * Set the new LWP running, unless the process is stopping,
-        * then the LWP is created stopped.
-        */
-       mutex_enter(p->p_lock);
-       lwp_lock(l2);
-       spc = &l2->l_cpu->ci_schedstate;
-       if ((l->l_flag & (LW_WREBOOT | LW_DBGSUSPEND | LW_WSUSPEND | LW_WEXIT)) == 0) {
-               if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
-                       KASSERT(l2->l_wchan == NULL);
-                       l2->l_stat = LSSTOP;
-                       p->p_nrlwps--;
-                       lwp_unlock_to(l2, spc->spc_lwplock);
-               } else {
-                       KASSERT(lwp_locked(l2, spc->spc_mutex));
-                       l2->l_stat = LSRUN;
-                       sched_enqueue(l2, false);
-                       lwp_unlock(l2);
-               }
-       } else {
-               l2->l_stat = LSSUSPENDED;
-               p->p_nrlwps--;
-               lwp_unlock_to(l2, spc->spc_lwplock);
-       }
-       mutex_exit(p->p_lock);
+       /* Set the new LWP running. */
+       lwp_start(l2, 0);
 
        retval[0] = lid;
        retval[1] = 0;
diff -r 0d4a8e253469 -r 8479e3b0550b sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c      Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/kern/kern_exec.c      Sat Nov 23 19:42:52 2019 +0000
@@ -1,9 +1,12 @@
-/*     $NetBSD: kern_exec.c,v 1.483 2019/10/12 10:55:23 kamil Exp $    */
+/*     $NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $       */
 
 /*-
- * Copyright (c) 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Andrew Doran.
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -59,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.483 2019/10/12 10:55:23 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $");
 
 #include "opt_exec.h"
 #include "opt_execfmt.h"
@@ -2650,11 +2653,11 @@
 
        lwp_lock(l2);
        KASSERT(p2->p_nrlwps == 1);
+       KASSERT(l2->l_stat == LSIDL);
        p2->p_nrlwps = 1;
        p2->p_stat = SACTIVE;
-       l2->l_stat = LSRUN;
-       sched_enqueue(l2, false);
-       lwp_unlock(l2);
+       setrunnable(l2);
+       /* LWP now unlocked */
 
        mutex_exit(p2->p_lock);
        mutex_exit(proc_lock);
diff -r 0d4a8e253469 -r 8479e3b0550b sys/kern/kern_fork.c
--- a/sys/kern/kern_fork.c      Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/kern/kern_fork.c      Sat Nov 23 19:42:52 2019 +0000
@@ -1,7 +1,8 @@
-/*     $NetBSD: kern_fork.c,v 1.215 2019/10/12 10:55:23 kamil Exp $    */
+/*     $NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $       */
 
 /*-
- * Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008, 2019
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -67,7 +68,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.215 2019/10/12 10:55:23 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -561,21 +562,20 @@
        p2->p_acflag = AFORK;
        lwp_lock(l2);
        KASSERT(p2->p_nrlwps == 1);
+       KASSERT(l2->l_stat == LSIDL);
        if (p2->p_sflag & PS_STOPFORK) {
-               struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
                p2->p_nrlwps = 0;
                p2->p_stat = SSTOP;
                p2->p_waited = 0;
                p1->p_nstopchild++;
                l2->l_stat = LSSTOP;
                KASSERT(l2->l_wchan == NULL);
-               lwp_unlock_to(l2, spc->spc_lwplock);
+               lwp_unlock(l2);
        } else {
                p2->p_nrlwps = 1;
                p2->p_stat = SACTIVE;
-               l2->l_stat = LSRUN;
-               sched_enqueue(l2, false);
-               lwp_unlock(l2);
+               setrunnable(l2);
+               /* LWP now unlocked */
        }
 
        /*
diff -r 0d4a8e253469 -r 8479e3b0550b sys/kern/kern_idle.c
--- a/sys/kern/kern_idle.c      Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/kern/kern_idle.c      Sat Nov 23 19:42:52 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_idle.c,v 1.25 2012/01/29 22:55:40 rmind Exp $     */
+/*     $NetBSD: kern_idle.c,v 1.26 2019/11/23 19:42:52 ad Exp $        */
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.25 2012/01/29 22:55:40 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.26 2019/11/23 19:42:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -50,26 +50,26 @@
        struct lwp *l = curlwp;
 
        kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
+       spc = &ci->ci_schedstate;
        ci->ci_data.cpu_onproc = l;
 
        /* Update start time for this thread. */
        lwp_lock(l);
+       KASSERT(lwp_locked(l, spc->spc_lwplock));
        binuptime(&l->l_stime);
+       spc->spc_flags |= SPCF_RUNNING;
+       l->l_stat = LSONPROC;
+       l->l_pflag |= LP_RUNNING;
        lwp_unlock(l);
 
        /*
         * Use spl0() here to ensure that we have the correct interrupt
         * priority.  This may be the first thread running on the CPU,
-        * in which case we took a dirtbag route to get here.
+        * in which case we took an odd route to get here.
         */
-       spc = &ci->ci_schedstate;
-       (void)splsched();
-       spc->spc_flags |= SPCF_RUNNING;
        spl0();
+       KERNEL_UNLOCK_ALL(l, NULL);
 
-       KERNEL_UNLOCK_ALL(l, NULL);
-       l->l_stat = LSONPROC;
-       l->l_pflag |= LP_RUNNING;
        for (;;) {
                LOCKDEBUG_BARRIER(NULL, 0);
                KASSERT((l->l_flag & LW_IDLE) != 0);
@@ -113,7 +113,6 @@
        lwp_lock(l);
        l->l_flag |= LW_IDLE;
        lwp_unlock(l);
-       l->l_cpu = ci;
        ci->ci_data.cpu_idlelwp = l;
 
        return error;
diff -r 0d4a8e253469 -r 8479e3b0550b sys/kern/kern_kthread.c
--- a/sys/kern/kern_kthread.c   Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/kern/kern_kthread.c   Sat Nov 23 19:42:52 2019 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: kern_kthread.c,v 1.43 2018/01/09 22:58:45 pgoyette Exp $       */
+/*     $NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $     */
 
 /*-
- * Copyright (c) 1998, 1999, 2007, 2009 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.43 2018/01/09 22:58:45 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -108,10 +108,10 @@
        }
        mutex_enter(proc0.p_lock);
        lwp_lock(l);
-       l->l_priority = pri;
+       lwp_changepri(l, pri);
        if (ci != NULL) {
                if (ci != l->l_cpu) {
-                       lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
+                       lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
                        lwp_lock(l);
                }
                l->l_pflag |= LP_BOUND;
@@ -133,15 +133,12 @@
         * Set the new LWP running, unless the caller has requested
         * otherwise.
         */
+       KASSERT(l->l_stat == LSIDL);
        if ((flag & KTHREAD_IDLE) == 0) {
-               l->l_stat = LSRUN;
-               sched_enqueue(l, false);
-               lwp_unlock(l);
+               setrunnable(l);
+               /* LWP now unlocked */
        } else {
-               if (ci != NULL)
-                       lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
-               else
-                       lwp_unlock(l);
+               lwp_unlock(l);
        }
        mutex_exit(proc0.p_lock);
 
diff -r 0d4a8e253469 -r 8479e3b0550b sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Sat Nov 23 19:40:34 2019 +0000
+++ b/sys/kern/kern_lwp.c       Sat Nov 23 19:42:52 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.211 2019/11/21 19:47:21 ad Exp $        */
+/*     $NetBSD: kern_lwp.c,v 1.212 2019/11/23 19:42:52 ad Exp $        */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -161,22 +161,23 @@
  *
  *     States and their associated locks:
  *
- *     LSONPROC, LSZOMB:
+ *     LSIDL, LSONPROC, LSZOMB, LSSUSPENDED:
  *
- *             Always covered by spc_lwplock, which protects running LWPs.
- *             This is a per-CPU lock and matches lwp::l_cpu.
+ *             Always covered by spc_lwplock, which protects LWPs not
+ *             associated with any other sync object.  This is a per-CPU
+ *             lock and matches lwp::l_cpu.
  *
- *     LSIDL, LSRUN:
+ *     LSRUN:
  *
  *             Always covered by spc_mutex, which protects the run queues.
  *             This is a per-CPU lock and matches lwp::l_cpu.
  *
  *     LSSLEEP:
  *



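The reworked kern_lwp.c comment above is the heart of the LSIDL change:
LSIDL moves from the spc_mutex (run queue) lock class to the spc_lwplock
class.  As a reading aid only, here is what that table implies, written as a
hypothetical checker; lwp_locked(), spc_lwplock, and spc_mutex are the real
names seen in the hunks above, but this function itself is not kernel code:

    /*
     * Hypothetical reading aid: does the lock covering this LWP match
     * the state/lock table in the kern_lwp.c comment?  Not part of
     * the commit.
     */
    static bool
    lwp_lock_matches_state(struct lwp *l)
    {
            struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

            switch (l->l_stat) {
            case LSIDL:         /* now spc_lwplock, not spc_mutex */
            case LSONPROC:
            case LSZOMB:
            case LSSUSPENDED:
                    return lwp_locked(l, spc->spc_lwplock);
            case LSRUN:
                    return lwp_locked(l, spc->spc_mutex);
            default:
                    /* LSSLEEP, LSSTOP: covered by other sync objects. */
                    return true;
            }
    }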