Source-Changes-HG archive


[src/trunk]: src/sys Separate the scheduler from the context switching code.



details:   https://anonhg.NetBSD.org/src/rev/0c974221f7b6
branches:  trunk
changeset: 536787:0c974221f7b6
user:      gmcgarry <gmcgarry@NetBSD.org>
date:      Sun Sep 22 05:36:48 2002 +0000

description:
Separate the scheduler from the context switching code.

This is done by adding an extra argument to mi_switch() and
cpu_switch() that specifies the process to switch to.  If NULL is
passed, the new function chooseproc() is invoked to wait for a
process to appear on the run queue.
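
For illustration, a minimal sketch of the two call forms under the
new convention (the function and its name are hypothetical; the
locking mirrors the yield() path in the diff below):

	/* Hypothetical caller, sketch only -- not part of this change. */
	static void
	switch_example(struct proc *p, struct proc *next)
	{
		int s;

		SCHED_LOCK(s);
		p->p_stat = SRUN;
		setrunqueue(p);
		if (next != NULL)
			mi_switch(p, next);	/* direct hand-off; needs
						 * __HAVE_CHOOSEPROC */
		else
			mi_switch(p, NULL);	/* scheduler picks; idles in
						 * chooseproc() if the run
						 * queue is empty */
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	}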

This also provides an opportunity for optimisation when "switching to self".

Also added are C versions of the setrunqueue() and remrunqueue()
low-level primitives, used when __HAVE_MD_RUNQUEUE is not defined by
MD code.
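
Since the diff below is truncated before these routines appear, here
is a sketch of what the C setrunqueue() might look like, assuming the
traditional 4.4BSD design of 32 run queues indexed by priority / 4,
with a bitmask (sched_whichqs) marking the non-empty queues:

	/*
	 * Sketch only: enqueue p at the tail of its priority queue
	 * and mark that queue non-empty.
	 */
	void
	setrunqueue(struct proc *p)
	{
		struct prochd *rq;
		struct proc *prev;
		int whichq;

		whichq = p->p_priority / 4;
		sched_whichqs |= (1 << whichq);
		rq = &sched_qs[whichq];
		prev = rq->ph_rlink;
		p->p_forw = (struct proc *)rq;	/* tail insert */
		rq->ph_rlink = p;
		prev->p_forw = p;
		p->p_back = prev;
	}

remrunqueue() would be the inverse: unlink p from its queue and clear
the queue's bit in sched_whichqs when the queue becomes empty.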

All these changes are contingent upon MD code defining the
__HAVE_CHOOSEPROC flag to indicate that cpu_switch() supports the new
convention.
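
In other words, a port opts in with something like the following (the
header placement is hypothetical; the names are those used by the
changeset):

	/* In an MD header, e.g. <machine/types.h>: */
	#define	__HAVE_CHOOSEPROC	/* cpu_switch() honours a target */

	/* The port must then supply a two-argument cpu_switch(): */
	void	cpu_switch(struct proc *oldp, struct proc *newp);

	/* ...plus the primitives that chooseproc() relies on: */
	struct proc *nextrunqueue(void);  /* dequeue best proc, or NULL */
	void	cpu_idle(void);		  /* pause until work may exist */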

diffstat:

 sys/kern/kern_sig.c   |    8 +-
 sys/kern/kern_synch.c |  223 ++++++++++++++++++++++++++++++++++++++++++-------
 sys/sys/proc.h        |   17 ++-
 3 files changed, 207 insertions(+), 41 deletions(-)

diffs (truncated from 417 to 300 lines):

diff -r 12e03b2e6428 -r 0c974221f7b6 sys/kern/kern_sig.c
--- a/sys/kern/kern_sig.c       Sun Sep 22 04:11:32 2002 +0000
+++ b/sys/kern/kern_sig.c       Sun Sep 22 05:36:48 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_sig.c,v 1.124 2002/09/04 01:32:35 matt Exp $      */
+/*     $NetBSD: kern_sig.c,v 1.125 2002/09/22 05:36:48 gmcgarry Exp $  */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1991, 1993
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.124 2002/09/04 01:32:35 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.125 2002/09/22 05:36:48 gmcgarry Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_compat_sunos.h"
@@ -1081,7 +1081,7 @@
                        if (dolock)
                                SCHED_LOCK(s);
                        proc_stop(p);
-                       mi_switch(p);
+                       mi_switch(p, NULL);
                        SCHED_ASSERT_UNLOCKED();
                        if (dolock)
                                splx(s);
@@ -1152,7 +1152,7 @@
                                if (dolock)
                                        SCHED_LOCK(s);
                                proc_stop(p);
-                               mi_switch(p);
+                               mi_switch(p, NULL);
                                SCHED_ASSERT_UNLOCKED();
                                if (dolock)
                                        splx(s);
diff -r 12e03b2e6428 -r 0c974221f7b6 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Sun Sep 22 04:11:32 2002 +0000
+++ b/sys/kern/kern_synch.c     Sun Sep 22 05:36:48 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.112 2002/09/04 01:32:39 matt Exp $    */
+/*     $NetBSD: kern_synch.c,v 1.113 2002/09/22 05:36:48 gmcgarry Exp $        */
 
 /*-
  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -78,7 +78,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.112 2002/09/04 01:32:39 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.113 2002/09/22 05:36:48 gmcgarry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_ktrace.h"
@@ -469,7 +469,7 @@
        p->p_stats->p_ru.ru_nvcsw++;
 
        SCHED_ASSERT_LOCKED();
-       mi_switch(p);
+       mi_switch(p, NULL);
 
 #if    defined(DDB) && !defined(GPROF)
        /* handy breakpoint location after process "wakes" */
@@ -717,6 +717,31 @@
        SCHED_UNLOCK(s);
 }
 
+#if defined(__HAVE_CHOOSEPROC)
+/*
+ * Remove the next process of the highest priority from the run queue.
+ * If the queue is empty, then call cpu_idle() and wait until one is
+ * available.  Set curproc to NULL to avoid the process accumulating
+ * time while we idle.
+ */
+struct proc *
+chooseproc(void)
+{
+       struct proc *oldp, *newp;
+
+       oldp = curproc;
+       curproc = NULL;
+       for (;;) {
+               newp = nextrunqueue();
+               if (newp != NULL)
+                       break;
+               cpu_idle();
+       }
+       curproc = oldp;
+       return (newp);
+}
+#endif
+
 /*
  * General yield call.  Puts the current process back on its run queue and
  * performs a voluntary context switch.
@@ -732,7 +757,7 @@
        p->p_stat = SRUN;
        setrunqueue(p);
        p->p_stats->p_ru.ru_nvcsw++;
-       mi_switch(p);
+       mi_switch(p, NULL);
        SCHED_ASSERT_UNLOCKED();
        splx(s);
 }
@@ -749,18 +774,12 @@
        struct proc *p = curproc;
        int s;
 
-       /*
-        * XXX Switching to a specific process is not supported yet.
-        */
-       if (newp != NULL)
-               panic("preempt: cpu_preempt not yet implemented");
-
        SCHED_LOCK(s);
        p->p_priority = p->p_usrpri;
        p->p_stat = SRUN;
        setrunqueue(p);
        p->p_stats->p_ru.ru_nivcsw++;
-       mi_switch(p);
+       mi_switch(p, newp);
        SCHED_ASSERT_UNLOCKED();
        splx(s);
 }
@@ -771,7 +790,7 @@
  * the sched_lock held.
  */
 void
-mi_switch(struct proc *p)
+mi_switch(struct proc *p, struct proc *newp)
 {
        struct schedstate_percpu *spc;
        struct rlimit *rlim;
@@ -796,6 +815,10 @@
        KDASSERT(p->p_cpu != NULL);
        KDASSERT(p->p_cpu == curcpu());
 
+#if !defined(__HAVE_CHOOSEPROC)
+       KDASSERT(newp == NULL);
+#endif
+
        spc = &p->p_cpu->ci_schedstate;
 
 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
@@ -807,7 +830,7 @@
 
        /*
         * Compute the amount of time during which the current
-        * process was running, and add that to its total so far.
+        * process was running.
         */
        microtime(&tv);
        u = p->p_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
@@ -819,8 +842,6 @@
                u -= 1000000;
                s++;
        }
-       p->p_rtime.tv_usec = u;
-       p->p_rtime.tv_sec = s;
 
        /*
         * Check if the process exceeds its cpu resource allocation.
@@ -857,34 +878,87 @@
        kstack_check_magic(p);
 #endif
 
+#if defined(__HAVE_CHOOSEPROC)
+
        /*
-        * If we are using h/w performance counters, save context.
+        * If we haven't been told which process to switch to, then
+        * call nextrunqueue() to select the next process from the run
+        * queue.
+        */
+       if (newp == NULL)
+               newp = nextrunqueue();
+
+       /*
+        * If we're switching to ourself then don't bother reloading
+        * the address space or recalculating the process execution
+        * time.  Just short-circuit out of here.
         */
-#if PERFCTRS
-       if (PMC_ENABLED(p))
-               pmc_save_context(p);
+       if (newp == p) {
+               p->p_stat = SONPROC;
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+               sched_unlock_idle();
 #endif
+               goto out;
+       }
+
+#endif /* __HAVE_CHOOSEPROC */
+
+       /*
+        * We won't be short-circuiting our path out of here, so
+        * update the outgoing process CPU usage.
+        */
+       p->p_rtime.tv_usec = u;
+       p->p_rtime.tv_sec = s;
+
+#if defined(__HAVE_CHOOSEPROC)
 
        /*
-        * Pick a new current process and switch to it.  When we
-        * run again, we'll return back here.
+        * If newp == NULL, then nextrunqueue() couldn't find a
+        * runnable process.  We must invoke chooseproc() to wait for
+        * one to become available.
         */
-       uvmexp.swtch++;
-       cpu_switch(p);
+       if (newp == NULL)
+               newp = chooseproc();
 
        /*
-        * If we are using h/w performance counters, restore context.
+        * Check if we're switching to ourself.  If we're not, then
+        * call cpu_switch() to switch to the new current process.
+        * We must have idled so the process CPU time has to be
+        * recalculated.
         */
+       if (p == newp) {
+               p->p_stat = SONPROC;
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+               sched_unlock_idle();
+#endif
+       } else
+
+#endif /* __HAVE_CHOOSEPROC */
+
+       {
+               /*
+                * If we are using h/w performance counters, save context.
+                */
 #if PERFCTRS
-       if (PMC_ENABLED(p))
-               pmc_restore_context(p);
+               if (PMC_ENABLED(p))
+                       pmc_save_context(p);
 #endif
 
-       /*
-        * Make sure that MD code released the scheduler lock before
-        * resuming us.
-        */
-       SCHED_ASSERT_UNLOCKED();
+               /*
+                * Switch to the new current process.  When we
+                * run again, we'll return back here.
+                */
+               uvmexp.swtch++;
+               cpu_switch(p, newp);
+
+               /*
+                * If we are using h/w performance counters, restore context.
+                */
+#if PERFCTRS
+               if (PMC_ENABLED(p))
+                       pmc_restore_context(p);
+#endif
+       }
 
        /*
         * We're running again; record our new start time.  We might
@@ -895,6 +969,16 @@
        KDASSERT(p->p_cpu == curcpu());
        microtime(&p->p_cpu->ci_schedstate.spc_runtime);
 
+#if defined(__HAVE_CHOOSEPROC)
+out:
+#endif
+
+       /*
+        * Make sure that MD code released the scheduler lock before
+        * resuming us.
+        */
+       SCHED_ASSERT_UNLOCKED();
+
 #if defined(MULTIPROCESSOR)
        /*
         * Reacquire the kernel_lock now.  We do this after we've
@@ -1042,7 +1126,7 @@
         */
        proclist_lock_read();
        SCHED_LOCK(s);
-       for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
+       LIST_FOREACH(p, &allproc, p_list) {
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                switch (p->p_stat) {
@@ -1066,3 +1150,78 @@
        SCHED_UNLOCK(s);
        proclist_unlock_read();
 }
+
+/*
+ * Low-level routines to access the run queue.  Optimised assembler
+ * routines can override these.


