Source-Changes-HG archive


[src/trunk]: src/sys/kern sched_tick(): don't try to optimise something that's called 10 times a second



details:   https://anonhg.NetBSD.org/src/rev/9a04eed10131
branches:  trunk
changeset: 465898:9a04eed10131
user:      ad <ad@NetBSD.org>
date:      Fri Dec 06 18:33:19 2019 +0000

description:
sched_tick(): don't try to optimise something that's called 10 times a
second; it's a fine way to introduce bugs (and I did).  Use the MI
interface for rescheduling, which always does the correct thing.
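
The fix replaces open-coded atomic updates of ci_want_resched (and the
__HAVE_PREEMPTION special casing) with the MI rescheduling helper.  A
minimal sketch of the pattern, using only names that appear in the diff
below; per the diff, sched_resched_cpu() is entered with the spc lock
held and releases it when its third argument is true:

	/* Ask for a reschedule on "ci" at priority "pri". */
	spc_lock(ci);
	sched_resched_cpu(ci, pri, true);	/* true: drops the spc lock */
	/* spc now unlocked */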

diffstat:

 sys/kern/sched_4bsd.c |  33 +++++++++++++++------------------
 sys/kern/sched_m2.c   |   8 +++++---
 2 files changed, 20 insertions(+), 21 deletions(-)

diffs (113 lines):

diff -r 0bf237f0de4e -r 9a04eed10131 sys/kern/sched_4bsd.c
--- a/sys/kern/sched_4bsd.c     Fri Dec 06 18:16:22 2019 +0000
+++ b/sys/kern/sched_4bsd.c     Fri Dec 06 18:33:19 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: sched_4bsd.c,v 1.40 2019/12/01 15:34:46 ad Exp $       */
+/*     $NetBSD: sched_4bsd.c,v 1.41 2019/12/06 18:33:19 ad Exp $       */
 
 /*
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.40 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.41 2019/12/06 18:33:19 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -104,13 +104,15 @@
 sched_tick(struct cpu_info *ci)
 {
        struct schedstate_percpu *spc = &ci->ci_schedstate;
+       pri_t pri = PRI_NONE;
        lwp_t *l;
 
        spc->spc_ticks = rrticks;
 
        if (CURCPU_IDLE_P()) {
-               atomic_or_uint(&ci->ci_want_resched,
-                   RESCHED_IDLE | RESCHED_UPREEMPT);
+               spc_lock(ci);
+               sched_resched_cpu(ci, MAXPRI_KTHREAD, true);
+               /* spc now unlocked */
                return;
        }
        l = ci->ci_onproc;
@@ -128,12 +130,7 @@
                break;
        case SCHED_RR:
                /* Force it into mi_switch() to look for other jobs to run. */
-#ifdef __HAVE_PREEMPTION
-               atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-               atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
-#else
-               atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
-#endif
+               pri = MAXPRI_KERNEL_RT;
                break;
        default:
                if (spc->spc_flags & SPCF_SHOULDYIELD) {
@@ -142,25 +139,25 @@
                         * due to buggy or inefficient code.  Force a
                         * kernel preemption.
                         */
-#ifdef __HAVE_PREEMPTION
-                       atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-                       atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
-#else
-                       atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
-#endif
+                       pri = MAXPRI_KERNEL_RT;
                } else if (spc->spc_flags & SPCF_SEENRR) {
                        /*
                         * The process has already been through a roundrobin
                         * without switching and may be hogging the CPU.
                         * Indicate that the process should yield.
                         */
-                       spc->spc_flags |= SPCF_SHOULDYIELD;
-                       atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
+                       pri = MAXPRI_KTHREAD;
                } else {
                        spc->spc_flags |= SPCF_SEENRR;
                }
                break;
        }
+
+       if (pri != PRI_NONE) {
+               spc_lock(ci);
+               sched_resched_cpu(ci, pri, true);
+               /* spc now unlocked */
+       }
 }
 
 /*
diff -r 0bf237f0de4e -r 9a04eed10131 sys/kern/sched_m2.c
--- a/sys/kern/sched_m2.c       Fri Dec 06 18:16:22 2019 +0000
+++ b/sys/kern/sched_m2.c       Fri Dec 06 18:33:19 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: sched_m2.c,v 1.36 2019/12/01 15:34:46 ad Exp $ */
+/*     $NetBSD: sched_m2.c,v 1.37 2019/12/06 18:33:19 ad Exp $ */
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.36 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.37 2019/12/06 18:33:19 ad Exp $");
 
 #include <sys/param.h>
 
@@ -330,7 +330,9 @@
         */
        if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
                spc->spc_flags |= SPCF_SHOULDYIELD;
-               atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
+               spc_lock(ci);
+               sched_resched_cpu(ci, MAXPRI_KTHREAD, true);
+               /* spc now unlocked */
        } else
                spc->spc_ticks = l->l_sched.timeslice; 
        lwp_unlock(l);
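
A note on the sched_4bsd.c rework: instead of requesting the reschedule
separately in each switch case, the wanted priority is now recorded in a
local variable and a single locked call is made at the end, so the
locking and preemption logic lives in one place.  A condensed sketch of
the new sched_tick() shape, taken from the diff above (the switch
expression, l->l_class, is not shown in the hunk and is assumed from the
full file):

	pri_t pri = PRI_NONE;	/* PRI_NONE: no reschedule wanted */

	switch (l->l_class) {
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
		pri = MAXPRI_KERNEL_RT;
		break;
	/* ... other cases may set pri = MAXPRI_KTHREAD ... */
	}

	if (pri != PRI_NONE) {
		spc_lock(ci);
		sched_resched_cpu(ci, pri, true);
		/* spc now unlocked */
	}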


