Source-Changes-HG archive


[src/trunk]: src/sys Move counting involuntary switches into mi_switch. preem...



details:   https://anonhg.NetBSD.org/src/rev/ebaffe4556e4
branches:  trunk
changeset: 446251:ebaffe4556e4
user:      mlelstv <mlelstv%NetBSD.org@localhost>
date:      Wed Nov 28 19:46:22 2018 +0000

description:
Move the counting of involuntary context switches into mi_switch(). preempt()
passes that information by setting a new LWP flag.

While here, don't even try to switch when the scheduler has no other LWP
to run. This check is currently duplicated across all callers of preempt()
and will be removed there.

ok mrg@.
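
In isolation, the hand-off works like this: preempt() no longer bumps the
involuntary-switch counter itself, it only marks the LWP, and mi_switch()
interprets and consumes the mark. Below is a minimal userspace sketch of that
pattern; the field and flag names are borrowed from the diff for readability,
but the toy_* types and functions are simplified stand-ins, not the kernel
implementation.

/*
 * Toy model of the preempt()/mi_switch() flag hand-off. Not kernel code:
 * struct toy_lwp flattens the per-CPU scheduler state into the LWP.
 */
#include <stdio.h>

#define LP_PREEMPTING    0x00020000  /* mi_switch called involuntarily */
#define SPCF_SHOULDYIELD 0x0001      /* scheduler has another LWP ready */

struct toy_lwp {
        unsigned l_pflag;    /* private flags */
        unsigned l_ncsw;     /* total context switches */
        unsigned l_nivcsw;   /* involuntary context switches */
        unsigned spc_flags;  /* per-CPU scheduler flags (flattened here) */
};

/* mi_switch(): count every switch; count it as involuntary as well when
 * the caller set LP_PREEMPTING, then consume the one-shot flag. */
static void
toy_mi_switch(struct toy_lwp *l)
{
        l->l_ncsw++;
        if ((l->l_pflag & LP_PREEMPTING) != 0)
                l->l_nivcsw++;
        l->l_pflag &= ~LP_PREEMPTING;
        /* ... the actual context switch would happen here ... */
}

/* preempt(): bail out early when the scheduler has nothing else to run,
 * otherwise mark the switch as involuntary and let mi_switch() count it. */
static void
toy_preempt(struct toy_lwp *l)
{
        if ((l->spc_flags & SPCF_SHOULDYIELD) == 0)
                return;
        l->l_pflag |= LP_PREEMPTING;
        toy_mi_switch(l);
}

int
main(void)
{
        struct toy_lwp l = { .spc_flags = SPCF_SHOULDYIELD };

        toy_preempt(&l);     /* involuntary: both counters bump */
        toy_mi_switch(&l);   /* voluntary: only l_ncsw bumps */

        printf("ncsw=%u nivcsw=%u\n", l.l_ncsw, l.l_nivcsw);
        return 0;
}

Running the sketch prints ncsw=2 nivcsw=1: the preempted switch bumps both
counters, the voluntary one only the total.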

diffstat:

 sys/kern/kern_synch.c |  14 +++++++++++---
 sys/sys/lwp.h         |   3 ++-
 2 files changed, 13 insertions(+), 4 deletions(-)

diffs (71 lines):

diff -r 13ec9b453e23 -r ebaffe4556e4 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Wed Nov 28 19:36:43 2018 +0000
+++ b/sys/kern/kern_synch.c     Wed Nov 28 19:46:22 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.320 2018/11/28 19:36:43 mlelstv Exp $ */
+/*     $NetBSD: kern_synch.c,v 1.321 2018/11/28 19:46:22 mlelstv Exp $ */
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.320 2018/11/28 19:36:43 mlelstv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.321 2018/11/28 19:46:22 mlelstv Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -286,12 +286,16 @@
 {
        struct lwp *l = curlwp;
 
+       /* check if the scheduler has another LWP to run */
+       if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) == 0)
+               return;
+
        KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
        lwp_lock(l);
        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
        KASSERT(l->l_stat == LSONPROC);
        l->l_kpriority = false;
-       l->l_nivcsw++;
+       l->l_pflag |= LP_PREEMPTING;
        (void)mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
 }
@@ -649,6 +653,9 @@
                KASSERT(l->l_ctxswtch == 0);
                l->l_ctxswtch = 1;
                l->l_ncsw++;
+               if ((l->l_pflag & LP_PREEMPTING) != 0)
+                       l->l_nivcsw++;
+               l->l_pflag &= ~LP_PREEMPTING;
                KASSERT((l->l_pflag & LP_RUNNING) != 0);
                l->l_pflag &= ~LP_RUNNING;
 
@@ -752,6 +759,7 @@
                /* Nothing to do - just unlock and return. */
                pserialize_switchpoint();
                mutex_spin_exit(spc->spc_mutex);
+               l->l_pflag &= ~LP_PREEMPTING;
                lwp_unlock(l);
                retval = 0;
        }
diff -r 13ec9b453e23 -r ebaffe4556e4 sys/sys/lwp.h
--- a/sys/sys/lwp.h     Wed Nov 28 19:36:43 2018 +0000
+++ b/sys/sys/lwp.h     Wed Nov 28 19:46:22 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lwp.h,v 1.179 2018/04/19 21:19:07 christos Exp $       */
+/*     $NetBSD: lwp.h,v 1.180 2018/11/28 19:46:22 mlelstv Exp $        */
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -258,6 +258,7 @@
 #define        LP_VFORKWAIT    0x00000200 /* Waiting at vfork() for a child */
 #define        LP_SINGLESTEP   0x00000400 /* Single step thread in ptrace(2) */
 #define        LP_TIMEINTR     0x00010000 /* Time this soft interrupt */
+#define        LP_PREEMPTING   0x00020000 /* mi_switch called involuntarily */
 #define        LP_RUNNING      0x20000000 /* Active on a CPU */
 #define        LP_BOUND        0x80000000 /* Bound to a CPU */
 
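One practical consequence of the early SPCF_SHOULDYIELD test in preempt(): call
sites that currently guard the call with the same test can drop it once the
follow-up cleanup mentioned in the description lands. A hedged sketch of such a
call site, before and after, assuming the usual kernel declarations (curlwp,
preempt(), SPCF_SHOULDYIELD) are in scope; the functions themselves are
hypothetical and not taken from the tree:

/* Hypothetical call site before this change: the caller checks the
 * scheduler flag itself before yielding. */
static void
hypothetical_caller_before(void)
{
        struct lwp *l = curlwp;

        if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) != 0)
                preempt();
}

/* After this change the guard is redundant: preempt() returns
 * immediately when the scheduler has no other LWP to run. */
static void
hypothetical_caller_after(void)
{
        preempt();
}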


