
[src/trunk]: src/sys/kern Leave the idle LWPs in state LSIDL even when runnin...



details:   https://anonhg.NetBSD.org/src/rev/5b960b9768c4
branches:  trunk
changeset: 1008558:5b960b9768c4
user:      ad <ad%NetBSD.org@localhost>
date:      Thu Mar 26 19:42:39 2020 +0000

description:
Leave the idle LWPs in state LSIDL even when running, so they don't mess up
output from ps/top/etc.  Correctness isn't at stake: LWPs in other states
are temporarily on the CPU at times too (e.g. LSZOMB, LSSLEEP).
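
For illustration only, and not part of the commit: a minimal userland sketch of
why the old behaviour was confusing.  The state names mirror the LS* constants
used in the diff, but the enum values and display letters below are
hypothetical, chosen only to show that an idle LWP reported as LSONPROC looks
like any other on-CPU LWP, whereas one left in LSIDL is easy to recognise and
filter out.

/*
 * Hypothetical sketch (not NetBSD source): how a ps/top-style tool
 * might turn l_stat into a display letter.  With the old behaviour the
 * idle LWP sat in LSONPROC while running, so every CPU contributed an
 * extra "on processor" entry; left in LSIDL it can be skipped.
 */
#include <stdio.h>

/* Names mirror the LS* states referenced in the diff; values are illustrative. */
enum lwp_stat { LSIDL, LSRUN, LSSLEEP, LSSTOP, LSZOMB, LSONPROC };

static char
state_letter(enum lwp_stat stat)
{
	switch (stat) {
	case LSONPROC:	return 'O';	/* running on a CPU */
	case LSRUN:	return 'R';	/* runnable */
	case LSSLEEP:	return 'S';	/* sleeping */
	case LSSTOP:	return 'T';	/* stopped */
	case LSZOMB:	return 'Z';	/* zombie */
	case LSIDL:	return 'I';	/* idle LWP: easy to filter out */
	default:	return '?';
	}
}

int
main(void)
{
	/* Old behaviour: idle LWP reported as LSONPROC, shown as 'O'. */
	printf("idle LWP, old: %c\n", state_letter(LSONPROC));
	/* New behaviour: idle LWP stays LSIDL, shown (or filtered) as 'I'. */
	printf("idle LWP, new: %c\n", state_letter(LSIDL));
	return 0;
}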

diffstat:

 sys/kern/kern_idle.c  |  11 +++++------
 sys/kern/kern_synch.c |  18 ++++++------------
 2 files changed, 11 insertions(+), 18 deletions(-)

diffs (105 lines):

diff -r 2d3a4ae496bd -r 5b960b9768c4 sys/kern/kern_idle.c
--- a/sys/kern/kern_idle.c      Thu Mar 26 19:25:07 2020 +0000
+++ b/sys/kern/kern_idle.c      Thu Mar 26 19:42:39 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $        */
+/*     $NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $        */
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -56,8 +56,8 @@
        /* Update start time for this thread. */
        binuptime(&l->l_stime);
        spc->spc_flags |= SPCF_RUNNING;
-       KASSERT(l->l_stat == LSONPROC);
        KASSERT((l->l_pflag & LP_RUNNING) != 0);
+       l->l_stat = LSIDL;
        lwp_unlock(l);
 
        /*
@@ -91,11 +91,10 @@
                }
                KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
                lwp_lock(l);
-               l->l_stat = LSIDL;
                spc_lock(l->l_cpu);
                mi_switch(l);
                KASSERT(curlwp == l);
-               KASSERT(l->l_stat == LSONPROC);
+               KASSERT(l->l_stat == LSIDL);
        }
 }
 
@@ -119,7 +118,7 @@
                 * mi_switch().  Make the picture look good in case the CPU
                 * takes an interrupt before it calls idle_loop().
                 */
-               l->l_stat = LSONPROC;
+               l->l_stat = LSIDL;
                l->l_pflag |= LP_RUNNING;
                ci->ci_onproc = l;
        }
diff -r 2d3a4ae496bd -r 5b960b9768c4 sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Thu Mar 26 19:25:07 2020 +0000
+++ b/sys/kern/kern_synch.c     Thu Mar 26 19:42:39 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $      */
+/*     $NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $      */
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -387,11 +387,7 @@
                        atomic_swap_uint(&l->l_dopreempt, 0);
                        return true;
                }
-               if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
-                       /* Can't preempt idle loop, don't count as failure. */
-                       atomic_swap_uint(&l->l_dopreempt, 0);
-                       return true;
-               }
+               KASSERT((l->l_flag & LW_IDLE) == 0);
                if (__predict_false(l->l_nopreempt != 0)) {
                        /* LWP holds preemption disabled, explicitly. */
                        if ((dop & DOPREEMPT_COUNTED) == 0) {
@@ -547,12 +543,10 @@
                lwp_setlock(newl, spc->spc_lwplock);
        } else {
                /*
-                * Updates to newl here are unlocked, but newl is the idle
-                * LWP and thus sheltered from outside interference, so no
-                * harm is going to come of it.
+                * The idle LWP does not get set to LSONPROC, because
+                * otherwise it screws up the output from top(1) etc.
                 */
                newl = ci->ci_data.cpu_idlelwp;
-               newl->l_stat = LSONPROC;
                newl->l_pflag |= LP_RUNNING;
                spc->spc_curpriority = PRI_IDLE;
                spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
@@ -840,7 +834,7 @@
        }
 
        KASSERT(l == curlwp);
-       KASSERT(l->l_stat == LSONPROC);
+       KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0); 
 
        SYSCALL_TIME_WAKEUP(l);
        LOCKDEBUG_BARRIER(NULL, 1);

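A consequence visible in the updated KASSERT in kern_synch.c: after this
change, l_stat == LSONPROC alone no longer identifies an LWP that is currently
on a CPU, because the idle LWP keeps LSIDL while it runs.  The sketch below is
hypothetical and not part of the commit; it only restates that rule as a
predicate, using the l_pflag/l_flag bits (LP_RUNNING, LW_IDLE) that appear in
the diff.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lwp.h>

/*
 * Hypothetical helper, not NetBSD source: "does this LWP appear to be
 * running on a CPU right now?"  LP_RUNNING is the on-CPU marker; the
 * state check mirrors the updated KASSERT: LSONPROC for normal LWPs,
 * or an LWP marked LW_IDLE (which now stays in LSIDL while running).
 */
static inline bool
lwp_appears_running(const struct lwp *l)
{
	if ((l->l_pflag & LP_RUNNING) == 0)
		return false;
	return l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0;
}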

