Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys Rip out pserialize(9) logic now that the RCU patent has ...



details:   https://anonhg.NetBSD.org/src/rev/b463eb716f0f
branches:  trunk
changeset: 846866:b463eb716f0f
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Tue Dec 03 05:07:48 2019 +0000

description:
Rip out pserialize(9) logic now that the RCU patent has expired.

pserialize_perform() is now basically just xc_barrier(XC_HIGHPRI).
No more tentacles throughout the scheduler.  Simplify the psz read
count for diagnostic assertions by putting it unconditionally into
cpu_info.

From rmind@, tidied up by me.

diffstat:

 sys/arch/mips/rmi/rmixl_cpu.c    |   10 +-
 sys/kern/kern_lwp.c              |    8 +-
 sys/kern/kern_softint.c          |    8 +-
 sys/kern/kern_synch.c            |    8 +-
 sys/kern/subr_pserialize.c       |  241 +++++---------------------------------
 sys/rump/librump/rumpkern/rump.c |    9 +-
 sys/sys/cpu_data.h               |    4 +-
 sys/sys/pserialize.h             |    3 +-
 8 files changed, 49 insertions(+), 242 deletions(-)

diffs (truncated from 598 to 300 lines):

diff -r b6ce01f06efc -r b463eb716f0f sys/arch/mips/rmi/rmixl_cpu.c
--- a/sys/arch/mips/rmi/rmixl_cpu.c     Tue Dec 03 05:01:58 2019 +0000
+++ b/sys/arch/mips/rmi/rmixl_cpu.c     Tue Dec 03 05:07:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: rmixl_cpu.c,v 1.10 2019/12/01 15:34:45 ad Exp $        */
+/*     $NetBSD: rmixl_cpu.c,v 1.11 2019/12/03 05:07:48 riastradh Exp $ */
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -38,7 +38,7 @@
 #include "locators.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.10 2019/12/01 15:34:45 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.11 2019/12/03 05:07:48 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_ddb.h"
@@ -414,8 +414,6 @@
 {
        printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
        printf("cpu_callout %p\n", dp->cpu_callout);
-       printf("cpu_unused1 %p\n", dp->cpu_unused1);
-       printf("cpu_unused2 %d\n", dp->cpu_unused2);
        printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);    /* TBD */
        printf("&cpu_xcall %p\n", &dp->cpu_xcall);              /* TBD */
        printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
@@ -423,9 +421,7 @@
        printf("cpu_lockstat %p\n", dp->cpu_lockstat);
        printf("cpu_index %d\n", dp->cpu_index);
        printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
-       printf("cpu_spin_locks %d\n", dp->cpu_spin_locks);
-       printf("cpu_simple_locks %d\n", dp->cpu_simple_locks);
-       printf("cpu_spin_locks2 %d\n", dp->cpu_spin_locks2);
+       printf("cpu_psz_read_depth %d\n", dp->cpu_psz_read_depth);
        printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
        printf("cpu_softints %d\n", dp->cpu_softints);
        printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
diff -r b6ce01f06efc -r b463eb716f0f sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Tue Dec 03 05:01:58 2019 +0000
+++ b/sys/kern/kern_lwp.c       Tue Dec 03 05:07:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.215 2019/12/01 15:27:58 ad Exp $        */
+/*     $NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -209,7 +209,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.215 2019/12/01 15:27:58 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -225,7 +225,6 @@
 #include <sys/syscallargs.h>
 #include <sys/syscall_stats.h>
 #include <sys/kauth.h>
-#include <sys/pserialize.h>
 #include <sys/sleepq.h>
 #include <sys/lockdebug.h>
 #include <sys/kmem.h>
@@ -1036,9 +1035,6 @@
                pmap_activate(new_lwp);
        spl0();
 
-       /* Note trip through cpu_switchto(). */
-       pserialize_switchpoint();
-
        LOCKDEBUG_BARRIER(NULL, 0);
        KPREEMPT_ENABLE(new_lwp);
        if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
diff -r b6ce01f06efc -r b463eb716f0f sys/kern/kern_softint.c
--- a/sys/kern/kern_softint.c   Tue Dec 03 05:01:58 2019 +0000
+++ b/sys/kern/kern_softint.c   Tue Dec 03 05:07:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_softint.c,v 1.52 2019/12/01 15:34:46 ad Exp $     */
+/*     $NetBSD: kern_softint.c,v 1.53 2019/12/03 05:07:48 riastradh Exp $      */
 
 /*-
  * Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.52 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.53 2019/12/03 05:07:48 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -182,7 +182,6 @@
 #include <sys/evcnt.h>
 #include <sys/cpu.h>
 #include <sys/xcall.h>
-#include <sys/pserialize.h>
 
 #include <net/netisr.h>
 
@@ -884,9 +883,6 @@
                l->l_pflag &= ~LP_TIMEINTR;
        }
 
-       /* Indicate a soft-interrupt switch. */
-       pserialize_switchpoint();
-
        /*
         * If we blocked while handling the interrupt, the pinned LWP is
         * gone so switch to the idle LWP.  It will select a new LWP to
diff -r b6ce01f06efc -r b463eb716f0f sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c     Tue Dec 03 05:01:58 2019 +0000
+++ b/sys/kern/kern_synch.c     Tue Dec 03 05:07:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_synch.c,v 1.327 2019/12/01 15:34:46 ad Exp $      */
+/*     $NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $       */
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.327 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -743,9 +743,6 @@
                        l->l_lwpctl->lc_pctr++;
                }
 
-               /* Note trip through cpu_switchto(). */
-               pserialize_switchpoint();
-
                KASSERT(l->l_cpu == ci);
                splx(oldspl);
                /*
@@ -755,7 +752,6 @@
                retval = 1;
        } else {
                /* Nothing to do - just unlock and return. */
-               pserialize_switchpoint();
                mutex_spin_exit(spc->spc_mutex);
                l->l_pflag &= ~LP_PREEMPTING;
                lwp_unlock(l);
diff -r b6ce01f06efc -r b463eb716f0f sys/kern/subr_pserialize.c
--- a/sys/kern/subr_pserialize.c        Tue Dec 03 05:01:58 2019 +0000
+++ b/sys/kern/subr_pserialize.c        Tue Dec 03 05:07:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $ */
+/*     $NetBSD: subr_pserialize.c,v 1.14 2019/12/03 05:07:49 riastradh Exp $   */
 
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@@ -28,58 +28,26 @@
 
 /*
  * Passive serialization.
- *
- * Implementation accurately matches the lapsed US patent 4809168, therefore
- * code is patent-free in the United States.  Your use of this code is at
- * your own risk.
- * 
- * Note for NetBSD developers: all changes to this source file must be
- * approved by the <core>.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.14 2019/12/03 05:07:49 riastradh Exp $");
 
 #include <sys/param.h>
-
-#include <sys/condvar.h>
+#include <sys/atomic.h>
 #include <sys/cpu.h>
 #include <sys/evcnt.h>
 #include <sys/kmem.h>
-#include <sys/mutex.h>
 #include <sys/pserialize.h>
-#include <sys/proc.h>
-#include <sys/queue.h>
 #include <sys/xcall.h>
 
 struct pserialize {
-       TAILQ_ENTRY(pserialize) psz_chain;
        lwp_t *                 psz_owner;
-       kcpuset_t *             psz_target;
-       kcpuset_t *             psz_pass;
 };
 
-static u_int                   psz_work_todo   __cacheline_aligned;
-static kmutex_t                        psz_lock        __cacheline_aligned;
 static struct evcnt            psz_ev_excl     __cacheline_aligned;
 
 /*
- * As defined in "Method 1":
- *     q0: "0 MP checkpoints have occured".
- *     q1: "1 MP checkpoint has occured".
- *     q2: "2 MP checkpoints have occured".
- */
-static TAILQ_HEAD(, pserialize)        psz_queue0      __cacheline_aligned;
-static TAILQ_HEAD(, pserialize)        psz_queue1      __cacheline_aligned;
-static TAILQ_HEAD(, pserialize)        psz_queue2      __cacheline_aligned;
-
-#ifdef LOCKDEBUG
-#include <sys/percpu.h>
-
-static percpu_t                *psz_debug_nreads       __cacheline_aligned;
-#endif
-
-/*
  * pserialize_init:
  *
  *     Initialize passive serialization structures.
@@ -88,16 +56,8 @@
 pserialize_init(void)
 {
 
-       psz_work_todo = 0;
-       TAILQ_INIT(&psz_queue0);
-       TAILQ_INIT(&psz_queue1);
-       TAILQ_INIT(&psz_queue2);
-       mutex_init(&psz_lock, MUTEX_DEFAULT, IPL_SCHED);
        evcnt_attach_dynamic(&psz_ev_excl, EVCNT_TYPE_MISC, NULL,
            "pserialize", "exclusive access");
-#ifdef LOCKDEBUG
-       psz_debug_nreads = percpu_alloc(sizeof(uint32_t));
-#endif
 }
 
 /*
@@ -110,11 +70,7 @@
 {
        pserialize_t psz;
 
-       psz = kmem_zalloc(sizeof(struct pserialize), KM_SLEEP);
-       kcpuset_create(&psz->psz_target, true);
-       kcpuset_create(&psz->psz_pass, true);
-       psz->psz_owner = NULL;
-
+       psz = kmem_zalloc(sizeof(*psz), KM_SLEEP);
        return psz;
 }
 
@@ -128,25 +84,19 @@
 {
 
        KASSERT(psz->psz_owner == NULL);
-
-       kcpuset_destroy(psz->psz_target);
-       kcpuset_destroy(psz->psz_pass);
-       kmem_free(psz, sizeof(struct pserialize));
+       kmem_free(psz, sizeof(*psz));
 }
 
 /*
  * pserialize_perform:
  *
- *     Perform the write side of passive serialization.  The calling
- *     thread holds an exclusive lock on the data object(s) being updated.
- *     We wait until every processor in the system has made at least two
- *     passes through cpu_switchto().  The wait is made with the caller's
- *     update lock held, but is short term.
+ *     Perform the write side of passive serialization.  This operation
+ *     MUST be serialized at a caller level (e.g. with a mutex or by a
+ *     single-threaded use).
  */
 void
 pserialize_perform(pserialize_t psz)
 {
-       int n;
 
        KASSERT(!cpu_intr_p());
        KASSERT(!cpu_softintr_p());
@@ -155,46 +105,23 @@
                return;
        }
        KASSERT(psz->psz_owner == NULL);
-       KASSERT(ncpu > 0);
 
        if (__predict_false(mp_online == false)) {
                psz_ev_excl.ev_count++;
                return;
        }
 
-       /*
-        * Set up the object and put it onto the queue.  The lock
-        * activity here provides the necessary memory barrier to
-        * make the caller's data update completely visible to
-        * other processors.
-        */
        psz->psz_owner = curlwp;
-       kcpuset_copy(psz->psz_target, kcpuset_running);
-       kcpuset_zero(psz->psz_pass);
-
-       mutex_spin_enter(&psz_lock);
-       TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
-       psz_work_todo++;



Home | Main Index | Thread Index | Old Index