Source-Changes-HG archive


[src/trunk]: src/sys/kern Restore xcall(9) fast path using atomic_load/store_*.



details:   https://anonhg.NetBSD.org/src/rev/c0e0e442ddfd
branches:  trunk
changeset: 846808:c0e0e442ddfd
user:      riastradh <riastradh@NetBSD.org>
date:      Sun Dec 01 20:56:39 2019 +0000

description:
Restore xcall(9) fast path using atomic_load/store_*.

While here, fix a bug that was formerly in xcall(9): the xc_wait fast
path was missing an acquire operation, which is needed so that all
memory operations done by the xcall on remote CPUs happen before any
memory operations on the issuing CPU after xc_wait returns.
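
To make the ordering concrete, here is a small caller-side sketch
(illustration only, not part of the commit; remote_func, example and
result are made-up names).  The acquire in the xc_wait fast path is
what guarantees that a store done by the xcall function is visible to
the issuing CPU once xc_wait returns:

#include <sys/types.h>
#include <sys/xcall.h>

static int result;

/* Runs on every CPU via the low-priority xcall mechanism. */
static void
remote_func(void *arg1, void *arg2)
{

	result = 42;
}

static int
example(void)
{
	uint64_t where;

	where = xc_broadcast(0, remote_func, NULL, NULL);
	xc_wait(where);

	/*
	 * Without an acquire in xc_wait's fast path, a weakly
	 * ordered CPU could perform this read before the remote
	 * store to result becomes visible.
	 */
	return result;
}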

All stores of xc->xc_donep are done with atomic_store_release so that
we can safely use atomic_load_acquire to read it outside the lock.
However, this fast path only works on platforms with cheap 64-bit
atomic load/store, so conditionalize it on __HAVE_ATOMIC64_LOADSTORE.
(Under the lock, no need for atomic loads since nobody else will be
issuing stores.)
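
For illustration only (not part of the commit), a minimal sketch of
the resulting pattern, with the xcall state reduced to the fields it
needs and the structure renamed xc_sketch: the acquire load in the
fast path pairs with the release store done under the lock, so the
fast path is safe without taking xc_lock.

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

struct xc_sketch {
	kmutex_t	xc_lock;
	kcondvar_t	xc_busy;
	uint64_t	xc_donep;	/* completed cross-calls */
};

static void
wait_sketch(struct xc_sketch *xc, uint64_t where)
{

#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* Fast path: pairs with the release store below. */
	if (atomic_load_acquire(&xc->xc_donep) >= where)
		return;
#endif

	/* Slow path: the lock provides all the ordering we need. */
	mutex_enter(&xc->xc_lock);
	while (xc->xc_donep < where)
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	mutex_exit(&xc->xc_lock);
}

static void
done_sketch(struct xc_sketch *xc)
{

	mutex_enter(&xc->xc_lock);
#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* Plain read is fine: stores happen only under xc_lock. */
	atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
	xc->xc_donep++;
#endif
	cv_broadcast(&xc->xc_busy);
	mutex_exit(&xc->xc_lock);
}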

For review, here's the relevant diff against the old version of the
fast path, taken from before the fast path was removed and other
parts of the file changed:

diff --git a/sys/kern/subr_xcall.c b/sys/kern/subr_xcall.c
index 45a877aa90e0..b6bfb6455291 100644
--- a/sys/kern/subr_xcall.c
+++ b/sys/kern/subr_xcall.c
@@ -84,6 +84,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.27 2019/10/06 15:11:17 uwe Exp $");
 #include <sys/evcnt.h>
 #include <sys/kthread.h>
 #include <sys/cpu.h>
+#include <sys/atomic.h>

 #ifdef _RUMPKERNEL
 #include "rump_private.h"
@@ -334,10 +353,12 @@ xc_wait(uint64_t where)
                xc = &xc_low_pri;
        }

+#ifdef __HAVE_ATOMIC64_LOADSTORE
        /* Fast path, if already done. */
-       if (xc->xc_donep >= where) {
+       if (atomic_load_acquire(&xc->xc_donep) >= where) {
                return;
        }
+#endif

        /* Slow path: block until awoken. */
        mutex_enter(&xc->xc_lock);
@@ -422,7 +443,11 @@ xc_thread(void *cookie)
                (*func)(arg1, arg2);

                mutex_enter(&xc->xc_lock);
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+               atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
+#else
                xc->xc_donep++;
+#endif
        }
        /* NOTREACHED */
 }
@@ -462,7 +487,6 @@ xc__highpri_intr(void *dummy)
         * Lock-less fetch of function and its arguments.
         * Safe since it cannot change at this point.
         */
-       KASSERT(xc->xc_donep < xc->xc_headp);
        func = xc->xc_func;
        arg1 = xc->xc_arg1;
        arg2 = xc->xc_arg2;
@@ -475,7 +499,13 @@ xc__highpri_intr(void *dummy)
         * cross-call has been processed - notify waiters, if any.
         */
        mutex_enter(&xc->xc_lock);
-       if (++xc->xc_donep == xc->xc_headp) {
+       KASSERT(xc->xc_donep < xc->xc_headp);
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+       atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
+#else
+       xc->xc_donep++;
+#endif
+       if (xc->xc_donep == xc->xc_headp) {
                cv_broadcast(&xc->xc_busy);
        }
        mutex_exit(&xc->xc_lock);

diffstat:

 sys/kern/subr_xcall.c |  24 ++++++++++++++++++++----
 1 files changed, 20 insertions(+), 4 deletions(-)

diffs (60 lines):

diff -r b97f77d85984 -r c0e0e442ddfd sys/kern/subr_xcall.c
--- a/sys/kern/subr_xcall.c     Sun Dec 01 20:31:40 2019 +0000
+++ b/sys/kern/subr_xcall.c     Sun Dec 01 20:56:39 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_xcall.c,v 1.31 2019/12/01 17:06:00 ad Exp $       */
+/*     $NetBSD: subr_xcall.c,v 1.32 2019/12/01 20:56:39 riastradh Exp $        */
 
 /*-
  * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc.
@@ -74,7 +74,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.31 2019/12/01 17:06:00 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.32 2019/12/01 20:56:39 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -353,7 +353,14 @@
                xc = &xc_low_pri;
        }
 
-       /* Block until awoken. */
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+       /* Fast path, if already done. */
+       if (atomic_load_acquire(&xc->xc_donep) >= where) {
+               return;
+       }
+#endif
+
+       /* Slow path: block until awoken. */
        mutex_enter(&xc->xc_lock);
        while (xc->xc_donep < where) {
                cv_wait(&xc->xc_busy, &xc->xc_lock);
@@ -436,7 +443,11 @@
                (*func)(arg1, arg2);
 
                mutex_enter(&xc->xc_lock);
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+               atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
+#else
                xc->xc_donep++;
+#endif
        }
        /* NOTREACHED */
 }
@@ -489,7 +500,12 @@
         */
        mutex_enter(&xc->xc_lock);
        KASSERT(xc->xc_donep < xc->xc_headp);
-       if (++xc->xc_donep == xc->xc_headp) {
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+       atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
+#else
+       xc->xc_donep++;
+#endif
+       if (xc->xc_donep == xc->xc_headp) {
                cv_broadcast(&xc->xc_busy);
        }
        mutex_exit(&xc->xc_lock);


