Source-Changes-HG archive


[src/trunk]: src/sys kern: Handle l_mutex with atomic_store_release, atomic_load_consume.



details:   https://anonhg.NetBSD.org/src/rev/8f0730f151ce
branches:  trunk
changeset: 365167:8f0730f151ce
user:      riastradh <riastradh@NetBSD.org>
date:      Sat Apr 09 23:45:36 2022 +0000

description:
kern: Handle l_mutex with atomic_store_release, atomic_load_consume.

- Where the lock is held and known to be correct, no atomic operation is
  needed.
- In loops that acquire the lock, re-check with atomic_load_relaxed before
  restarting with atomic_load_consume.

Nix membar_exit.

(Who knows, using atomic_load_consume here might fix bugs on Alpha!)
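
In outline, the store side of the change looks like the condensed sketch
below.  It mirrors the lwp_setlock() hunk in the diff that follows, with
everything except the relevant lines trimmed; it is illustrative only, not
the committed code.

    /* Assumes <sys/lwp.h>, <sys/mutex.h>, <sys/atomic.h>. */

    /*
     * Publish side: instead of an explicit write barrier followed by
     * a plain store,
     *
     *         membar_exit();
     *         l->l_mutex = mtx;
     *
     * the new lock pointer is published with a release store, so a
     * reader that consume-loads l->l_mutex observes the mutex fully
     * constructed.
     */
    kmutex_t *
    lwp_setlock(struct lwp *l, kmutex_t *mtx)
    {
            kmutex_t *oldmtx = l->l_mutex;  /* lock is held: plain read is fine */

            KASSERT(mutex_owned(oldmtx));

            atomic_store_release(&l->l_mutex, mtx);
            return oldmtx;
    }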

diffstat:

 sys/kern/kern_lwp.c       |  14 ++++++--------
 sys/kern/kern_turnstile.c |  11 ++++++-----
 sys/sys/lwp.h             |   9 +++++----
 3 files changed, 17 insertions(+), 17 deletions(-)

diffs (133 lines):

diff -r 2628c62afc9a -r 8f0730f151ce sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Sat Apr 09 23:45:23 2022 +0000
+++ b/sys/kern/kern_lwp.c       Sat Apr 09 23:45:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.247 2022/03/10 12:21:25 riastradh Exp $ */
+/*     $NetBSD: kern_lwp.c,v 1.248 2022/04/09 23:45:36 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
@@ -217,7 +217,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.247 2022/03/10 12:21:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.248 2022/04/09 23:45:36 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1565,8 +1565,7 @@
 
        KASSERT(mutex_owned(oldmtx));
 
-       membar_exit();
-       l->l_mutex = mtx;
+       atomic_store_release(&l->l_mutex, mtx);
        return oldmtx;
 }
 
@@ -1582,8 +1581,7 @@
        KASSERT(lwp_locked(l, NULL));
 
        old = l->l_mutex;
-       membar_exit();
-       l->l_mutex = mtx;
+       atomic_store_release(&l->l_mutex, mtx);
        mutex_spin_exit(old);
 }
 
@@ -1593,9 +1591,9 @@
        kmutex_t *old;
 
        for (;;) {
-               if (!mutex_tryenter(old = l->l_mutex))
+               if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
                        return 0;
-               if (__predict_true(l->l_mutex == old))
+               if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
                        return 1;
                mutex_spin_exit(old);
        }
diff -r 2628c62afc9a -r 8f0730f151ce sys/kern/kern_turnstile.c
--- a/sys/kern/kern_turnstile.c Sat Apr 09 23:45:23 2022 +0000
+++ b/sys/kern/kern_turnstile.c Sat Apr 09 23:45:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_turnstile.c,v 1.41 2022/02/23 21:54:41 andvar Exp $       */
+/*     $NetBSD: kern_turnstile.c,v 1.42 2022/04/09 23:45:36 riastradh Exp $    */
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.41 2022/02/23 21:54:41 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.42 2022/04/09 23:45:36 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/lockdebug.h>
@@ -252,7 +252,7 @@
                 * Because we already have another LWP lock (l->l_mutex) held,
                 * we need to play a try lock dance to avoid deadlock.
                 */
-               dolock = l->l_mutex != owner->l_mutex;
+               dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
                if (l == owner || (dolock && !lwp_trylock(owner))) {
                        /*
                         * The owner was changed behind us or trylock failed.
@@ -299,7 +299,7 @@
                l = owner;
        }
        LOCKDEBUG_BARRIER(l->l_mutex, 1);
-       if (cur->l_mutex != l->l_mutex) {
+       if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {
                lwp_unlock(l);
                lwp_lock(cur);
        }
@@ -322,7 +322,8 @@
 
        KASSERT(ts->ts_inheritor != NULL);
        ts->ts_inheritor = NULL;
-       dolock = l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock;
+       dolock = (atomic_load_relaxed(&l->l_mutex) ==
+           l->l_cpu->ci_schedstate.spc_lwplock);
        if (dolock) {
                lwp_lock(l);
        }
diff -r 2628c62afc9a -r 8f0730f151ce sys/sys/lwp.h
--- a/sys/sys/lwp.h     Sat Apr 09 23:45:23 2022 +0000
+++ b/sys/sys/lwp.h     Sat Apr 09 23:45:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lwp.h,v 1.214 2022/04/09 13:38:15 riastradh Exp $      */
+/*     $NetBSD: lwp.h,v 1.215 2022/04/09 23:45:37 riastradh Exp $      */
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020
@@ -53,6 +53,7 @@
 /* forward declare this for <machine/cpu.h> so it can get l_cpu. */
 static __inline struct cpu_info *lwp_getcpu(struct lwp *);
 #include <machine/cpu.h>               /* curcpu() and cpu_info */
+#include <sys/atomic.h>
 #ifdef _KERNEL_OPT
 #include "opt_kcov.h"
 #include "opt_kmsan.h"
@@ -407,16 +408,16 @@
 static __inline void
 lwp_lock(lwp_t *l)
 {
-       kmutex_t *old = l->l_mutex;
+       kmutex_t *old = atomic_load_consume(&l->l_mutex);
 
        /*
         * Note: mutex_spin_enter() will have posted a read barrier.
         * Re-test l->l_mutex.  If it has changed, we need to try again.
         */
        mutex_spin_enter(old);
-       while (__predict_false(l->l_mutex != old)) {
+       while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
                mutex_spin_exit(old);
-               old = l->l_mutex;
+               old = atomic_load_consume(&l->l_mutex);
                mutex_spin_enter(old);
        }
 }
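
Taken together, the change pairs a release store on the side that retargets
l_mutex with a consume load on the side that chases it.  The following
self-contained sketch restates that pairing under assumed helper names
(publish_new_lock and chase_lock are hypothetical, not part of the commit);
the loop body corresponds to the lwp_lock() change above.

    /* Assumes <sys/lwp.h>, <sys/mutex.h>, <sys/atomic.h>. */

    /* Writer: retarget the LWP's lock while holding the current one. */
    static void
    publish_new_lock(struct lwp *l, kmutex_t *newmtx)
    {
            /*
             * Release: all prior initialization of newmtx is visible to
             * any reader that consume-loads l->l_mutex afterwards.
             */
            atomic_store_release(&l->l_mutex, newmtx);
    }

    /* Reader: acquire whatever mutex currently covers the LWP. */
    static kmutex_t *
    chase_lock(struct lwp *l)
    {
            /*
             * Consume: the dependent access to *old is ordered after
             * the load, which matters on architectures such as Alpha.
             */
            kmutex_t *old = atomic_load_consume(&l->l_mutex);

            mutex_spin_enter(old);
            /*
             * The re-check can be relaxed: mutex_spin_enter() has
             * already posted the read barrier needed to observe a
             * concurrent change of l->l_mutex.
             */
            while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
                    mutex_spin_exit(old);
                    old = atomic_load_consume(&l->l_mutex);
                    mutex_spin_enter(old);
            }
            return old;
    }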


