Source-Changes-HG archive


[src/trunk]: src/sys unix(4): Convert membar_exit to membar_release.



details:   https://anonhg.NetBSD.org/src/rev/44b96b1a9ca6
branches:  trunk
changeset: 365176:44b96b1a9ca6
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Sat Apr 09 23:52:22 2022 +0000

description:
unix(4): Convert membar_exit to membar_release.

Use atomic_load_consume or atomic_load_relaxed where necessary.

Comment on why unlocked nonatomic access is valid where it is done.
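
The heart of the change is a release/consume publication protocol for
the so_lock pointer: the writer makes sure the new lock is fully set
up, issues membar_release(), and only then stores the pointer; readers
fetch it with atomic_load_consume() so the lock's initialization is
visible before mutex_enter(), and recheck it with atomic_load_relaxed()
once the mutex itself orders the accesses.  A minimal sketch of the
protocol using the <sys/atomic.h> primitives the diff relies on -- the
helper names here are illustrative, not part of the commit; a portable
C11 rendering follows the diff below:

	#include <sys/atomic.h>
	#include <sys/mutex.h>

	/*
	 * Writer: publish a new lock pointer.  Caller holds the lock
	 * currently pointed to, per the protocol documented in
	 * uipc_socket2.c.
	 */
	static void
	publish_lock(kmutex_t **lockp, kmutex_t *newlock)
	{
		mutex_obj_hold(newlock);  /* newlock fully set up here */
		membar_release();         /* order setup before the store */
		*lockp = newlock;         /* plain store, fenced above */
	}

	/*
	 * Reader: acquire whatever lock is current, rechecking after
	 * mutex_enter() in case the pointer changed while we slept.
	 */
	static void
	acquire_current_lock(kmutex_t **lockp)
	{
		kmutex_t *lock;

		lock = atomic_load_consume(lockp); /* pairs with release */
		mutex_enter(lock);
		while (lock != atomic_load_relaxed(lockp)) {
			mutex_exit(lock);
			lock = atomic_load_consume(lockp);
			mutex_enter(lock);
		}
	}

The relaxed load in the recheck suffices because any thread that
changed the pointer did so while holding the lock just acquired, so the
mutex provides the ordering; the consume load is only needed for the
unlocked first fetch.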

diffstat:

 sys/kern/uipc_socket.c  |   8 ++++++--
 sys/kern/uipc_socket2.c |  29 +++++++++++++++++++++--------
 sys/kern/uipc_usrreq.c  |  11 ++++++++---
 sys/sys/socketvar.h     |   9 +++++----
 4 files changed, 40 insertions(+), 17 deletions(-)

diffs (201 lines):

diff -r d6cbfab0148a -r 44b96b1a9ca6 sys/kern/uipc_socket.c
--- a/sys/kern/uipc_socket.c    Sat Apr 09 23:52:05 2022 +0000
+++ b/sys/kern/uipc_socket.c    Sat Apr 09 23:52:22 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uipc_socket.c,v 1.301 2022/03/12 16:06:15 riastradh Exp $      */
+/*     $NetBSD: uipc_socket.c,v 1.302 2022/04/09 23:52:22 riastradh Exp $      */
 
 /*
  * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.301 2022/03/12 16:06:15 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.302 2022/04/09 23:52:22 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -539,6 +539,10 @@
         * the lock with another socket, e.g. socketpair(2) case.
         */
        if (lockso) {
+               /*
+                * lockso->so_lock should be stable at this point, so
+                * no need for atomic_load_*.
+                */
                lock = lockso->so_lock;
                so->so_lock = lock;
                mutex_obj_hold(lock);
diff -r d6cbfab0148a -r 44b96b1a9ca6 sys/kern/uipc_socket2.c
--- a/sys/kern/uipc_socket2.c   Sat Apr 09 23:52:05 2022 +0000
+++ b/sys/kern/uipc_socket2.c   Sat Apr 09 23:52:22 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uipc_socket2.c,v 1.140 2021/10/02 02:07:41 thorpej Exp $       */
+/*     $NetBSD: uipc_socket2.c,v 1.141 2022/04/09 23:52:23 riastradh Exp $     */
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -58,7 +58,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.140 2021/10/02 02:07:41 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.141 2022/04/09 23:52:23 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -134,7 +134,7 @@
  *
  * o In order to mutate so_lock, the lock pointed to by the current value
  *   of so_lock must be held: i.e., the socket must be held locked by the
- *   changing thread.  The thread must issue membar_exit() to prevent
+ *   changing thread.  The thread must issue membar_release() to prevent
  *   memory accesses being reordered, and can set so_lock to the desired
  *   value.  If the lock pointed to by the new value of so_lock is not
  *   held by the changing thread, the socket must then be considered
@@ -324,6 +324,9 @@
        /*
         * Share the lock with the listening-socket, it may get unshared
         * once the connection is complete.
+        *
+        * so_lock is stable while we hold the socket locked, so no
+        * need for atomic_load_* here.
         */
        mutex_obj_hold(head->so_lock);
        so->so_lock = head->so_lock;
@@ -537,7 +540,7 @@
                error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
        else
                error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
-       if (__predict_false(lock != so->so_lock))
+       if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
                solockretry(so, lock);
        return error;
 }
@@ -589,6 +592,8 @@
  * Reset a socket's lock pointer.  Wake all threads waiting on the
  * socket's condition variables so that they can restart their waits
  * using the new lock.  The existing lock must be held.
+ *
+ * Caller must have issued membar_release before this.
  */
 void
 solockreset(struct socket *so, kmutex_t *lock)
@@ -1464,9 +1469,9 @@
 solockretry(struct socket *so, kmutex_t *lock)
 {
 
-       while (lock != so->so_lock) {
+       while (lock != atomic_load_relaxed(&so->so_lock)) {
                mutex_exit(lock);
-               lock = so->so_lock;
+               lock = atomic_load_consume(&so->so_lock);
                mutex_enter(lock);
        }
 }
@@ -1475,6 +1480,10 @@
 solocked(const struct socket *so)
 {
 
+       /*
+        * Used only for diagnostic assertions, so so_lock should be
+        * stable at this point, hence no need for atomic_load_*.
+        */
        return mutex_owned(so->so_lock);
 }
 
@@ -1483,6 +1492,10 @@
 {
        const kmutex_t *lock;
 
+       /*
+        * Used only for diagnostic assertions, so so_lock should be
+        * stable at this point, hence no need for atomic_load_*.
+        */
        lock = so1->so_lock;
        if (lock != so2->so_lock)
                return false;
@@ -1533,7 +1546,7 @@
                        error = 0;
                } else
                        error = cv_wait_sig(&so->so_cv, lock);
-               if (__predict_false(lock != so->so_lock))
+               if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
                        solockretry(so, lock);
                if (error != 0)
                        return error;
@@ -1568,7 +1581,7 @@
                error = cv_timedwait_sig(&so->so_cv, lock, timo);
        else
                error = cv_timedwait(&so->so_cv, lock, timo);
-       if (__predict_false(lock != so->so_lock))
+       if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
                solockretry(so, lock);
        return error;
 }
diff -r d6cbfab0148a -r 44b96b1a9ca6 sys/kern/uipc_usrreq.c
--- a/sys/kern/uipc_usrreq.c    Sat Apr 09 23:52:05 2022 +0000
+++ b/sys/kern/uipc_usrreq.c    Sat Apr 09 23:52:22 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uipc_usrreq.c,v 1.201 2021/08/08 20:54:48 nia Exp $    */
+/*     $NetBSD: uipc_usrreq.c,v 1.202 2022/04/09 23:52:23 riastradh Exp $      */
 
 /*-
  * Copyright (c) 1998, 2000, 2004, 2008, 2009, 2020 The NetBSD Foundation, Inc.
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.201 2021/08/08 20:54:48 nia Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.202 2022/04/09 23:52:23 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -291,7 +291,12 @@
        lock = unp->unp_streamlock;
        unp->unp_streamlock = NULL;
        mutex_obj_hold(lock);
-       membar_exit();
+       /*
+        * Ensure lock is initialized before publishing it with
+        * solockreset.  Pairs with atomic_load_consume in solock and
+        * various loops to reacquire lock after wakeup.
+        */
+       membar_release();
        /*
         * possible race if lock is not held - see comment in
         * uipc_usrreq(PRU_ACCEPT).
diff -r d6cbfab0148a -r 44b96b1a9ca6 sys/sys/socketvar.h
--- a/sys/sys/socketvar.h       Sat Apr 09 23:52:05 2022 +0000
+++ b/sys/sys/socketvar.h       Sat Apr 09 23:52:22 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: socketvar.h,v 1.164 2021/10/23 01:28:33 thorpej Exp $  */
+/*     $NetBSD: socketvar.h,v 1.165 2022/04/09 23:52:23 riastradh Exp $        */
 
 /*-
  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
@@ -74,6 +74,7 @@
 struct lwp;
 struct uidinfo;
 #else
+#include <sys/atomic.h>
 #include <sys/uidinfo.h>
 #endif
 
@@ -520,12 +521,12 @@
 {
        kmutex_t *lock;
 
-       lock = so->so_lock;
+       lock = atomic_load_consume(&so->so_lock);
        mutex_enter(lock);
-       if (__predict_false(lock != so->so_lock))
+       if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
                solockretry(so, lock);
 }
-       
+
 static __inline void
 sounlock(struct socket *so)
 {

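For readers more familiar with the C11 memory model: membar_release()
paired with atomic_load_consume() corresponds to a release store paired
with a consume (in practice, acquire) load.  A hedged, portable sketch
of the same retry loop, with a pthread mutex standing in for kmutex_t
and all names invented for illustration:

	#include <pthread.h>
	#include <stdatomic.h>

	struct sock {
		_Atomic(pthread_mutex_t *) lock;
	};

	/*
	 * Writer: caller holds the current lock, as solockreset()
	 * requires.  The release store does the job of the explicit
	 * membar_release() in uipc_usrreq.c.
	 */
	static void
	sock_lock_reset(struct sock *so, pthread_mutex_t *newlock)
	{
		atomic_store_explicit(&so->lock, newlock,
		    memory_order_release);
	}

	/* Reader: mirrors solock()/solockretry(). */
	static void
	sock_lock(struct sock *so)
	{
		pthread_mutex_t *lock;

		/*
		 * Acquire stands in for atomic_load_consume(); most
		 * compilers promote consume to acquire anyway.
		 */
		lock = atomic_load_explicit(&so->lock,
		    memory_order_acquire);
		pthread_mutex_lock(lock);
		while (lock != atomic_load_explicit(&so->lock,
			    memory_order_relaxed)) {
			pthread_mutex_unlock(lock);
			lock = atomic_load_explicit(&so->lock,
			    memory_order_acquire);
			pthread_mutex_lock(lock);
		}
	}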

