Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/libexec/ld.elf_so Fix membars around rtld internal mutex.



details:   https://anonhg.NetBSD.org/src/rev/4403cf127952
branches:  trunk
changeset: 337198:4403cf127952
user:      yamt <yamt%NetBSD.org@localhost>
date:      Mon Apr 06 09:34:15 2015 +0000

description:
Fix membars around rtld internal mutex.

This fixes most of the lockups I observed with Open vSwitch
on NetBSD/amd64.  ("most of" because it still occasionally
locks up because of other problems.  See PR/49816.)

diffstat:

 libexec/ld.elf_so/rtld.c |  20 ++++++++++++--------
 1 files changed, 12 insertions(+), 8 deletions(-)

diffs (82 lines):

diff -r 2a18a670fdbd -r 4403cf127952 libexec/ld.elf_so/rtld.c
--- a/libexec/ld.elf_so/rtld.c  Mon Apr 06 08:39:23 2015 +0000
+++ b/libexec/ld.elf_so/rtld.c  Mon Apr 06 09:34:15 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: rtld.c,v 1.176 2015/04/04 18:51:57 joerg Exp $  */
+/*     $NetBSD: rtld.c,v 1.177 2015/04/06 09:34:15 yamt Exp $   */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
 
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: rtld.c,v 1.176 2015/04/04 18:51:57 joerg Exp $");
+__RCSID("$NetBSD: rtld.c,v 1.177 2015/04/06 09:34:15 yamt Exp $");
 #endif /* not lint */
 
 #include <sys/param.h>
@@ -1544,6 +1544,7 @@
                        /* Yes, so increment use counter */
                        if (atomic_cas_uint(&_rtld_mutex, cur, cur + 1) != cur)
                                continue;
+                       membar_enter();
                        return;
                }
                /*
@@ -1561,6 +1562,7 @@
                /*
                 * Check for race against _rtld_exclusive_exit before sleeping.
                 */
+               membar_sync();
                if ((_rtld_mutex & RTLD_EXCLUSIVE_MASK) ||
                    _rtld_waiter_exclusive)
                        _lwp_park(CLOCK_REALTIME, 0, NULL, 0,
@@ -1588,12 +1590,12 @@
         * Wakeup LWPs waiting for an exclusive lock if this is the last
         * LWP on the shared lock.
         */
+       membar_exit();
        if (atomic_dec_uint_nv(&_rtld_mutex))
                return;
+       membar_sync();
        if ((waiter = _rtld_waiter_exclusive) != 0)
                _lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
-
-       membar_exit();
 }
 
 void
@@ -1608,12 +1610,13 @@
        sigdelset(&blockmask, SIGTRAP); /* Allow the debugger */
        sigprocmask(SIG_BLOCK, &blockmask, mask);
 
-       membar_enter();
-
        for (;;) {
-               if (atomic_cas_uint(&_rtld_mutex, 0, locked_value) == 0)
+               if (atomic_cas_uint(&_rtld_mutex, 0, locked_value) == 0) {
+                       membar_enter();
                        break;
+               }
                waiter = atomic_swap_uint(&_rtld_waiter_exclusive, self);
+               membar_sync();
                cur = _rtld_mutex;
                if (cur == locked_value) {
                        _rtld_error("dead lock detected");
@@ -1633,13 +1636,14 @@
 {
        lwpid_t waiter;
 
+       membar_exit();
        _rtld_mutex = 0;
+       membar_sync();
        if ((waiter = _rtld_waiter_exclusive) != 0)
                _lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
 
        if ((waiter = _rtld_waiter_shared) != 0)
                _lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
 
-       membar_exit();
        sigprocmask(SIG_SETMASK, mask, NULL);
 }



Home | Main Index | Thread Index | Old Index