Source-Changes-HG archive


[src/trunk]: src/external/bsd/libc++/dist/libcxxrt/src Import revision c61efa...



details:   https://anonhg.NetBSD.org/src/rev/477d3bb928a4
branches:  trunk
changeset: 787816:477d3bb928a4
user:      joerg <joerg%NetBSD.org@localhost>
date:      Thu Jul 04 22:29:29 2013 +0000

description:
Import revision c61efa043b14378efbd69c9a2686d44ed46ae179 of libcxxrt.
This adds __cxa_deleted_virtual and some explicit store barriers before
lock releases, and fixes ARM guard variable initialisation.

diffstat:

 external/bsd/libc++/dist/libcxxrt/src/atomic.h     |    1 +
 external/bsd/libc++/dist/libcxxrt/src/auxhelper.cc |   10 +
 external/bsd/libc++/dist/libcxxrt/src/dwarf_eh.h   |    4 +-
 external/bsd/libc++/dist/libcxxrt/src/exception.cc |   18 ++
 external/bsd/libc++/dist/libcxxrt/src/guard.cc     |  168 +++++++++-----------
 external/bsd/libc++/dist/libcxxrt/src/memory.cc    |   58 +++---
 6 files changed, 138 insertions(+), 121 deletions(-)

diffs (truncated from 367 to 300 lines):

diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/atomic.h
--- a/external/bsd/libc++/dist/libcxxrt/src/atomic.h    Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/atomic.h    Thu Jul 04 22:29:29 2013 +0000
@@ -27,3 +27,4 @@
 #define ATOMIC_LOAD(addr)\
        (__sync_synchronize(), *addr)
 #endif
+
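
The commit message mentions explicit store barriers before lock releases. The
hunk above only shows the existing ATOMIC_LOAD helper; as a hedged sketch of
the general pattern (the macro name below is hypothetical, not necessarily
what this import adds), a release-style store issues a full barrier and then
performs the plain store:

    // Hypothetical companion to the ATOMIC_LOAD macro above: flush earlier
    // writes with a full barrier, then store, so a thread that observes the
    // stored value (e.g. an unlocked guard) also observes the writes that
    // preceded it.
    #define ATOMIC_STORE(addr, value)\
            (__sync_synchronize(), *(addr) = (value))
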
diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/auxhelper.cc
--- a/external/bsd/libc++/dist/libcxxrt/src/auxhelper.cc        Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/auxhelper.cc        Thu Jul 04 22:29:29 2013 +0000
@@ -65,3 +65,13 @@
     abort();
 }
 
+/**
+ * Compilers may (but are not required to) set any deleted-virtual function's
+ * vtable entry to this function.  This makes debugging slightly easier, as
+ * users can add a breakpoint on this function to tell if they've accidentally
+ * called a deleted-virtual function.
+ */
+extern "C" void __cxa_deleted_virtual()
+{
+    abort();
+}
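
For context, the new hook covers deleted virtual functions. A minimal sketch
with hypothetical class and member names; a compiler may fill the vtable slot
of a deleted virtual with __cxa_deleted_virtual, much as pure virtuals use
__cxa_pure_virtual, so an accidental call aborts at a breakpoint-friendly
symbol:

    struct Widget
    {
        virtual ~Widget() {}
        // Deleted, but still occupies a vtable slot; that slot may point at
        // __cxa_deleted_virtual.
        virtual void legacy_api() = delete;
    };

    int main()
    {
        Widget w;   // fine: the deleted virtual is never called
        return 0;
    }
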
diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/dwarf_eh.h
--- a/external/bsd/libc++/dist/libcxxrt/src/dwarf_eh.h  Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/dwarf_eh.h  Thu Jul 04 22:29:29 2013 +0000
@@ -57,6 +57,8 @@
 /// DWARF data encoding types.  
 enum dwarf_data_encoding
 {
+       /// Absolute pointer value
+       DW_EH_PE_absptr   = 0x00,
        /// Unsigned, little-endian, base 128-encoded (variable length).
        DW_EH_PE_uleb128 = 0x01,
        /// Unsigned 16-bit integer.
@@ -95,8 +97,6 @@
 {
        /// Value is omitted
        DW_EH_PE_omit     = 0xff,
-       /// Absolute pointer value
-       DW_EH_PE_absptr   = 0x00,
        /// Value relative to program counter
        DW_EH_PE_pcrel    = 0x10,
        /// Value relative to the text segment
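
Moving DW_EH_PE_absptr into dwarf_data_encoding matches how an encoding byte
is split: the low nibble selects the value format (absptr, uleb128, udata2,
...) while the upper bits select what the value is relative to (pcrel,
textrel, ...), with 0xff meaning the value is omitted. A hedged sketch of that
split (helper names are hypothetical, not code from dwarf_eh.h):

    #include <stdint.h>

    // Separate the two halves of a DWARF EH pointer-encoding byte.
    static inline int  eh_value_format(uint8_t enc)  { return enc & 0x0f; }
    static inline int  eh_relative_base(uint8_t enc) { return enc & 0x70; }
    static inline bool eh_is_omitted(uint8_t enc)    { return enc == 0xff; }
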
diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/exception.cc
--- a/external/bsd/libc++/dist/libcxxrt/src/exception.cc        Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/exception.cc        Thu Jul 04 22:29:29 2013 +0000
@@ -39,6 +39,24 @@
 #pragma weak pthread_setspecific
 #pragma weak pthread_getspecific
 #pragma weak pthread_once
+#ifdef LIBCXXRT_WEAK_LOCKS
+#pragma weak pthread_mutex_lock
+#define pthread_mutex_lock(mtx) do {\
+       if (pthread_mutex_lock) pthread_mutex_lock(mtx);\
+       } while(0)
+#pragma weak pthread_mutex_unlock
+#define pthread_mutex_unlock(mtx) do {\
+       if (pthread_mutex_unlock) pthread_mutex_unlock(mtx);\
+       } while(0)
+#pragma weak pthread_cond_signal
+#define pthread_cond_signal(cv) do {\
+       if (pthread_cond_signal) pthread_cond_signal(cv);\
+       } while(0)
+#pragma weak pthread_cond_wait
+#define pthread_cond_wait(cv, mtx) do {\
+       if (pthread_cond_wait) pthread_cond_wait(cv, mtx);\
+       } while(0)
+#endif
 
 using namespace ABI_NAMESPACE;
 
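
The LIBCXXRT_WEAK_LOCKS block makes the locking calls optional at link time:
when libpthread was never linked in, the weak references resolve to NULL and
the wrappers become no-ops. A standalone sketch of the same pattern (the
function name is hypothetical):

    #include <pthread.h>

    #pragma weak pthread_mutex_lock

    static void lock_if_threaded(pthread_mutex_t *mtx)
    {
        // Single-threaded programs that never pulled in libpthread see a
        // NULL weak reference and simply skip the lock.
        if (pthread_mutex_lock)
            pthread_mutex_lock(mtx);
    }
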
diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/guard.cc
--- a/external/bsd/libc++/dist/libcxxrt/src/guard.cc    Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/guard.cc    Thu Jul 04 22:29:29 2013 +0000
@@ -41,37 +41,90 @@
  * initialised.  
  */
 #include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
 #include <pthread.h>
 #include <assert.h>
+#include "atomic.h"
 
+// Older GCC doesn't define __LITTLE_ENDIAN__
+#ifndef __LITTLE_ENDIAN__
+       // If __BYTE_ORDER__ is defined, use that instead
+#      ifdef __BYTE_ORDER__
+#              if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#                      define __LITTLE_ENDIAN__
+#              endif
+       // x86 and ARM are the most common little-endian CPUs, so let's have a
+       // special case for them (ARM is already special cased).  Assume everything
+       // else is big endian.
+#      elif defined(__x86_64) || defined(__i386)
+#              define __LITTLE_ENDIAN__
+#      endif
+#endif
+
+
+/*
+ * The least significant bit of the guard variable indicates that the object
+ * has been initialised, the most significant bit is used for a spinlock.
+ */
 #ifdef __arm__
 // ARM ABI - 32-bit guards.
+typedef uint32_t guard_t;
+static const uint32_t LOCKED = ((guard_t)1) << 31;
+static const uint32_t INITIALISED = 1;
+#else
+typedef uint64_t guard_t;
+#      if defined(__LITTLE_ENDIAN__)
+static const guard_t LOCKED = ((guard_t)1) << 63;
+static const guard_t INITIALISED = 1;
+#      else
+static const guard_t LOCKED = 1;
+static const guard_t INITIALISED = ((guard_t)1) << 56;
+#      endif
+#endif
 
 /**
  * Acquires a lock on a guard, returning 0 if the object has already been
  * initialised, and 1 if it has not.  If the object is already constructed then
  * this function just needs to read a byte from memory and return.
  */
-extern "C" int __cxa_guard_acquire(volatile int32_t *guard_object)
+extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
 {
-       if ((1<<31) == *guard_object) { return 0; }
-       // If we can atomically move the value from 0 -> 1, then this is
-       // uninitialised.
-       if (__sync_bool_compare_and_swap(guard_object, 0, 1))
+       // Not an atomic read, doesn't establish a happens-before relationship, but
+       // if one is already established and we end up seeing an initialised state
+       // then it's a fast path, otherwise we'll do something more expensive than
+       // this test anyway...
+       if ((INITIALISED == *guard_object)) { return 0; }
+       // Spin trying to do the initialisation
+       while (1)
        {
-               return 1;
+               // Loop trying to move the value of the guard from 0 (not
+               // locked, not initialised) to the locked-uninitialised
+               // position.
+               switch (__sync_val_compare_and_swap(guard_object, 0, LOCKED))
+               {
+                       // If the old value was 0, we succeeded, so continue
+                       // initialising
+                       case 0:
+                               return 1;
+                       // If this was already initialised, return and let the caller skip
+                       // initialising it again.
+                       case INITIALISED:
+                               return 0;
+                       // If it is locked by another thread, relinquish the CPU and try
+                       // again later.
+                       case LOCKED:
+                       case LOCKED | INITIALISED:
+                               sched_yield();
+                               break;
+                       // If it is some other value, then something has gone badly wrong.
+                       // Give up.
+                       default:
+                               fprintf(stderr, "Invalid state detected attempting to lock static initialiser.\n");
+                               abort();
+               }
        }
-       // If the value is not 0, some other thread was initialising this.  Spin
-       // until it's finished.
-       while (__sync_bool_compare_and_swap(guard_object, (1<<31), (1<<31)))
-       {
-               // If the other thread aborted, then we grab the lock
-               if (__sync_bool_compare_and_swap(guard_object, 0, 1))
-               {
-                       return 1;
-               }
-               sched_yield();
-       }
+       //__builtin_unreachable();
        return 0;
 }
 
@@ -79,86 +132,21 @@
  * Releases the lock without marking the object as initialised.  This function
  * is called if initialising a static causes an exception to be thrown.
  */
-extern "C" void __cxa_guard_abort(int32_t *guard_object)
+extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
 {
-       assert(__sync_bool_compare_and_swap(guard_object, 1, 0));
+       __attribute__((unused))
+       bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, 0);
+       assert(reset);
 }
 /**
  * Releases the guard and marks the object as initialised.  This function is
  * called after successful initialisation of a static.
  */
-extern "C" void __cxa_guard_release(int32_t *guard_object)
+extern "C" void __cxa_guard_release(volatile guard_t *guard_object)
 {
-       assert(__sync_bool_compare_and_swap(guard_object, 1, (1<<31)));
+       __attribute__((unused))
+       bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, INITIALISED);
+       assert(reset);
 }
 
 
-#else
-// Itanium ABI: 64-bit guards
-
-/**
- * Returns a pointer to the low 32 bits in a 64-bit value, respecting the
- * platform's byte order.
- */
-static int32_t *low_32_bits(volatile int64_t *ptr)
-{
-       int32_t *low= (int32_t*)ptr;
-       // Test if the machine is big endian - constant propagation at compile time
-       // should eliminate this completely.
-       int one = 1;
-       if (*(char*)&one != 1)
-       {
-               low++;
-       }
-       return low;
-}
-
-/**
- * Acquires a lock on a guard, returning 0 if the object has already been
- * initialised, and 1 if it has not.  If the object is already constructed then
- * this function just needs to read a byte from memory and return.
- */
-extern "C" int __cxa_guard_acquire(volatile int64_t *guard_object)
-{
-       char first_byte = (*guard_object) >> 56;
-       if (1 == first_byte) { return 0; }
-       int32_t *lock = low_32_bits(guard_object);
-       // Simple spin lock using the low 32 bits.  We assume that concurrent
-       // attempts to initialize statics are very rare, so we don't need to
-       // optimise for the case where we have lots of threads trying to acquire
-       // the lock at the same time.
-       while (!__sync_bool_compare_and_swap_4(lock, 0, 1))
-       {
-               if (1 == ((*guard_object) >> 56))
-               {
-                       break;
-               }
-               sched_yield();
-       }
-       // We have to test the guard again, in case another thread has performed
-       // the initialisation while we were trying to acquire the lock.
-       first_byte = (*guard_object) >> 56;
-       return (1 != first_byte);
-}
-
-/**
- * Releases the lock without marking the object as initialised.  This function
- * is called if initialising a static causes an exception to be thrown.
- */
-extern "C" void __cxa_guard_abort(int64_t *guard_object)
-{
-       int32_t *lock = low_32_bits(guard_object);
-       *lock = 0;
-}
-/**
- * Releases the guard and marks the object as initialised.  This function is
- * called after successful initialisation of a static.
- */
-extern "C" void __cxa_guard_release(int64_t *guard_object)
-{
-       // Set the first byte to 1
-       *guard_object |= ((int64_t)1) << 56;
-       __cxa_guard_abort(guard_object);
-}
-
-#endif
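
The rewritten guard.cc backs the calls the compiler emits around every
function-local static. A hand-written equivalent of that lowering, as a hedged
sketch (the guard is declared here as uint64_t to match the non-ARM guard_t
above; the real ABI type is the target's __guard):

    #include <stdint.h>

    extern "C" int  __cxa_guard_acquire(volatile uint64_t *);
    extern "C" void __cxa_guard_release(volatile uint64_t *);
    extern "C" void __cxa_guard_abort(volatile uint64_t *);

    static volatile uint64_t guard;       // all zero: unlocked, uninitialised
    static int cached;

    // Roughly what the compiler generates for: static int cached = compute();
    int get_value()
    {
        if (__cxa_guard_acquire(&guard))   // 1 means we must run the initialiser
        {
            try
            {
                cached = 42;                    // stand-in for compute()
                __cxa_guard_release(&guard);    // mark initialised and unlock
            }
            catch (...)
            {
                __cxa_guard_abort(&guard);      // unlock, leave uninitialised
                throw;
            }
        }
        return cached;
    }
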
diff -r 7edf9e5fda63 -r 477d3bb928a4 external/bsd/libc++/dist/libcxxrt/src/memory.cc
--- a/external/bsd/libc++/dist/libcxxrt/src/memory.cc   Thu Jul 04 22:20:51 2013 +0000
+++ b/external/bsd/libc++/dist/libcxxrt/src/memory.cc   Thu Jul 04 22:29:29 2013 +0000
@@ -99,40 +99,21 @@
 __attribute__((weak))
 void* operator new(size_t size, const std::nothrow_t &) throw()
 {
-       if (0 == size)
-       {
-               size = 1;
+       try {
+               return :: operator new(size);
+       } catch (...) {
+               // nothrow operator new should return NULL in case of
+               // std::bad_alloc exception in new handler
+               return NULL;
        }
-       void *mem = malloc(size);
-       while (0 == mem)
-       {


