Source-Changes-HG archive


[src/trunk]: src/sys/external/bsd/drm2/include/linux Attempt to match Linux s...



details:   https://anonhg.NetBSD.org/src/rev/28dcd1ca157d
branches:  trunk
changeset: 366173:28dcd1ca157d
user:      riastradh <riastradh@NetBSD.org>
date:      Mon Aug 27 13:41:08 2018 +0000

description:
Attempt to match Linux semantics for membars implied by atomics.

This is kind of moot at the moment because we're mostly x86-only for
drmkms, but this might help in the future if we ever go beyond x86.
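
For context, the Linux convention being matched here (documented in the Linux tree, e.g. Documentation/atomic_t.txt) is roughly: read-modify-write atomics that return a value are fully ordered, those that return nothing are unordered, and callers bracket the unordered ones with smp_mb__before_atomic()/smp_mb__after_atomic() when they need ordering.  The caller-side sketch below is illustrative only and is not part of this change; the nwaiters/wakeup_needed names are invented.

static atomic_t nwaiters;		/* hypothetical counter */
static volatile int wakeup_needed;	/* hypothetical flag */

static void
announce_waiter(void)
{
	wakeup_needed = 1;		/* ordinary store */
	smp_mb__before_atomic();	/* order the store before the RMW */
	atomic_inc(&nwaiters);		/* void atomic: no barrier implied */
	smp_mb__after_atomic();		/* order the RMW before later accesses */
}

static int
retire_waiter(void)
{
	/*
	 * Value-returning atomic: a full memory barrier is implied on
	 * both sides, so no explicit smp_mb__* calls are needed here.
	 */
	return atomic_dec_return(&nwaiters) == 0;
}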

diffstat:

 sys/external/bsd/drm2/include/linux/atomic.h |  137 +++++++++++++++++++++-----
 1 files changed, 108 insertions(+), 29 deletions(-)

diffs (truncated from 336 to 300 lines):

diff -r 06ef707c8656 -r 28dcd1ca157d sys/external/bsd/drm2/include/linux/atomic.h
--- a/sys/external/bsd/drm2/include/linux/atomic.h      Mon Aug 27 13:40:53 2018 +0000
+++ b/sys/external/bsd/drm2/include/linux/atomic.h      Mon Aug 27 13:41:08 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: atomic.h,v 1.12 2018/08/27 13:40:53 riastradh Exp $    */
+/*     $NetBSD: atomic.h,v 1.13 2018/08/27 13:41:08 riastradh Exp $    */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -36,6 +36,22 @@
 
 #include <machine/limits.h>
 
+#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
+#  define      smp_mb__before_atomic()         membar_exit()
+#  define      smp_mb__after_atomic()          membar_enter()
+#else
+#  define      smp_mb__before_atomic()         __insn_barrier()
+#  define      smp_mb__after_atomic()          __insn_barrier()
+#endif
+
+/*
+ * atomic (u)int operations
+ *
+ *     Atomics that return a value, other than atomic_read, imply a
+ *     full memory barrier.  Those that do not return a value
+ *     imply no memory barrier.
+ */
+
 struct atomic {
        union {
                volatile int au_int;
@@ -50,78 +66,106 @@
 static inline int
 atomic_read(atomic_t *atomic)
 {
+       /* no membar */
        return atomic->a_u.au_int;
 }
 
 static inline void
 atomic_set(atomic_t *atomic, int value)
 {
+       /* no membar */
        atomic->a_u.au_int = value;
 }
 
 static inline void
 atomic_add(int addend, atomic_t *atomic)
 {
+       /* no membar */
        atomic_add_int(&atomic->a_u.au_uint, addend);
 }
 
 static inline void
 atomic_sub(int subtrahend, atomic_t *atomic)
 {
+       /* no membar */
        atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
 }
 
 static inline int
 atomic_add_return(int addend, atomic_t *atomic)
 {
-       return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
+       int v;
+
+       smp_mb__before_atomic();
+       v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
+       smp_mb__after_atomic();
+
+       return v;
 }
 
 static inline void
 atomic_inc(atomic_t *atomic)
 {
+       /* no membar */
        atomic_inc_uint(&atomic->a_u.au_uint);
 }
 
 static inline void
 atomic_dec(atomic_t *atomic)
 {
+       /* no membar */
        atomic_dec_uint(&atomic->a_u.au_uint);
 }
 
 static inline int
 atomic_inc_return(atomic_t *atomic)
 {
-       return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
+       int v;
+
+       smp_mb__before_atomic();
+       v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
+       smp_mb__after_atomic();
+
+       return v;
 }
 
 static inline int
 atomic_dec_return(atomic_t *atomic)
 {
-       return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
+       int v;
+
+       smp_mb__before_atomic();
+       v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
+       smp_mb__after_atomic();
+
+       return v;
 }
 
 static inline int
 atomic_dec_and_test(atomic_t *atomic)
 {
-       return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
+       /* membar implied by atomic_dec_return */
+       return atomic_dec_return(atomic) == 0;
 }
 
 static inline void
 atomic_or(int value, atomic_t *atomic)
 {
+       /* no membar */
        atomic_or_uint(&atomic->a_u.au_uint, value);
 }
 
 static inline void
 atomic_set_mask(unsigned long mask, atomic_t *atomic)
 {
+       /* no membar */
        atomic_or_uint(&atomic->a_u.au_uint, mask);
 }
 
 static inline void
 atomic_clear_mask(unsigned long mask, atomic_t *atomic)
 {
+       /* no membar */
        atomic_and_uint(&atomic->a_u.au_uint, ~mask);
 }
 
@@ -130,33 +174,53 @@
 {
        int value;
 
+       smp_mb__before_atomic();
        do {
                value = atomic->a_u.au_int;
                if (value == zero)
-                       return 0;
+                       break;
        } while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
            != value);
+       smp_mb__after_atomic();
 
-       return 1;
+       return value != zero;
 }
 
 static inline int
 atomic_inc_not_zero(atomic_t *atomic)
 {
+       /* membar implied by atomic_add_unless */
        return atomic_add_unless(atomic, 1, 0);
 }
 
 static inline int
 atomic_xchg(atomic_t *atomic, int new)
 {
-       return (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
+       int old;
+
+       smp_mb__before_atomic();
+       old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
+       smp_mb__after_atomic();
+
+       return old;
 }
 
 static inline int
-atomic_cmpxchg(atomic_t *atomic, int old, int new)
+atomic_cmpxchg(atomic_t *atomic, int expect, int new)
 {
-       return (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)old,
+       int old;
+
+       /*
+        * XXX As an optimization, under Linux's semantics we are
+        * allowed to skip the memory barrier if the comparison fails,
+        * but taking advantage of that is not convenient here.
+        */
+       smp_mb__before_atomic();
+       old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
            (unsigned)new);
+       smp_mb__after_atomic();
+
+       return old;
 }
 
 struct atomic64 {
@@ -168,37 +232,58 @@
 static inline uint64_t
 atomic64_read(const struct atomic64 *a)
 {
+       /* no membar */
        return a->a_v;
 }
 
 static inline void
 atomic64_set(struct atomic64 *a, uint64_t v)
 {
+       /* no membar */
        a->a_v = v;
 }
 
 static inline void
 atomic64_add(long long d, struct atomic64 *a)
 {
+       /* no membar */
        atomic_add_64(&a->a_v, d);
 }
 
 static inline void
 atomic64_sub(long long d, struct atomic64 *a)
 {
+       /* no membar */
        atomic_add_64(&a->a_v, -d);
 }
 
 static inline uint64_t
-atomic64_xchg(struct atomic64 *a, uint64_t v)
+atomic64_xchg(struct atomic64 *a, uint64_t new)
 {
-       return atomic_swap_64(&a->a_v, v);
+       uint64_t old;
+
+       smp_mb__before_atomic();
+       old = atomic_swap_64(&a->a_v, new);
+       smp_mb__after_atomic();
+
+       return old;
 }
 
 static inline uint64_t
-atomic64_cmpxchg(struct atomic64 *atomic, uint64_t old, uint64_t new)
+atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
 {
-       return atomic_cas_64(&atomic->a_v, old, new);
+       uint64_t old;
+
+       /*
+        * XXX As an optimization, under Linux's semantics we are
+        * allowed to skip the memory barrier if the comparison fails,
+        * but taking advantage of that is not convenient here.
+        */
+       smp_mb__before_atomic();
+       old = atomic_cas_64(&atomic->a_v, expect, new);
+       smp_mb__after_atomic();
+
+       return old;
 }
 
 static inline void
@@ -206,6 +291,7 @@
 {
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
 
+       /* no memory barrier */
        atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
 }
 
@@ -214,6 +300,7 @@
 {
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
 
+       /* no memory barrier */
        atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
 }
 
@@ -225,6 +312,7 @@
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;
 
+       /* no memory barrier */
        do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
 }
 
@@ -236,7 +324,9 @@
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;
 
+       smp_mb__before_atomic();
        do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
+       smp_mb__after_atomic();
 
        return ((v & mask) != 0);
 }
@@ -249,7 +339,9 @@



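The bit operations near the end of the (truncated) diff follow the same split: the void ones implemented with atomic_or_ulong/atomic_and_ulong and the plain cas loop are annotated "no memory barrier", while the value-returning test-and-set loop is bracketed with the smp_mb__* barriers.  One common way a caller relies on that split is to use a bit as a tiny lock, pairing the barrier implied by a successful test_and_set_bit with an explicit smp_mb__before_atomic() ahead of clear_bit on release.  The sketch below is illustrative only; the slot_* names are invented and nothing in it comes from the commit.

#define NSLOTS	32

static unsigned long slot_busy;		/* one bit per slot */
static int slot_count[NSLOTS];		/* protected by the busy bit */

static int
slot_claim(unsigned int i)
{
	if (test_and_set_bit(i, &slot_busy))
		return -1;		/* already held */
	/* Full barrier implied: safe to touch slot_count[i] now. */
	slot_count[i]++;
	return 0;
}

static void
slot_release(unsigned int i)
{
	/* Order our slot_count[i] access before publishing the release. */
	smp_mb__before_atomic();
	clear_bit(i, &slot_busy);	/* void bit op: no barrier implied */
}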