Source-Changes-HG archive
[src/trunk]: src/sys/sys sys/atomic.h: Fix atomic_store_* on sparcv7, sparcv8.
details: https://anonhg.NetBSD.org/src/rev/03b3fbfe0e52
branches: trunk
changeset: 368724:03b3fbfe0e52
user: riastradh <riastradh%NetBSD.org@localhost>
date: Sat Jul 30 14:13:27 2022 +0000
description:
sys/atomic.h: Fix atomic_store_* on sparcv7, sparcv8.
These did not cooperate with the hash-locked scheme of the other
atomic operations, with the effect that, for instance, a typical
naive spin lock based on atomic_*,
	volatile unsigned locked = 0;

	lock()
	{
		while (atomic_swap_uint(&locked, 1))
			continue;
		membar_acquire();
	}

	unlock()
	{
		membar_release();
		atomic_store_relaxed(&locked, 0);
	}
would fail to achieve mutual exclusion: the plain store that atomic_store_relaxed previously expanded to bypasses the per-address hash lock, so it can land between the load and the store of another CPU's atomic_swap_uint and be silently overwritten.
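The failure is easier to see with a rough sketch of how a hash-locked r/m/w atomic is typically built on such machines. The lock table, hash function, and all names below are invented for illustration; this is not NetBSD's actual sparc implementation:

	#include <stdint.h>

	#define NLOCKS	64
	static volatile char hashlock[NLOCKS];	/* one-byte spin locks, 0 = free */

	static inline unsigned
	lock_index(const volatile void *p)
	{
		return (unsigned)(((uintptr_t)p >> 2) % NLOCKS);
	}

	static inline void
	spin_acquire(volatile char *l)
	{
		/* On sparcv7/v8 this would be an ldstub loop. */
		while (__atomic_test_and_set(l, __ATOMIC_ACQUIRE))
			continue;
	}

	static inline void
	spin_release(volatile char *l)
	{
		__atomic_store_n(l, 0, __ATOMIC_RELEASE);
	}

	static inline uint32_t
	emulated_atomic_swap_32(volatile uint32_t *p, uint32_t new)
	{
		volatile char *l = &hashlock[lock_index(p)];
		uint32_t old;

		spin_acquire(l);
		old = *p;	/* load... */
		*p = new;	/* ...store: atomic only because every other
				 * r/m/w on this word takes the same lock */
		spin_release(l);
		return old;
	}

Any store that bypasses the lock table breaks the atomicity of every r/m/w operation on the same word.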
For this case, atomic_store_* must itself be implemented with atomic_swap_* (or, for 8- and 16-bit objects, with atomic_cas_32 loops, since there is no atomic_swap_8 or atomic_swap_16).
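A simplified sketch of that cas-32 technique for an 8-bit store follows. It is not the committed __do_atomic_store (that code is in the diff further down), the helper name is invented, and it is written as ordinary C against <sys/atomic.h>. Splicing the byte into a copy of the containing word with memcpy keeps the sketch independent of byte order:

	#include <sys/types.h>
	#include <sys/atomic.h>		/* atomic_cas_32, atomic_load_relaxed */
	#include <string.h>

	/*
	 * Hypothetical helper: an 8-bit store that cooperates with
	 * hash-locked r/m/w atomics by doing an atomic_cas_32 loop on
	 * the aligned 32-bit word containing the byte.
	 */
	static inline void
	store_8_via_cas32(volatile uint8_t *p, uint8_t v)
	{
		volatile uint32_t *wp =
		    (volatile uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
		const unsigned off = (uintptr_t)p & 3;	/* byte offset in word */
		uint32_t o, n;

		do {
			o = atomic_load_relaxed(wp);
			n = o;
			/* Splice the new byte into a copy of the word. */
			memcpy((uint8_t *)&n + off, &v, 1);
		} while (atomic_cas_32(wp, o, n) != o);
	}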
The new machine/types.h macro __HAVE_HASHLOCKED_ATOMICS says whether
these contortions are necessary.
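Machine-independent code can test the macro directly, as the subr_csan.c and sys/atomic.h hunks below do. A minimal, hypothetical illustration of the pattern (this is essentially what the reworked atomic_store_* macros arrange automatically):

	#include <sys/types.h>		/* pulls in <machine/types.h> */
	#include <sys/atomic.h>

	/*
	 * Hypothetical example: only platforms defining
	 * __HAVE_HASHLOCKED_ATOMICS need to route a word-sized store
	 * through a swap-based helper; elsewhere a plain aligned word
	 * store is already atomic.
	 */
	static inline void
	store_32_cooperatively(volatile uint32_t *p, uint32_t v)
	{
	#ifdef __HAVE_HASHLOCKED_ATOMICS
		(void)atomic_swap_32(p, v);	/* takes the same hash lock as r/m/w */
	#else
		*p = v;
	#endif
	}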
Note that this _requires_ the use of atomic_store_*(p, v), not
regular stores *p = v, to work with the r/m/w atomic operations.
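A hypothetical example, not taken from the commit, of why a plain store is not enough: if another CPU is updating the same word with a hash-locked r/m/w operation, the plain store can slip between that operation's load and store and be wiped out by the stale value it writes back, an outcome no serial ordering of the two operations allows.

	#include <sys/types.h>
	#include <sys/atomic.h>

	/* Hypothetical shared word: other CPUs set bits in it. */
	static volatile uint32_t pending;

	void
	post_event(uint32_t bit)
	{
		atomic_or_32(&pending, bit);	/* hash-locked r/m/w on sparc MP */
	}

	void
	clear_events(void)
	{
		/*
		 * Wrong on __HAVE_HASHLOCKED_ATOMICS kernels:
		 *	pending = 0;
		 * The store can slip inside another CPU's atomic_or_32
		 * and be lost when the or writes back its stale value.
		 */
		atomic_store_relaxed(&pending, 0);	/* cooperates with the locks */
	}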
diffstat:
sys/arch/sparc/include/types.h | 6 ++++-
sys/kern/subr_csan.c | 8 ++++-
sys/sys/atomic.h | 52 +++++++++++++++++++++++++++++++++++++++++-
3 files changed, 62 insertions(+), 4 deletions(-)
diffs (138 lines):
diff -r 4a740dddfcb3 -r 03b3fbfe0e52 sys/arch/sparc/include/types.h
--- a/sys/arch/sparc/include/types.h Sat Jul 30 14:11:00 2022 +0000
+++ b/sys/arch/sparc/include/types.h Sat Jul 30 14:13:27 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: types.h,v 1.71 2021/01/23 19:38:53 christos Exp $ */
+/* $NetBSD: types.h,v 1.72 2022/07/30 14:13:27 riastradh Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -48,6 +48,7 @@
#endif
#if defined(_KERNEL_OPT)
+#include "opt_multiprocessor.h"
#include "opt_sparc_arch.h"
#endif
@@ -135,6 +136,9 @@
#define __HAVE_FAST_SOFTINTS
#else
#define __HAVE_MM_MD_READWRITE
+#ifdef MULTIPROCESSOR
+#define __HAVE_HASHLOCKED_ATOMICS
+#endif
#endif
#define __HAVE_CPU_LWP_SETPRIVATE
diff -r 4a740dddfcb3 -r 03b3fbfe0e52 sys/kern/subr_csan.c
--- a/sys/kern/subr_csan.c Sat Jul 30 14:11:00 2022 +0000
+++ b/sys/kern/subr_csan.c Sat Jul 30 14:13:27 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_csan.c,v 1.13 2021/09/11 10:09:55 riastradh Exp $ */
+/* $NetBSD: subr_csan.c,v 1.14 2022/07/30 14:13:27 riastradh Exp $ */
/*
* Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.13 2021/09/11 10:09:55 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.14 2022/07/30 14:13:27 riastradh Exp $");
#include <sys/param.h>
#include <sys/device.h>
@@ -615,12 +615,16 @@
kcsan_atomic_store(volatile void *p, const void *v, int size)
{
kcsan_access((uintptr_t)p, size, true, true, __RET_ADDR);
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+ __do_atomic_store(p, v, size);
+#else
switch (size) {
case 1: *(volatile uint8_t *)p = *(const uint8_t *)v; break;
case 2: *(volatile uint16_t *)p = *(const uint16_t *)v; break;
case 4: *(volatile uint32_t *)p = *(const uint32_t *)v; break;
case 8: *(volatile uint64_t *)p = *(const uint64_t *)v; break;
}
+#endif
}
/* -------------------------------------------------------------------------- */
diff -r 4a740dddfcb3 -r 03b3fbfe0e52 sys/sys/atomic.h
--- a/sys/sys/atomic.h Sat Jul 30 14:11:00 2022 +0000
+++ b/sys/sys/atomic.h Sat Jul 30 14:13:27 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: atomic.h,v 1.24 2022/04/09 23:34:30 riastradh Exp $ */
+/* $NetBSD: atomic.h,v 1.25 2022/07/30 14:13:27 riastradh Exp $ */
/*-
* Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -433,9 +433,14 @@
__typeof__(*(p)) v = *(p)
#define __END_ATOMIC_LOAD(v) \
v
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+#define __DO_ATOMIC_STORE(p, v) \
+ __do_atomic_store(p, __UNVOLATILE(&v), sizeof(v))
+#else /* !__HAVE_HASHLOCKED_ATOMICS */
#define __DO_ATOMIC_STORE(p, v) \
*p = v
#endif
+#endif
#define atomic_load_relaxed(p) \
({ \
@@ -480,6 +485,51 @@
__DO_ATOMIC_STORE(__as_ptr, __as_val); \
})
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+static void __inline __always_inline
+__do_atomic_store(volatile void *p, const void *q, size_t size)
+{
+ switch (size) {
+ case 1: {
+ uint8_t v;
+ unsigned s = 8 * ((uintptr_t)p & 3);
+ uint32_t o, n, m = ~(0xffU << s);
+ memcpy(&v, q, 1);
+ do {
+ o = atomic_load_relaxed((const volatile uint32_t *)p);
+ n = (o & m) | ((uint32_t)v << s);
+ } while (atomic_cas_32((volatile uint32_t *)p, o, n) != o);
+ break;
+ }
+ case 2: {
+ uint16_t v;
+ unsigned s = 8 * (((uintptr_t)p & 2) >> 1);
+ uint32_t o, n, m = ~(0xffffU << s);
+ memcpy(&v, q, 2);
+ do {
+ o = atomic_load_relaxed((const volatile uint32_t *)p);
+ n = (o & m) | ((uint32_t)v << s);
+ } while (atomic_cas_32((volatile uint32_t *)p, o, n) != o);
+ break;
+ }
+ case 4: {
+ uint32_t v;
+ memcpy(&v, q, 4);
+ (void)atomic_swap_32(p, v);
+ break;
+ }
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+ case 8: {
+ uint64_t v;
+ memcpy(&v, q, 8);
+ (void)atomic_swap_64(p, v);
+ break;
+ }
+#endif
+ }
+}
+#endif /* __HAVE_HASHLOCKED_ATOMICS */
+
#else /* __STDC_VERSION__ >= 201112L */
/* C11 definitions, not yet available */