[src/trunk]: src/sys/arch/hppa/include hppa: __cpu_simple_lock membar audit.
details: https://anonhg.NetBSD.org/src/rev/3ffd8f223d94
branches: trunk
changeset: 361546:3ffd8f223d94
user: riastradh <riastradh%NetBSD.org@localhost>
date: Sun Feb 13 14:06:51 2022 +0000
description:
hppa: __cpu_simple_lock membar audit.
ok skrll
diffstat:
sys/arch/hppa/include/lock.h | 35 +++++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)
diffs (65 lines):
diff -r 317998b021b7 -r 3ffd8f223d94 sys/arch/hppa/include/lock.h
--- a/sys/arch/hppa/include/lock.h Sun Feb 13 13:42:30 2022 +0000
+++ b/sys/arch/hppa/include/lock.h Sun Feb 13 14:06:51 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lock.h,v 1.23 2022/02/12 17:17:53 riastradh Exp $ */
+/* $NetBSD: lock.h,v 1.24 2022/02/13 14:06:51 riastradh Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -91,6 +91,25 @@
__SIMPLELOCK_RAW_UNLOCKED;
}
+static __inline int
+__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
+{
+ volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
+
+ if (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
+ return 0;
+
+ /*
+ * __cpu_simple_lock_try must be a load-acquire operation, but
+ * HPPA's LDCW does not appear to guarantee load-acquire
+ * semantics, so we have to do LDCW and then an explicit SYNC
+ * to make a load-acquire operation that pairs with a preceding
+ * store-release in __cpu_simple_unlock.
+ */
+ __sync();
+ return 1;
+}
+
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
@@ -103,24 +122,20 @@
* some work.
*/
- while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
+ while (!__cpu_simple_lock_try(alp))
while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
;
}
-static __inline int
-__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
-{
- volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
-
- return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
-}
-
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
+ /*
+ * SYNC and then store makes a store-release that pairs with
+ * the load-acquire in a subsequent __cpu_simple_lock_try.
+ */
__sync();
*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
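
For readers tracing the reasoning in the comments above, here is a minimal
stand-alone sketch of the ordering discipline this change establishes,
written with C11 atomics instead of hppa's LDCW and SYNC. Everything here
is hypothetical illustration, not NetBSD code: the names (model_lock_t,
model_lock_try, and so on) are invented, and the sketch uses the usual
0-means-unlocked convention rather than hppa's, where a zero word means
locked because LDCW clears the word it loads.

#include <stdatomic.h>
#include <stdbool.h>

typedef atomic_uint model_lock_t;	/* hypothetical; 0 = unlocked here */

static inline bool
model_lock_try(model_lock_t *l)
{
	/*
	 * The relaxed exchange stands in for LDCW: an atomic
	 * read-and-clobber with no ordering of its own.
	 */
	if (atomic_exchange_explicit(l, 1, memory_order_relaxed) != 0)
		return false;		/* already held */
	/*
	 * The acquire fence plays the role of the SYNC issued after
	 * LDCW: no load or store in the critical section may be
	 * reordered before this point.
	 */
	atomic_thread_fence(memory_order_acquire);
	return true;
}

static inline void
model_lock(model_lock_t *l)
{
	while (!model_lock_try(l))
		/*
		 * Spin on plain reads, as the rewritten
		 * __cpu_simple_lock does, so waiting CPUs do not
		 * hammer the cache line with atomic operations.
		 */
		while (atomic_load_explicit(l, memory_order_relaxed) != 0)
			;
}

static inline void
model_unlock(model_lock_t *l)
{
	/*
	 * The release fence plays the role of the SYNC before the
	 * store: every access in the critical section completes
	 * before the lock word is seen to clear.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(l, 0, memory_order_relaxed);
}

The pairing is the point of the audit: a release fence before the
clearing store, read by a relaxed exchange that is followed by an
acquire fence, gives the same happens-before edge as a store-release
paired with a load-acquire, so the next owner of the lock is guaranteed
to see everything the previous owner did while holding it.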
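As a usage sketch (again hypothetical, building on the model above plus
POSIX threads), the guarantee the fences restore is that data written
before an unlock is visible to whoever locks next:

#include <pthread.h>
#include <stdio.h>

static model_lock_t lock;		/* zero-initialized: unlocked */
static int shared_data;

static void *
producer(void *arg)
{
	model_lock(&lock);
	shared_data = 42;		/* ordered before the unlock... */
	model_unlock(&lock);
	return NULL;
}

static void *
consumer(void *arg)
{
	model_lock(&lock);		/* ...and after a later lock */
	printf("%d\n", shared_data);	/* 0 or 42, depending on which
					 * thread ran first; never a
					 * half-written value */
	model_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Without the acquire in model_lock_try and the release in model_unlock,
nothing would stop the store to shared_data from drifting outside the
critical section, which is exactly the hazard the audit closes.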