Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm arm32: Add missing barriers in cpu_switchto.



details:   https://anonhg.NetBSD.org/src/rev/1ea5b2ca3002
branches:  trunk
changeset: 373667:1ea5b2ca3002
user:      riastradh <riastradh@NetBSD.org>
date:      Thu Feb 23 14:55:24 2023 +0000

description:
arm32: Add missing barriers in cpu_switchto.

Details in comments.

PR kern/57240

XXX pullup-8
XXX pullup-9
XXX pullup-10
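
The commit message defers to the comments in the diff, but the
invariant is easy to state in portable terms.  The following is an
illustrative C11 sketch of the two-barrier discipline this patch adds
around the ci_curlwp store; it is not code from the tree.  The names
curlwp_slot, switch_curlwp, oldlwp, and newlwp are hypothetical, and
the two fences stand in for the two dmb instructions in the diff below.

#include <stdatomic.h>

struct lwp;

static _Atomic(struct lwp *) curlwp_slot;  /* stands in for ci->ci_curlwp */

void
switch_curlwp(struct lwp *oldlwp, struct lwp *newlwp)
{
	/*
	 * 1. Store-before-store: any mutex_exit by oldlwp must be
	 *    visible to other CPUs before they can observe
	 *    curlwp_slot == newlwp (first dmb in the patch).
	 */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&curlwp_slot, newlwp, memory_order_relaxed);

	/*
	 * 2. Store-before-load: curlwp_slot == newlwp must be visible
	 *    before newlwp's next mutex_exit loads the waiters word.
	 *    A release fence cannot order a store before a later
	 *    load, so this must be the full barrier (second dmb in
	 *    the patch), i.e. seq_cst.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	(void)oldlwp;	/* oldlwp figures only in the ordering argument */
}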

diffstat:

 sys/arch/arm/arm/armv6_start.S |   7 ++++++-
 sys/arch/arm/arm32/cpuswitch.S |  42 ++++++++++++++++++++++++++++++++++++------
 2 files changed, 42 insertions(+), 7 deletions(-)

diffs (103 lines):

diff -r 13d34c05683a -r 1ea5b2ca3002 sys/arch/arm/arm/armv6_start.S
--- a/sys/arch/arm/arm/armv6_start.S    Thu Feb 23 14:55:10 2023 +0000
+++ b/sys/arch/arm/arm/armv6_start.S    Thu Feb 23 14:55:24 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: armv6_start.S,v 1.37 2021/11/14 16:56:32 riastradh Exp $       */
+/*     $NetBSD: armv6_start.S,v 1.38 2023/02/23 14:55:24 riastradh Exp $       */
 
 /*-
  * Copyright (c) 2012, 2017, 2018 The NetBSD Foundation, Inc.
@@ -943,6 +943,11 @@
 #else
 #error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined
 #endif
+       /*
+        * No membar needed because we're not switching from a
+        * previous lwp, and the idle lwp we're switching to can't be
+        * holding locks already; see cpu_switchto.
+        */
        str     r6, [r5, #CI_CURLWP]            // and note we are running on it
 
        mov     r0, r5                          // pass cpu_info
diff -r 13d34c05683a -r 1ea5b2ca3002 sys/arch/arm/arm32/cpuswitch.S
--- a/sys/arch/arm/arm32/cpuswitch.S    Thu Feb 23 14:55:10 2023 +0000
+++ b/sys/arch/arm/arm32/cpuswitch.S    Thu Feb 23 14:55:24 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpuswitch.S,v 1.105 2021/05/30 06:53:15 dholland Exp $ */
+/*     $NetBSD: cpuswitch.S,v 1.106 2023/02/23 14:55:25 riastradh Exp $        */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -87,7 +87,7 @@
 #include <arm/asm.h>
 #include <arm/locore.h>
 
-       RCSID("$NetBSD: cpuswitch.S,v 1.105 2021/05/30 06:53:15 dholland Exp $")
+       RCSID("$NetBSD: cpuswitch.S,v 1.106 2023/02/23 14:55:25 riastradh Exp $")
 
 /* LINTSTUB: include <sys/param.h> */
 
@@ -189,11 +189,32 @@
        mcr     p15, 0, r6, c13, c0, 4          /* set current lwp */
 #endif
 
-       /* We have a new curlwp now so make a note of it */
-       str     r6, [r5, #(CI_CURLWP)]
+       /*
+        * Issue barriers to coordinate mutex_exit on this CPU with
+        * mutex_vector_enter on another CPU.
+        *
+        * 1. Any prior mutex_exit by oldlwp must be visible to other
+        *    CPUs before we set ci_curlwp := newlwp on this one,
+        *    requiring a store-before-store barrier.
+        *
+        * 2. ci_curlwp := newlwp must be visible on all other CPUs
+        *    before any subsequent mutex_exit by newlwp can even test
+        *    whether there might be waiters, requiring a
+        *    store-before-load barrier.
+        *
+        * See kern_mutex.c for details -- this is necessary for
+        * adaptive mutexes to detect whether the lwp is on the CPU in
+        * order to safely block without requiring atomic r/m/w in
+        * mutex_exit.
+        */
 
+       /* We have a new curlwp now so make a note of it */
 #ifdef _ARM_ARCH_7
-       dmb                                     /* see comments in kern_mutex.c */
+       dmb                             /* store-before-store */
+#endif
+       str     r6, [r5, #(CI_CURLWP)]
+#ifdef _ARM_ARCH_7
+       dmb                             /* store-before-load */
 #endif
 
        /* Get the new pcb */
@@ -392,9 +413,12 @@
 #if defined(TPIDRPRW_IS_CURLWP)
        mcr     p15, 0, r5, c13, c0, 4  /* save new lwp */
 #endif
+#ifdef _ARM_ARCH_7
+       dmb                             /* for mutex_enter; see cpu_switchto */
+#endif
        str     r5, [r7, #(CI_CURLWP)]  /* save new lwp */
 #ifdef _ARM_ARCH_7
-       dmb                             /* see comments in kern_mutex.c */
+       dmb                             /* for mutex_enter; see cpu_switchto */
 #endif
 
 #ifdef KASAN
@@ -428,7 +452,13 @@
 #if defined(TPIDRPRW_IS_CURLWP)
        mcr     p15, 0, r4, c13, c0, 4  /* restore pinned lwp */
 #endif
+#ifdef _ARM_ARCH_7
+       dmb                             /* for mutex_enter; see cpu_switchto */
+#endif
        str     r4, [r7, #(CI_CURLWP)]  /* restore pinned lwp */
+#ifdef _ARM_ARCH_7
+       dmb                             /* for mutex_enter; see cpu_switchto */
+#endif
        ldr     sp, [r2, #(PCB_KSP)]    /* now running on the old stack. */
 
        /* At this point we can allow IRQ's again. */

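To show what these barriers protect on the other side, here is a
hypothetical, much-simplified waiter loop for an adaptive mutex.  None
of these names come from kern_mutex.c: struct mtx, owner_running(),
and block_on() are stand-ins (declared but not defined), and the
exit-side waiters check that pairs with the store-before-load barrier
is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lwp;

struct mtx {
	_Atomic(struct lwp *) owner;	/* NULL when the mutex is free */
};

/* Stand-ins: the real logic lives in kern_mutex.c. */
bool owner_running(const struct lwp *);	/* is lwp some CPU's curlwp? */
void block_on(struct mtx *);		/* turnstile sleep path */

void
adaptive_enter(struct mtx *m)
{
	struct lwp *owner;

	while ((owner = atomic_load_explicit(&m->owner,
	    memory_order_acquire)) != NULL) {
		if (owner_running(owner))
			continue;	/* owner on a CPU: keep spinning */
		/*
		 * Owner is off-CPU, so stop spinning and block.  The
		 * store-before-store barrier in cpu_switchto
		 * guarantees that once the owner's old CPU's curlwp
		 * no longer points at it, any stores the owner made
		 * (including a mutex_exit) are already visible, so
		 * the re-check of m->owner on the next iteration
		 * (done under the turnstile lock in the real code)
		 * cannot miss a release that already happened.
		 */
		block_on(m);
	}
}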

