Source-Changes-HG archive


[src/trunk]: src/sys/arch mips: Add missing barriers in cpu_switchto.



details:   https://anonhg.NetBSD.org/src/rev/d6bb1cb01278
branches:  trunk
changeset: 373670:d6bb1cb01278
user:      riastradh <riastradh@NetBSD.org>
date:      Thu Feb 23 14:56:00 2023 +0000

description:
mips: Add missing barriers in cpu_switchto.

Details in comments.

PR kern/57240

XXX pullup-8
XXX pullup-9
XXX pullup-10
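
To make the ordering concrete, here is a minimal, self-contained C11
sketch of the protocol these barriers protect.  It is an illustration,
not the NetBSD sources: struct kmutex, the field names, and the
sketch_* functions are simplified stand-ins modeled on ci_curlwp and
the adaptive-mutex logic described in kern_mutex.c.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct lwp;

    struct cpu_info {
            _Atomic(struct lwp *)   ci_curlwp;  /* lwp this CPU is running */
    };

    struct lwp {
            struct cpu_info         *l_cpu;     /* CPU the lwp last ran on */
    };

    struct kmutex {
            _Atomic(struct lwp *)   mtx_owner;  /* NULL once released */
            atomic_bool             mtx_waiters; /* set by blocking contenders */
    };

    /* CPU A: tail of cpu_switchto, publishing the switch to newlwp. */
    void
    sketch_cpu_switchto(struct cpu_info *ci, struct lwp *newlwp)
    {
            /* (1) SYNC_PRODUCER: oldlwp's prior mutex_exit stores must
             * be visible to other CPUs before they can observe
             * ci_curlwp = newlwp. */
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&ci->ci_curlwp, newlwp,
                memory_order_relaxed);
            /* (2) SYNC_DEKKER: ci_curlwp = newlwp must be visible
             * before newlwp's next mutex_exit may load mtx_waiters. */
            atomic_thread_fence(memory_order_seq_cst);
    }

    /* CPU A, later: newlwp releases a mutex with no atomic r/m/w. */
    void
    sketch_mutex_exit(struct kmutex *mtx)
    {
            atomic_store_explicit(&mtx->mtx_owner, NULL,
                memory_order_release);
            if (atomic_load_explicit(&mtx->mtx_waiters,
                memory_order_relaxed)) {
                    /* wake any waiters (elided) */
            }
    }

    /* CPU B: a contender deciding whether to spin or block; spinning
     * is safe only while the owner is observed running on its CPU. */
    bool
    sketch_owner_running(struct kmutex *mtx)
    {
            struct lwp *owner;

            owner = atomic_load_explicit(&mtx->mtx_owner,
                memory_order_acquire);
            return owner != NULL &&
                atomic_load_explicit(&owner->l_cpu->ci_curlwp,
                memory_order_relaxed) == owner;
    }

Without fence (2), the mtx_waiters load in sketch_mutex_exit could be
satisfied before the ci_curlwp store is globally visible; a contender
performing the complementary sequence (store mtx_waiters, then load
ci_curlwp) could then block at the same time as the exiting owner reads
a stale mtx_waiters == false, and the wakeup would be lost.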

diffstat:

 sys/arch/evbmips/ingenic/cpu_startup.S |   9 +++++++--
 sys/arch/mips/include/asm.h            |  24 +++++++++++++++++++++++-
 sys/arch/mips/mips/locore.S            |  29 +++++++++++++++++++++++++++--
 sys/arch/mips/mips/locore_mips3.S      |   9 +++++++--
 4 files changed, 64 insertions(+), 7 deletions(-)

diffs (165 lines):

diff -r 63d6990cfb79 -r d6bb1cb01278 sys/arch/evbmips/ingenic/cpu_startup.S
--- a/sys/arch/evbmips/ingenic/cpu_startup.S    Thu Feb 23 14:55:47 2023 +0000
+++ b/sys/arch/evbmips/ingenic/cpu_startup.S    Thu Feb 23 14:56:00 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu_startup.S,v 1.1 2016/01/29 01:54:14 macallan Exp $ */
+/*     $NetBSD: cpu_startup.S,v 1.2 2023/02/23 14:56:00 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2015 Michael Lorenz
@@ -33,7 +33,7 @@
 #include <sys/endian.h>
 
 #include <mips/asm.h>
-RCSID("$NetBSD: cpu_startup.S,v 1.1 2016/01/29 01:54:14 macallan Exp $");
+RCSID("$NetBSD: cpu_startup.S,v 1.2 2023/02/23 14:56:00 riastradh Exp $");
 
 #ifdef MULTIPROCESSOR
 
@@ -56,6 +56,11 @@
        nop
        beqz    MIPS_CURLWP, 1b
         nop
+       /*
+        * No membar needed because we're not switching from a
+        * previous lwp, and the idle lwp we're switching to can't be
+        * holding locks already; see cpu_switchto.
+        */
        PTR_S   MIPS_CURLWP, CPU_INFO_CURLWP(a0)
 
        li      v0, 0
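
In C terms, the hatch path above amounts to the following sketch
(reusing the types from the sketch after the description; the idle-lwp
slot is passed as a hypothetical parameter here, whereas the real code
loads the idle lwp out of struct cpu_info via an assym.h offset):

    /* A hatching CPU spins until the boot CPU publishes its idle lwp,
     * then installs it as curlwp with no fence: there is no oldlwp
     * whose mutex_exit stores need ordering, and the idle lwp cannot
     * be holding any locks yet. */
    void
    sketch_cpu_hatch(struct cpu_info *ci,
        _Atomic(struct lwp *) *idlelwpp)    /* hypothetical idle-lwp slot */
    {
            struct lwp *l;

            do {
                    l = atomic_load_explicit(idlelwpp,
                        memory_order_relaxed);
            } while (l == NULL);
            atomic_store_explicit(&ci->ci_curlwp, l,
                memory_order_relaxed);
    }
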
diff -r 63d6990cfb79 -r d6bb1cb01278 sys/arch/mips/include/asm.h
--- a/sys/arch/mips/include/asm.h       Thu Feb 23 14:55:47 2023 +0000
+++ b/sys/arch/mips/include/asm.h       Thu Feb 23 14:56:00 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: asm.h,v 1.73 2023/02/20 13:30:47 riastradh Exp $       */
+/*     $NetBSD: asm.h,v 1.74 2023/02/23 14:56:00 riastradh Exp $       */
 
 /*
  * Copyright (c) 1992, 1993
@@ -633,6 +633,28 @@
 #define        SYNC_PLUNGER    /* nothing */
 #endif
 
+/*
+ * Store-before-load barrier.  Do not use this unless you know what
+ * you're doing.
+ */
+#ifdef MULTIPROCESSOR
+#define        SYNC_DEKKER     sync
+#else
+#define        SYNC_DEKKER     /* nothing */
+#endif
+
+/*
+ * Store-before-store and load-before-load barriers.  These could be
+ * made weaker than release (load/store-before-store) and acquire
+ * (load-before-load/store) barriers, and newer MIPS does have
+ * instruction encodings for finer-grained barriers like this, but I
+ * dunno how to appropriately conditionalize their use or get the
+ * assembler to be happy with them, so we'll use these definitions for
+ * now.
+ */
+#define        SYNC_PRODUCER   SYNC_REL
+#define        SYNC_CONSUMER   SYNC_ACQ
+
 /* CPU dependent hook for cp0 load delays */
 #if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
 #define        MFC0_HAZARD     sll $0,$0,1     /* super scalar nop */
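
As a rough C11 analogy (not a definition of the macros): SYNC_PRODUCER
behaves like atomic_thread_fence(memory_order_release), SYNC_CONSUMER
like atomic_thread_fence(memory_order_acquire), and SYNC_DEKKER like
the full fence in the classic two-flag handshake sketched below, which
is why store-before-load ordering needs the heavyweight sync:

    #include <stdatomic.h>

    static atomic_int flag[2];

    /* Each thread raises its own flag, then checks the other's.
     * Without the seq_cst fence the load may be satisfied before the
     * thread's own store is globally visible, both threads can read
     * 0, and mutual exclusion fails -- exactly the reordering that
     * SYNC_DEKKER (sync on MIPS) forbids. */
    int
    sketch_try_enter(int self)
    {
            atomic_store_explicit(&flag[self], 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* SYNC_DEKKER */
            return atomic_load_explicit(&flag[1 - self],
                memory_order_relaxed) == 0;
    }
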
diff -r 63d6990cfb79 -r d6bb1cb01278 sys/arch/mips/mips/locore.S
--- a/sys/arch/mips/mips/locore.S       Thu Feb 23 14:55:47 2023 +0000
+++ b/sys/arch/mips/mips/locore.S       Thu Feb 23 14:56:00 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.228 2021/05/29 12:35:27 simonb Exp $      */
+/*     $NetBSD: locore.S,v 1.229 2023/02/23 14:56:00 riastradh Exp $   */
 
 /*
  * Copyright (c) 1992, 1993
@@ -63,7 +63,7 @@
 #include <mips/trap.h>
 #include <mips/locore.h>
 
-RCSID("$NetBSD: locore.S,v 1.228 2021/05/29 12:35:27 simonb Exp $")
+RCSID("$NetBSD: locore.S,v 1.229 2023/02/23 14:56:00 riastradh Exp $")
 
 #include "assym.h"
 
@@ -286,7 +286,28 @@
 
        PTR_L   t2, L_CPU(MIPS_CURLWP)
        nop                                     # patchable load delay slot
+
+       /*
+        * Issue barriers to coordinate mutex_exit on this CPU with
+        * mutex_vector_enter on another CPU.
+        *
+        * 1. Any prior mutex_exit by oldlwp must be visible to other
+        *    CPUs before we set ci_curlwp := newlwp on this one,
+        *    requiring a store-before-store barrier.
+        *
+        * 2. ci_curlwp := newlwp must be visible on all other CPUs
+        *    before any subsequent mutex_exit by newlwp can even test
+        *    whether there might be waiters, requiring a
+        *    store-before-load barrier.
+        *
+        * See kern_mutex.c for details -- this is necessary for
+        * adaptive mutexes to detect whether the lwp is on the CPU in
+        * order to safely block without requiring atomic r/m/w in
+        * mutex_exit.
+        */
+       SYNC_PRODUCER           /* XXX fixup to nop for uniprocessor boot */
        PTR_S   MIPS_CURLWP, CPU_INFO_CURLWP(t2)
+       SYNC_DEKKER             /* XXX fixup to nop for uniprocessor boot */
 
        /* Check for restartable atomic sequences (RAS) */
        PTR_L   a0, L_PROC(MIPS_CURLWP)         # argument to ras_lookup
@@ -406,7 +427,9 @@
        move    MIPS_CURLWP, a0                         # switch to softint lwp
        PTR_L   s1, L_CPU(MIPS_CURLWP)                  # get curcpu()
        nop                                     # patchable load delay slot
+       SYNC_PRODUCER /* XXX fixup */   /* for mutex_enter; see cpu_switchto */
        PTR_S   MIPS_CURLWP, CPU_INFO_CURLWP(s1)        #    ...
+       SYNC_DEKKER /* XXX fixup */     /* for mutex_enter; see cpu_switchto */
        move    s2, sp                                  # remember sp
        move    s3, t0                                  # remember curpcb
 
@@ -417,7 +440,9 @@
 
        move    sp, s2                                  # restore stack
        move    MIPS_CURLWP, s0                         # restore curlwp
+       SYNC_PRODUCER /* XXX fixup */   /* for mutex_enter; see cpu_switchto */
        PTR_S   MIPS_CURLWP, CPU_INFO_CURLWP(s1)        #    ....
+       SYNC_DEKKER /* XXX fixup */     /* for mutex_enter; see cpu_switchto */
 
        REG_L   ra, CALLFRAME_RA(sp)            # load early since we use it
 
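All three ci_curlwp stores in this file now follow the same bracketing,
which in the C of the earlier sketch would be one helper; the kernel
does it in assembly (marked XXX fixup) so the syncs can be patched to
nops on a uniprocessor boot:

    /* Every publication of a new curlwp is bracketed the same way:
     * release ordering for the outgoing lwp's stores, then a full
     * fence before the incoming lwp's first mutex_exit can load
     * mtx_waiters. */
    static inline void
    sketch_set_curlwp(struct cpu_info *ci, struct lwp *l)
    {
            atomic_thread_fence(memory_order_release); /* SYNC_PRODUCER */
            atomic_store_explicit(&ci->ci_curlwp, l,
                memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* SYNC_DEKKER */
    }
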
diff -r 63d6990cfb79 -r d6bb1cb01278 sys/arch/mips/mips/locore_mips3.S
--- a/sys/arch/mips/mips/locore_mips3.S Thu Feb 23 14:55:47 2023 +0000
+++ b/sys/arch/mips/mips/locore_mips3.S Thu Feb 23 14:56:00 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore_mips3.S,v 1.115 2020/05/24 07:15:24 simonb Exp $        */
+/*     $NetBSD: locore_mips3.S,v 1.116 2023/02/23 14:56:00 riastradh Exp $     */
 
 /*
  * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
@@ -92,7 +92,7 @@
 #include <mips/asm.h>
 #include <mips/cpuregs.h>
 
-RCSID("$NetBSD: locore_mips3.S,v 1.115 2020/05/24 07:15:24 simonb Exp $")
+RCSID("$NetBSD: locore_mips3.S,v 1.116 2023/02/23 14:56:00 riastradh Exp $")
 
 #include "assym.h"
 
@@ -809,6 +809,11 @@
        nop
        beqz    MIPS_CURLWP, 1b
         nop
+       /*
+        * No membar needed because we're not switching from a
+        * previous lwp, and the idle lwp we're switching to can't be
+        * holding locks already; see cpu_switchto.
+        */
        PTR_S   MIPS_CURLWP, CPU_INFO_CURLWP(a0)
 
 #ifdef _LP64


