Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64/aarch64 aarch64: Optimization: Omit needless membar when triggering softint.
details: https://anonhg.NetBSD.org/src/rev/8375b74ce704
branches: trunk
changeset: 373788:8375b74ce704
user: riastradh <riastradh@NetBSD.org>
date: Wed Mar 01 08:17:24 2023 +0000
description:
aarch64: Optimization: Omit needless membar when triggering softint.
When we are triggering a softint, the softint lwp can't already hold
any mutexes.  So any path it takes to mutex_exit(mtx) must first go
through mutex_enter(mtx), which is always done with an atomic r/m/w,
and we need not issue any explicit barrier between
ci->ci_curlwp = softlwp and a potential load of mtx->mtx_owner in
mutex_exit.
PR kern/57240
XXX pullup-9
XXX pullup-10
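
[Editorial sketch.  In C11-atomics terms, the argument above can be
sketched roughly as follows.  This is an illustrative sketch only, not
NetBSD source: the structures are trimmed to the fields the argument
touches, and the sketch_* names are hypothetical stand-ins for the real
cpu_switchto_softint, mutex_enter, and mutex_exit.]

/*
 * Illustrative sketch only -- NOT NetBSD source.  Trimmed stand-ins
 * for the structures and the three operations the commit message
 * reasons about.
 */
#include <assert.h>
#include <stdatomic.h>

struct lwp;

struct cpu_info {
	_Atomic(struct lwp *) ci_curlwp;
};

struct mutex {
	_Atomic(struct lwp *) mtx_owner;
};

static void
sketch_trigger_softint(struct cpu_info *ci, struct lwp *softlwp)
{
	/*
	 * The store the patch is about.  The dmb ishst that precedes
	 * the str in the asm is approximated here by release ordering;
	 * the point of the patch is that no additional barrier is
	 * needed after this store.
	 */
	atomic_store_explicit(&ci->ci_curlwp, softlwp,
	    memory_order_release);
}

static void
sketch_mutex_enter(struct mutex *mtx, struct lwp *softlwp)
{
	struct lwp *expected = NULL;

	/*
	 * Acquisition is an atomic r/m/w (ldaxr/stlxr on aarch64).
	 * Its release half orders the earlier ci_curlwp store before
	 * the mtx_owner update, and its acquire half orders the
	 * critical section after it.
	 */
	while (!atomic_compare_exchange_weak_explicit(&mtx->mtx_owner,
	    &expected, softlwp, memory_order_acq_rel,
	    memory_order_relaxed))
		expected = NULL;	/* real code spins or sleeps */
}

static void
sketch_mutex_exit(struct mutex *mtx, struct lwp *softlwp)
{
	/*
	 * This is the "potential load of mtx->mtx_owner in mutex_exit"
	 * from the commit message.  A softint lwp can only reach here
	 * through sketch_mutex_enter(), so the r/m/w above has already
	 * supplied the ordering the deleted dmb ish used to provide.
	 */
	assert(atomic_load_explicit(&mtx->mtx_owner,
	    memory_order_relaxed) == softlwp);
	atomic_store_explicit(&mtx->mtx_owner, NULL,
	    memory_order_release);
}

[The sketch turns on the control-flow fact the commit message relies
on: a freshly entered softint lwp holds no mutexes, so its first
mutex_exit is necessarily preceded by a mutex_enter.]
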
diffstat:
sys/arch/aarch64/aarch64/cpuswitch.S | 12 +++++++++---
1 files changed, 9 insertions(+), 3 deletions(-)
diffs (33 lines):
diff -r 8b299360a607 -r 8375b74ce704 sys/arch/aarch64/aarch64/cpuswitch.S
--- a/sys/arch/aarch64/aarch64/cpuswitch.S Wed Mar 01 08:14:13 2023 +0000
+++ b/sys/arch/aarch64/aarch64/cpuswitch.S Wed Mar 01 08:17:24 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuswitch.S,v 1.40 2023/02/23 14:54:57 riastradh Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.41 2023/03/01 08:17:24 riastradh Exp $ */
/*-
* Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
#include "opt_ddb.h"
#include "opt_kasan.h"
-RCSID("$NetBSD: cpuswitch.S,v 1.40 2023/02/23 14:54:57 riastradh Exp $")
+RCSID("$NetBSD: cpuswitch.S,v 1.41 2023/03/01 08:17:24 riastradh Exp $")
ARMV8_DEFINE_OPTIONS
@@ -224,7 +224,13 @@
msr tpidr_el1, x0 /* curlwp = softlwp; */
dmb ishst /* for mutex_enter; see cpu_switchto */
str x0, [x20, #CI_CURLWP] /* curcpu()->ci_curlwp = softlwp; */
- dmb ish /* for mutex_enter; see cpu_switchto */
+ /*
+ * No need for barrier after ci->ci_curlwp = softlwp -- when we
+ * enter a softint lwp, it can't be holding any mutexes, so it
+ * can't release any until after it has acquired them, so we
+ * need not participate in the protocol with mutex_vector_enter
+ * barriers here.
+ */
mov x5, #CPACR_FPEN_NONE
msr cpacr_el1, x5 /* cpacr_el1 = CPACR_FPEN_NONE */
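
[Editorial sketch.  For context, the "protocol with
mutex_vector_enter" named in the new comment is the adaptive-mutex
spin: a waiter keeps spinning only while the current owner is still on
a CPU, which it judges by reading ci_curlwp on the owner's CPU.  A
rough, hypothetical sketch of that reader side follows; it is
simplified from the shape of the real loop in kern_mutex.c, and the
sketch_* names are stand-ins, not NetBSD functions.]

/*
 * Illustrative sketch only -- NOT the real mutex_vector_enter.  Shows
 * the reads on the waiter's side that the stores in cpu_switchto pair
 * with.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct cpu_info;

struct lwp {
	struct cpu_info *l_cpu;
};

struct cpu_info {
	_Atomic(struct lwp *) ci_curlwp;
};

struct mutex {
	_Atomic(struct lwp *) mtx_owner;
};

static bool
sketch_owner_running(struct lwp *owner)
{
	/*
	 * The waiter reads owner->l_cpu->ci_curlwp to decide whether
	 * the owner is still running.  It is this read that the
	 * ci_curlwp stores in cpu_switchto must be ordered against.
	 */
	return atomic_load_explicit(&owner->l_cpu->ci_curlwp,
	    memory_order_acquire) == owner;
}

static void
sketch_mutex_spin(struct mutex *mtx)
{
	struct lwp *owner;

	/*
	 * Spin while the mutex is owned by an lwp that is on-CPU;
	 * otherwise fall through (the real code either takes the
	 * mutex with an atomic r/m/w or blocks on a turnstile).
	 */
	while ((owner = atomic_load_explicit(&mtx->mtx_owner,
	    memory_order_acquire)) != NULL &&
	    sketch_owner_running(owner))
		continue;
}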