Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/aarch64 aarch64: curcpu() audit.



details:   https://anonhg.NetBSD.org/src/rev/03a8490da5a6
branches:  trunk
changeset: 373730:03a8490da5a6
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Sat Feb 25 00:40:22 2023 +0000

description:
aarch64: curcpu() audit.

Sprinkle KASSERT (or KDASSERT in hot paths) for kpreempt_disabled()
when we use curcpu() and it's not immediately obvious that the caller
has preemption disabled but closer scrutiny suggests the caller has.

Note unsafe curcpu()s for syscall event counting.  Not sure this is
worth changing.

Possible bugs fixed:

- cpu_irq and cpu_fiq could be preempted while trying to run softints
  on this CPU.

- data_abort_handler might incorrectly think it was invoked in
  interrupt context when it was only preempted and migrated to
  another CPU.

- pmap_fault_fixup might report logs attributed to the wrong CPU.

(However, we don't currently run with kpreemption on aarch64, so
these are not yet real bugs fixed, unless you patch it to build
with __HAVE_PREEMPTION.)

diffstat:

 sys/arch/aarch64/aarch64/aarch32_syscall.c |   6 ++--
 sys/arch/aarch64/aarch64/cpu_machdep.c     |   6 +++-
 sys/arch/aarch64/aarch64/cpufunc.c         |  10 +++++++-
 sys/arch/aarch64/aarch64/fault.c           |   9 +++++--
 sys/arch/aarch64/aarch64/pmap_machdep.c    |  12 +++++++---
 sys/arch/aarch64/aarch64/syscall.c         |   6 ++--
 sys/arch/aarch64/aarch64/trap.c            |  33 ++++++++++++++++++++++++++++-
 sys/arch/aarch64/include/cpu.h             |   3 +-
 8 files changed, 65 insertions(+), 20 deletions(-)

diffs (truncated from 304 to 300 lines):

diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/aarch32_syscall.c
--- a/sys/arch/aarch64/aarch64/aarch32_syscall.c        Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/aarch32_syscall.c        Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: aarch32_syscall.c,v 1.6 2021/11/25 03:08:04 ryo Exp $  */
+/*     $NetBSD: aarch32_syscall.c,v 1.7 2023/02/25 00:40:22 riastradh Exp $    */
 
 /*
  * Copyright (c) 2018 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: aarch32_syscall.c,v 1.6 2021/11/25 03:08:04 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: aarch32_syscall.c,v 1.7 2023/02/25 00:40:22 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/ktrace.h>
@@ -69,7 +69,7 @@
 
        LWP_CACHE_CREDS(l, p);
 
-       curcpu()->ci_data.cpu_nsyscall++;
+       curcpu()->ci_data.cpu_nsyscall++; /* XXX unsafe curcpu() */
 
        thumbmode = (tf->tf_spsr & SPSR_A32_T) ? true : false;
 #ifdef SYSCALL_CODE_REG
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/cpu_machdep.c
--- a/sys/arch/aarch64/aarch64/cpu_machdep.c    Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/cpu_machdep.c    Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_machdep.c,v 1.13 2022/07/28 09:14:12 riastradh Exp $ */
+/* $NetBSD: cpu_machdep.c,v 1.14 2023/02/25 00:40:22 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: cpu_machdep.c,v 1.13 2022/07/28 09:14:12 riastradh Exp $");
+__KERNEL_RCSID(1, "$NetBSD: cpu_machdep.c,v 1.14 2023/02/25 00:40:22 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -116,6 +116,8 @@
        const uint32_t softiplmask = SOFTIPLMASK(opl);
        int s;
 
+       KDASSERT(kpreempt_disabled());
+
        s = splhigh();
        KASSERT(s == opl);
        for (;;) {
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/cpufunc.c
--- a/sys/arch/aarch64/aarch64/cpufunc.c        Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/cpufunc.c        Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.c,v 1.33 2022/01/31 09:16:09 ryo Exp $ */
+/*     $NetBSD: cpufunc.c,v 1.34 2023/02/25 00:40:22 riastradh Exp $   */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -30,7 +30,7 @@
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.33 2022/01/31 09:16:09 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.34 2023/02/25 00:40:22 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -369,6 +369,8 @@
        struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
        int level;
 
+       KASSERT(kpreempt_disabled());
+
        for (level = 0; level < MAX_CACHE_LEVEL; level++) {
                if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
                        break;
@@ -386,6 +388,8 @@
        struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
        int level;
 
+       KASSERT(kpreempt_disabled());
+
        for (level = 0; level < MAX_CACHE_LEVEL; level++) {
                if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
                        break;
@@ -403,6 +407,8 @@
        struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
        int level;
 
+       KASSERT(kpreempt_disabled());
+
        for (level = 0; level < MAX_CACHE_LEVEL; level++) {
                if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
                        break;
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/fault.c
--- a/sys/arch/aarch64/aarch64/fault.c  Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/fault.c  Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fault.c,v 1.24 2022/05/11 14:58:00 andvar Exp $        */
+/*     $NetBSD: fault.c,v 1.25 2023/02/25 00:40:22 riastradh Exp $     */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.24 2022/05/11 14:58:00 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.25 2023/02/25 00:40:22 riastradh Exp $");
 
 #include "opt_compat_netbsd32.h"
 #include "opt_cpuoptions.h"
@@ -226,7 +226,10 @@
 
  do_fault:
        /* faultbail path? */
-       if (curcpu()->ci_intr_depth == 0) {
+       kpreempt_disable();
+       const bool intrdepthzero = (curcpu()->ci_intr_depth == 0);
+       kpreempt_enable();
+       if (intrdepthzero) {
                fb = cpu_disable_onfault();
                if (fb != NULL) {
                        cpu_jump_onfault(tf, fb, error);
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/pmap_machdep.c
--- a/sys/arch/aarch64/aarch64/pmap_machdep.c   Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/pmap_machdep.c   Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_machdep.c,v 1.2 2022/12/21 11:39:45 skrll Exp $   */
+/*     $NetBSD: pmap_machdep.c,v 1.3 2023/02/25 00:40:22 riastradh Exp $       */
 
 /*-
  * Copyright (c) 2022 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
 #define __PMAP_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.2 2022/12/21 11:39:45 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.3 2023/02/25 00:40:22 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -138,6 +138,8 @@
 
        KASSERT(!user || (pm != pmap_kernel()));
 
+       kpreempt_disable();
+
        UVMHIST_LOG(pmaphist, " pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
            (uintptr_t)pm, va, ftype, user);
        UVMHIST_LOG(pmaphist, " ti=%#jx pai=%#jx asid=%#jx",
@@ -145,8 +147,6 @@
            (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
            (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
 
-       kpreempt_disable();
-
        bool fixed = false;
        pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
        if (ptep == NULL) {
@@ -525,6 +525,8 @@
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, " (pm=%#jx l=%#jx)", (uintptr_t)pm, (uintptr_t)l, 0, 0);
 
+       KASSERT(kpreempt_disabled());
+
        /*
         * Assume that TTBR1 has only global mappings and TTBR0 only
         * has non-global mappings.  To prevent speculation from doing
@@ -565,6 +567,8 @@
 {
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
+       KASSERT(kpreempt_disabled());
+
        struct cpu_info * const ci = curcpu();
        /*
         * Disable translation table walks from TTBR0 while no pmap has been
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/syscall.c
--- a/sys/arch/aarch64/aarch64/syscall.c        Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/syscall.c        Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: syscall.c,v 1.11 2021/09/27 17:51:15 ryo Exp $ */
+/*     $NetBSD: syscall.c,v 1.12 2023/02/25 00:40:22 riastradh Exp $   */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -58,7 +58,7 @@
 #define EMULNAME(x)    (x)
 #define EMULNAMEU(x)   (x)
 
-__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.11 2021/09/27 17:51:15 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.12 2023/02/25 00:40:22 riastradh Exp $");
 
 void
 cpu_spawn_return(struct lwp *l)
@@ -94,7 +94,7 @@
 
        LWP_CACHE_CREDS(l, p);
 
-       curcpu()->ci_data.cpu_nsyscall++;
+       curcpu()->ci_data.cpu_nsyscall++; /* XXX unsafe curcpu() */
 
        register_t *params = (void *)tf->tf_reg;
        size_t nargs = NARGREG;
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/aarch64/trap.c
--- a/sys/arch/aarch64/aarch64/trap.c   Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/aarch64/trap.c   Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: trap.c,v 1.47 2021/08/30 23:20:00 jmcneill Exp $ */
+/* $NetBSD: trap.c,v 1.48 2023/02/25 00:40:22 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.47 2021/08/30 23:20:00 jmcneill Exp $");
+__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.48 2023/02/25 00:40:22 riastradh Exp $");
 
 #include "opt_arm_intr_impl.h"
 #include "opt_compat_netbsd32.h"
@@ -525,14 +525,29 @@
        /* disable trace */
        reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
 #endif
+
+       /*
+        * Prevent preemption once we enable traps, until we have
+        * finished running hard and soft interrupt handlers.  This
+        * guarantees ci = curcpu() remains stable and we don't
+        * accidentally try to run its pending soft interrupts on
+        * another CPU.
+        */
+       kpreempt_disable();
+
        /* enable traps */
        daif_enable(DAIF_D|DAIF_A);
 
+       /* run hard interrupt handlers */
        ci->ci_intr_depth++;
        ARM_IRQ_HANDLER(tf);
        ci->ci_intr_depth--;
 
+       /* run soft interrupt handlers */
        cpu_dosoftints();
+
+       /* all done, preempt as you please */
+       kpreempt_enable();
 }
 
 void
@@ -552,14 +567,28 @@
        /* disable trace */
        reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
 
+       /*
+        * Prevent preemption once we enable traps, until we have
+        * finished running hard and soft interrupt handlers.  This
+        * guarantees ci = curcpu() remains stable and we don't
+        * accidentally try to run its pending soft interrupts on
+        * another CPU.
+        */
+       kpreempt_disable();
+
        /* enable traps */
        daif_enable(DAIF_D|DAIF_A);
 
+       /* run hard interrupt handlers */
        ci->ci_intr_depth++;
        ARM_FIQ_HANDLER(tf);
        ci->ci_intr_depth--;
 
+       /* run soft interrupt handlers */
        cpu_dosoftints();
+
+       /* all done, preempt as you please */
+       kpreempt_enable();
 }
 
 #ifdef COMPAT_NETBSD32
diff -r 10f273d26283 -r 03a8490da5a6 sys/arch/aarch64/include/cpu.h
--- a/sys/arch/aarch64/include/cpu.h    Sat Feb 25 00:37:47 2023 +0000
+++ b/sys/arch/aarch64/include/cpu.h    Sat Feb 25 00:40:22 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.48 2022/11/03 09:04:56 skrll Exp $ */
+/* $NetBSD: cpu.h,v 1.49 2023/02/25 00:40:22 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
@@ -242,6 +242,7 @@
 cpu_dosoftints(void)
 {
 #if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)



Home | Main Index | Thread Index | Old Index