Source-Changes-HG archive


[src/trunk]: src/sys/arch fpu_kern_enter/leave: Disable IPL assertions.



details:   https://anonhg.NetBSD.org/src/rev/8490d9c8b97a
branches:  trunk
changeset: 369491:8490d9c8b97a
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Sat Aug 20 11:34:08 2022 +0000

description:
fpu_kern_enter/leave: Disable IPL assertions.

These don't work because mutex_enter/exit on a spin lock may raise the
IPL but not lower it again if another spin lock was already held.  For
example,

        mutex_enter(some_lock_at_IPL_VM);
        printf("foo\n");
        fpu_kern_enter();
        ...
        fpu_kern_leave();
        mutex_exit(some_lock_at_IPL_VM);

will trigger the panic, because printf takes a lock at IPL_HIGH, where
the IPL will remain until the outer mutex_exit.  (This was a nightmare
to track down before I remembered that detail of spin lock IPL
semantics...)
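
For reference, here is a minimal userland sketch of the spin-lock IPL
behaviour described above.  It is not NetBSD kernel code: the IPL
values and the bookkeeping variables (spin_depth, saved_ipl) are
simplified stand-ins for the real per-CPU state, chosen only to model
why the disabled assertion could fire.

    /*
     * Model of spin-mutex IPL bookkeeping: only the outermost
     * mutex_enter saves the pre-acquisition IPL, and only the
     * outermost mutex_exit restores it.  An inner acquire/release at
     * a higher IPL therefore leaves the IPL raised until the outer
     * lock is dropped.
     */
    #include <assert.h>
    #include <stdio.h>

    enum { IPL_NONE = 0, IPL_VM = 5, IPL_HIGH = 8 }; /* illustrative values */

    static int cur_ipl = IPL_NONE;  /* models curcpu()->ci_cpl */
    static int spin_depth = 0;      /* number of spin locks held */
    static int saved_ipl;           /* IPL to restore at outermost release */

    /* Model of mutex_enter() on a spin mutex initialized at 'ipl'. */
    static void
    model_mutex_enter(int ipl)
    {
        int s = cur_ipl;

        if (ipl > cur_ipl)
            cur_ipl = ipl;          /* raise the IPL */
        if (spin_depth++ == 0)
            saved_ipl = s;          /* only the outermost lock saves it */
    }

    /* Model of mutex_exit() on a spin mutex. */
    static void
    model_mutex_exit(void)
    {
        if (--spin_depth == 0)
            cur_ipl = saved_ipl;    /* only lowered at the outermost release */
    }

    int
    main(void)
    {
        model_mutex_enter(IPL_VM);   /* mutex_enter(some_lock_at_IPL_VM) */
        model_mutex_enter(IPL_HIGH); /* e.g. the lock printf takes internally */
        model_mutex_exit();          /* inner release: IPL stays at IPL_HIGH */

        printf("cur_ipl = %d (IPL_VM = %d)\n", cur_ipl, IPL_VM);
        assert(cur_ipl > IPL_VM);    /* why KASSERT(cpl <= IPL_VM) fired */

        model_mutex_exit();          /* outer release restores the old IPL */
        assert(cur_ipl == IPL_NONE);
        return 0;
    }

With these illustrative values the program prints cur_ipl = 8 and both
asserts hold: after the inner acquire/release the IPL is still above
IPL_VM, which is exactly the state in which the now-disabled
KASSERT/KASSERTMSG checks in fpu_kern_enter/leave would panic.
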

diffstat:

 sys/arch/aarch64/aarch64/fpu.c |  18 ++++++++++++++++--
 sys/arch/arm/vfp/vfp_init.c    |  18 ++++++++++++++++--
 sys/arch/x86/x86/fpu.c         |  18 ++++++++++++++++--
 3 files changed, 48 insertions(+), 6 deletions(-)

diffs (145 lines):

diff -r 2c0d7813d8f3 -r 8490d9c8b97a sys/arch/aarch64/aarch64/fpu.c
--- a/sys/arch/aarch64/aarch64/fpu.c    Sat Aug 20 11:32:20 2022 +0000
+++ b/sys/arch/aarch64/aarch64/fpu.c    Sat Aug 20 11:34:08 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: fpu.c,v 1.12 2022/04/01 19:57:22 riastradh Exp $ */
+/* $NetBSD: fpu.c,v 1.13 2022/08/20 11:34:08 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: fpu.c,v 1.12 2022/04/01 19:57:22 riastradh Exp $");
+__KERNEL_RCSID(1, "$NetBSD: fpu.c,v 1.13 2022/08/20 11:34:08 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -214,7 +214,14 @@
         */
        s = splvm();
        ci = curcpu();
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERTMSG(ci->ci_cpl <= IPL_VM || cold, "cpl=%d", ci->ci_cpl);
+#endif
        KASSERT(ci->ci_kfpu_spl == -1);
        ci->ci_kfpu_spl = s;
 
@@ -242,7 +249,14 @@
 
        ci = curcpu();
 
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERT(ci->ci_cpl == IPL_VM || cold);
+#endif
        KASSERT(ci->ci_kfpu_spl != -1);
 
        /*
diff -r 2c0d7813d8f3 -r 8490d9c8b97a sys/arch/arm/vfp/vfp_init.c
--- a/sys/arch/arm/vfp/vfp_init.c       Sat Aug 20 11:32:20 2022 +0000
+++ b/sys/arch/arm/vfp/vfp_init.c       Sat Aug 20 11:34:08 2022 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: vfp_init.c,v 1.77 2022/04/01 19:57:22 riastradh Exp $ */
+/*      $NetBSD: vfp_init.c,v 1.78 2022/08/20 11:34:08 riastradh Exp $ */
 
 /*
  * Copyright (c) 2008 ARM Ltd
@@ -32,7 +32,7 @@
 #include "opt_cputypes.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfp_init.c,v 1.77 2022/04/01 19:57:22 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfp_init.c,v 1.78 2022/08/20 11:34:08 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -695,7 +695,14 @@
         */
        s = splvm();
        ci = curcpu();
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERTMSG(ci->ci_cpl <= IPL_VM || cold, "cpl=%d", ci->ci_cpl);
+#endif
        KASSERT(ci->ci_kfpu_spl == -1);
        ci->ci_kfpu_spl = s;
 
@@ -721,7 +728,14 @@
                return;
        }
 
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERT(ci->ci_cpl == IPL_VM || cold);
+#endif
        KASSERT(ci->ci_kfpu_spl != -1);
 
        /*
diff -r 2c0d7813d8f3 -r 8490d9c8b97a sys/arch/x86/x86/fpu.c
--- a/sys/arch/x86/x86/fpu.c    Sat Aug 20 11:32:20 2022 +0000
+++ b/sys/arch/x86/x86/fpu.c    Sat Aug 20 11:34:08 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fpu.c,v 1.78 2022/05/24 06:28:00 andvar Exp $  */
+/*     $NetBSD: fpu.c,v 1.79 2022/08/20 11:34:08 riastradh Exp $       */
 
 /*
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.  All
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.78 2022/05/24 06:28:00 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.79 2022/08/20 11:34:08 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -380,8 +380,15 @@
        s = splvm();
 
        ci = curcpu();
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERTMSG(ci->ci_ilevel <= IPL_VM || cold, "ilevel=%d",
            ci->ci_ilevel);
+#endif
        KASSERT(ci->ci_kfpu_spl == -1);
        ci->ci_kfpu_spl = s;
 
@@ -414,7 +421,14 @@
        struct cpu_info *ci = curcpu();
        int s;
 
+#if 0
+       /*
+        * Can't assert this because if the caller holds a spin lock at
+        * IPL_VM, and previously held and released a spin lock at
+        * higher IPL, the IPL remains raised above IPL_VM.
+        */
        KASSERT(ci->ci_ilevel == IPL_VM || cold);
+#endif
        KASSERT(ci->ci_kfpu_spl != -1);
 
        /*


