Source-Changes-HG archive

[src/trunk]: src/sys/arch Mirror the changes made to aarch64



details:   https://anonhg.NetBSD.org/src/rev/18a64cfdf8c2
branches:  trunk
changeset: 937285:18a64cfdf8c2
user:      skrll <skrll@NetBSD.org>
date:      Fri Aug 14 16:18:36 2020 +0000

description:
Mirror the changes made to aarch64:

- Switch to TPIDRPRW_IS_CURLWP, because curlwp is accessed much more often
  by MI code than curcpu().  It also makes curlwp preemption safe (see the
  first sketch after this list).

- Make ASTs operate per-LWP rather than per-CPU; otherwise LWPs can
  sometimes see spurious ASTs (which doesn't cause a problem; it just means
  some time may be wasted).

- Make sure ASTs are always set on the same CPU as the target LWP, and
  delivered via IPI if posted from a remote CPU, so that they are resolved
  quickly (see the second sketch after this list).

- Add some cache line padding to struct cpu_info (a condensed example
  follows the diffstat).

- Add a memory barrier in a couple of places where ci_curlwp is set.  This
  is needed whenever an LWP that is resuming on the CPU could hold an
  adaptive mutex.  The barrier needs to drain the CPU's store buffer, so
  that the update to ci_curlwp becomes globally visible before the LWP can
  resume and call mutex_exit() (shown in the first sketch after this list).
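
As background for the first and last items above, here is a minimal sketch of
how TPIDRPRW_IS_CURLWP and the new barrier fit together.  The helper names
arm_curlwp() and publish_curlwp() are illustrative only, not the committed
code; the real mcr/dmb pair is visible in the cpuswitch.S hunk below, and the
sketch assumes the usual kernel headers for struct lwp and struct cpu_info.

    /*
     * Read curlwp straight from the per-CPU TPIDRPRW register
     * (CP15 c13/c0/4).  The register travels with the CPU and is
     * rewritten on every context switch, so a single mrc yields the
     * current lwp with no preemption window.
     */
    static inline struct lwp *
    arm_curlwp(void)
    {
            struct lwp *l;

            __asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r"(l));
            return l;
    }

    /*
     * Publish the new curlwp on the switch path.  The dmb drains the
     * CPU's store buffer so the store to ci_curlwp is globally visible
     * before the resuming LWP can call mutex_exit() on an adaptive
     * mutex it holds.
     */
    static inline void
    publish_curlwp(struct cpu_info *ci, struct lwp *l)
    {
            ci->ci_curlwp = l;
            __asm __volatile("dmb" ::: "memory");
    }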

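A condensed view of the per-LWP AST rule from the second and third items
(this restates the new cpu_signotify() from the arm_machdep.c hunk below;
the code is from the diff, the comments are editorial):

    void
    cpu_signotify(struct lwp *l)
    {

            KASSERT(kpreempt_disabled());

            if (l->l_cpu != curcpu()) {
    #ifdef MULTIPROCESSOR
                    /*
                     * l is running on another CPU: post the AST via IPI
                     * so it is resolved quickly rather than at the next
                     * interrupt that happens to occur there.
                     */
                    intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
    #endif
            } else {
                    /*
                     * The pending bit lives in the LWP, not the CPU, so
                     * no other LWP scheduled here can see a spurious AST.
                     */
                    l->l_md.md_astpending |= __BIT(0);
            }
    }
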
diffstat:

 sys/arch/arm/arm/arm_machdep.c     |  30 +++++++++++++++++++---
 sys/arch/arm/arm32/cpuswitch.S     |   6 +++-
 sys/arch/arm/arm32/db_machdep.c    |   6 +---
 sys/arch/arm/arm32/genassym.cf     |   4 +-
 sys/arch/arm/include/arm32/frame.h |  47 +++++++++++++----------------------
 sys/arch/arm/include/cpu.h         |  50 +++++++++++++++++++-------------------
 sys/arch/arm/include/locore.h      |   7 ++++-
 sys/arch/arm/include/proc.h        |   3 +-
 sys/arch/evbarm/conf/std.generic   |   4 +-
 9 files changed, 87 insertions(+), 70 deletions(-)

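A toy example of the cache line padding rule from the fourth item
(COHERENCY_UNIT comes from sys/param.h, which the cpu.h hunk now includes;
the struct here is illustrative, not the committed layout):

    struct example {
            int     e_private;      /* only ever touched locally */
            /*
             * Fields stored to by remote CPUs start a cache line of
             * their own so they do not false-share with the private
             * state above.
             */
            int     e_shared __aligned(COHERENCY_UNIT);
    };
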
diffs (truncated from 406 to 300 lines):

diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/arm/arm_machdep.c
--- a/sys/arch/arm/arm/arm_machdep.c    Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/arm/arm_machdep.c    Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: arm_machdep.c,v 1.63 2020/02/15 08:16:10 skrll Exp $   */
+/*     $NetBSD: arm_machdep.c,v 1.64 2020/08/14 16:18:36 skrll Exp $   */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -80,7 +80,7 @@
 
 #include <sys/param.h>
 
-__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.63 2020/02/15 08:16:10 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.64 2020/08/14 16:18:36 skrll Exp $");
 
 #include <sys/atomic.h>
 #include <sys/cpu.h>
@@ -241,17 +241,39 @@
                if (flags & RESCHED_REMOTE) {
                        intr_ipi_send(ci->ci_kcpuset, IPI_KPREEMPT);
                } else {
-                       atomic_or_uint(&ci->ci_astpending, __BIT(1));
+                       l->l_md.md_astpending |= __BIT(1);
                }
 #endif /* __HAVE_PREEMPTION */
                return;
        }
+
+       KASSERT((flags & RESCHED_UPREEMPT) != 0);
        if (flags & RESCHED_REMOTE) {
 #ifdef MULTIPROCESSOR
                intr_ipi_send(ci->ci_kcpuset, IPI_AST);
 #endif /* MULTIPROCESSOR */
        } else {
-               setsoftast(ci);
+               l->l_md.md_astpending |= __BIT(0);
+       }
+}
+
+
+/*
+ * Notify the current lwp (l) that it has a signal pending,
+ * process as soon as possible.
+ */
+void
+cpu_signotify(struct lwp *l)
+{
+
+       KASSERT(kpreempt_disabled());
+
+       if (l->l_cpu != curcpu()) {
+#ifdef MULTIPROCESSOR
+               intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
+#endif
+       } else {
+               l->l_md.md_astpending |= __BIT(0);
        }
 }
 
diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/arm32/cpuswitch.S
--- a/sys/arch/arm/arm32/cpuswitch.S    Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/arm32/cpuswitch.S    Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpuswitch.S,v 1.101 2020/07/10 12:25:09 skrll Exp $    */
+/*     $NetBSD: cpuswitch.S,v 1.102 2020/08/14 16:18:36 skrll Exp $    */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -87,7 +87,7 @@
 #include <arm/asm.h>
 #include <arm/locore.h>
 
-       RCSID("$NetBSD: cpuswitch.S,v 1.101 2020/07/10 12:25:09 skrll Exp $")
+       RCSID("$NetBSD: cpuswitch.S,v 1.102 2020/08/14 16:18:36 skrll Exp $")
 
 /* LINTSTUB: include <sys/param.h> */
 
@@ -191,6 +191,7 @@
 
        /* We have a new curlwp now so make a note of it */
        str     r6, [r5, #(CI_CURLWP)]
+       dmb                                     /* see comments in kern_mutex.c */
 
        /* Get the new pcb */
        ldr     r7, [r6, #(L_PCB)]
@@ -388,6 +389,7 @@
        mcr     p15, 0, r5, c13, c0, 4  /* save new lwp */
 #endif
        str     r5, [r7, #(CI_CURLWP)]  /* save new lwp */
+       dmb                             /* see comments in kern_mutex.c */
 
 #ifdef KASAN
        mov     r0, r5
diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/arm32/db_machdep.c
--- a/sys/arch/arm/arm32/db_machdep.c   Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/arm32/db_machdep.c   Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: db_machdep.c,v 1.34 2020/07/03 10:19:18 jmcneill Exp $ */
+/*     $NetBSD: db_machdep.c,v 1.35 2020/08/14 16:18:36 skrll Exp $    */
 
 /*
  * Copyright (c) 1996 Mark Brinicombe
@@ -34,7 +34,7 @@
 #endif
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_machdep.c,v 1.34 2020/07/03 10:19:18 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_machdep.c,v 1.35 2020/08/14 16:18:36 skrll Exp $");
 
 #include <sys/param.h>
 
@@ -521,8 +521,6 @@
            &ci->ci_cpl, cpuid, ci->ci_cpl);
        db_printf("%p cpu[%lu].ci_softints     = 0x%08x\n",
            &ci->ci_softints, cpuid, ci->ci_softints);
-       db_printf("%p cpu[%lu].ci_astpending   = 0x%08x\n",
-           &ci->ci_astpending, cpuid, ci->ci_astpending);
        db_printf("%p cpu[%lu].ci_intr_depth   = %u\n",
            &ci->ci_intr_depth, cpuid, ci->ci_intr_depth);
 
diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/arm32/genassym.cf
--- a/sys/arch/arm/arm32/genassym.cf    Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/arm32/genassym.cf    Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: genassym.cf,v 1.93 2020/07/08 10:18:00 skrll Exp $
+#      $NetBSD: genassym.cf,v 1.94 2020/08/14 16:18:36 skrll Exp $
 
 # Copyright (c) 1982, 1990 The Regents of the University of California.
 # All rights reserved.
@@ -166,6 +166,7 @@
 define L_PROC                  offsetof(struct lwp, l_proc)
 define L_PRIVATE               offsetof(struct lwp, l_private)
 define L_FLAG                  offsetof(struct lwp, l_flag)
+define L_MD_ASTPENDING         offsetof(struct lwp, l_md.md_astpending)
 define L_MD_FLAGS              offsetof(struct lwp, l_md.md_flags)
 define L_MD_TF                 offsetof(struct lwp, l_md.md_tf)
 define MDLWP_NOALIGNFLT        MDLWP_NOALIGNFLT
@@ -225,7 +226,6 @@
 
 define CPU_INFO_SIZE           sizeof(struct cpu_info)
 define CI_ARM_CPUID            offsetof(struct cpu_info, ci_arm_cpuid)
-define CI_ASTPENDING           offsetof(struct cpu_info, ci_astpending)
 define CI_CPL                  offsetof(struct cpu_info, ci_cpl)
 define CI_CURLWP               offsetof(struct cpu_info, ci_curlwp)
 define CI_INTR_DEPTH           offsetof(struct cpu_info, ci_intr_depth)
diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/include/arm32/frame.h
--- a/sys/arch/arm/include/arm32/frame.h        Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/include/arm32/frame.h        Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: frame.h,v 1.47 2018/10/28 14:46:59 skrll Exp $ */
+/*     $NetBSD: frame.h,v 1.48 2020/08/14 16:18:36 skrll Exp $ */
 
 /*
  * Copyright (c) 1994-1997 Mark Brinicombe.
@@ -151,26 +151,16 @@
        msr     cpsr_c, ra              /* Restore interrupts */
 #endif
 
-#ifdef __HAVE_PREEMPTION
-#define DO_CLEAR_ASTPENDING                                            \
-       mvn     r1, #1                  /* complement of 1 */           ;\
-       add     r0, r4, #CI_ASTPENDING  /* address of astpending */     ;\
-       bl      _C_LABEL(atomic_and_uint) /* clear AST */
-#else
-#define DO_CLEAR_ASTPENDING                                            \
-       mov     r0, #0                                                  ;\
-       str     r0, [r4, #CI_ASTPENDING] /* clear AST */
-#endif
-
 #define DO_PENDING_AST(lbl)                                            ;\
-1:     ldr     r1, [r4, #CI_ASTPENDING] /* Pending AST? */             ;\
-       tst     r1, #0x00000001                                         ;\
+1:     ldr     r1, [r5, #L_MD_ASTPENDING] /* Pending AST? */           ;\
+       tst     r1, #1                                                  ;\
        beq     lbl                     /* Nope. Just bail */           ;\
-       DO_CLEAR_ASTPENDING                                             ;\
-       CPSIE_I(r5, r5)                 /* Restore interrupts */        ;\
+       bic     r0, r1, #1               /* clear AST */                ;\
+       str     r0, [r5, #L_MD_ASTPENDING]                              ;\
+       CPSIE_I(r6, r6)                 /* Restore interrupts */        ;\
        mov     r0, sp                                                  ;\
        bl      _C_LABEL(ast)           /* ast(frame) */                ;\
-       CPSID_I(r0, r5)                 /* Disable interrupts */        ;\
+       CPSID_I(r0, r6)                 /* Disable interrupts */        ;\
        b       1b                      /* test again */
 
 /*
@@ -179,8 +169,8 @@
  * alignment faults when executing old a.out ARM binaries.
  *
  * Note that when ENABLE_ALIGNMENTS_FAULTS finishes r4 will contain
- * pointer to the cpu's cpu_info.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
- * relies on r4 being preserved.
+ * curcpu() and r5 containing curlwp.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ * relies on r4 and r5 being preserved.
  */
 #ifdef EXEC_AOUT
 #define        AST_ALIGNMENT_FAULT_LOCALS                                      \
@@ -198,10 +188,9 @@
 #define        ENABLE_ALIGNMENT_FAULTS                                         \
        and     r7, r0, #(PSR_MODE)     /* Test for USR32 mode */       ;\
        cmp     r7, #(PSR_USR32_MODE)                                   ;\
-       GET_CURCPU(r4)                  /* r4 = cpuinfo */              ;\
+       GET_CURX(r4, r5)                /* r4 = curcpu, r5 = curlwp */  ;\
        bne     1f                      /* Not USR mode skip AFLT */    ;\
-       ldr     r1, [r4, #CI_CURLWP]    /* get curlwp from cpu_info */  ;\
-       ldr     r1, [r1, #L_MD_FLAGS]   /* Fetch l_md.md_flags */       ;\
+       ldr     r1, [r5, #L_MD_FLAGS]   /* Fetch l_md.md_flags */       ;\
        tst     r1, #MDLWP_NOALIGNFLT                                   ;\
        beq     1f                      /* AFLTs already enabled */     ;\
        ldr     r2, .Laflt_cpufuncs                                     ;\
@@ -213,13 +202,13 @@
 /*
  * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
  * PULLFRAME at the end of interrupt/exception handlers.  We know that
- * r4 points to cpu_info since that is what ENABLE_ALIGNMENT_FAULTS did
- * for use.
+ * r4 points to curcpu() and r5 points to curlwp since that is what
+ * ENABLE_ALIGNMENT_FAULTS did for us.
  */
 #define        DO_AST_AND_RESTORE_ALIGNMENT_FAULTS                             \
        DO_PENDING_SOFTINTS                                             ;\
-       GET_CPSR(r5)                    /* save CPSR */                 ;\
-       CPSID_I(r1, r5)                 /* Disable interrupts */        ;\
+       GET_CPSR(r6)                    /* save CPSR */                 ;\
+       CPSID_I(r1, r6)                 /* Disable interrupts */        ;\
        cmp     r7, #(PSR_USR32_MODE)   /* Returning to USR mode? */    ;\
        bne     3f                      /* Nope, get out now */         ;\
        DO_PENDING_AST(2f)              /* Pending AST? */              ;\
@@ -240,13 +229,13 @@
 
 #define        ENABLE_ALIGNMENT_FAULTS                                         \
        and     r7, r0, #(PSR_MODE)     /* Test for USR32 mode */       ;\
-       GET_CURCPU(r4)                  /* r4 = cpuinfo */
+       GET_CURX(r4, r5)                /* r4 = curcpu, r5 = curlwp */
 
 
 #define        DO_AST_AND_RESTORE_ALIGNMENT_FAULTS                             \
        DO_PENDING_SOFTINTS                                             ;\
-       GET_CPSR(r5)                    /* save CPSR */                 ;\
-       CPSID_I(r1, r5)                 /* Disable interrupts */        ;\
+       GET_CPSR(r6)                    /* save CPSR */                 ;\
+       CPSID_I(r1, r6)                 /* Disable interrupts */        ;\
        cmp     r7, #(PSR_USR32_MODE)                                   ;\
        bne     2f                      /* Nope, get out now */         ;\
        DO_PENDING_AST(2f)              /* Pending AST? */              ;\
diff -r 550f63797e46 -r 18a64cfdf8c2 sys/arch/arm/include/cpu.h
--- a/sys/arch/arm/include/cpu.h        Fri Aug 14 14:42:44 2020 +0000
+++ b/sys/arch/arm/include/cpu.h        Fri Aug 14 16:18:36 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.h,v 1.111 2020/06/29 23:54:06 riastradh Exp $      */
+/*     $NetBSD: cpu.h,v 1.112 2020/08/14 16:18:36 skrll Exp $  */
 
 /*
  * Copyright (c) 1994-1996 Mark Brinicombe.
@@ -154,6 +154,7 @@
 #include <sys/cpu_data.h>
 #include <sys/device_if.h>
 #include <sys/evcnt.h>
+#include <sys/param.h>
 
 struct cpu_info {
        struct cpu_data ci_data;        /* MI per-cpu data */
@@ -163,22 +164,32 @@
        uint32_t        ci_arm_cputype; /* CPU type */
        uint32_t        ci_arm_cpurev;  /* CPU revision */
        uint32_t        ci_ctrl;        /* The CPU control register */
-       int             ci_cpl;         /* current processor level (spl) */
-       volatile int    ci_astpending;  /* */
-       int             ci_want_resched;/* resched() was called */
-       int             ci_intr_depth;  /* */
 
-       int ci_kfpu_spl;
+       /*
+        * the following are in their own cache line, as they are stored to
+        * regularly by remote CPUs; when they were mixed with other fields
+        * we observed frequent cache misses.
+        */
+       int             ci_want_resched __aligned(COHERENCY_UNIT);
+                                       /* resched() was called */
+       lwp_t *         ci_curlwp __aligned(COHERENCY_UNIT);
+                                       /* current lwp */
+       lwp_t *         ci_onproc;      /* current user LWP / kthread */
+
+       /*
+        * largely CPU-private.
+        */
+       lwp_t *         ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
 
        struct cpu_softc *
                        ci_softc;       /* platform softc */
 
-       lwp_t *         ci_softlwps[SOFTINT_COUNT];
-       volatile uint32_t
-                       ci_softints;
+       int             ci_cpl;         /* current processor level (spl) */
+       int             ci_kfpu_spl;
 
-       lwp_t *         ci_curlwp;      /* current lwp */


