Source-Changes-HG archive


[src/trunk]: src/sys/arch NetBSD/x86: Raise the number of interrupt sources per CPU from 32 to 56.



details:   https://anonhg.NetBSD.org/src/rev/a5293fcb521b
branches:  trunk
changeset: 369900:a5293fcb521b
user:      knakahara <knakahara@NetBSD.org>
date:      Wed Sep 07 00:40:18 2022 +0000

description:
NetBSD/x86: Raise the number of interrupt sources per CPU from 32 to 56.

There has been no objection for three years.
    https://mail-index.netbsd.org/port-amd64/2019/09/22/msg003012.html
Implemented by nonaka@n.o, updated by me.
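
The diff below achieves this by repacking the per-CPU interrupt state: ci_ilevel and ci_ipending, previously separate 32-bit fields, now share a single 64-bit word, ci_istate, with the IPL in the top byte and the pending bitmap in the low 56 bits -- hence the new limit of 56 sources. That layout is an inference from the genassym.cf and spl.S hunks; a minimal C sketch of it (illustrative names):

#include <stdint.h>

/*
 * Assumed layout of the packed state (the real field is ci_istate in
 * struct cpu_info).  On little-endian x86-64, byte 7 of a quadword is
 * its most significant byte, which is why genassym.cf now defines
 * CPU_INFO_ILEVEL as offsetof(struct cpu_info, ci_istate) + 7 while
 * CPU_INFO_IPENDING aliases the whole word at offset 0.
 */
#define ISTATE_PENDING_MASK	((UINT64_C(1) << 56) - 1)

union istate {
	uint64_t raw;		/* pending bitmap in bits 0-55 */
	uint8_t  byte[8];	/* byte[7] = current IPL (ilevel) */
};

static inline uint8_t
istate_ilevel(const union istate *s)
{
	return s->byte[7];
}

static inline uint64_t
istate_pending(const union istate *s)
{
	return s->raw & ISTATE_PENDING_MASK;
}

Packing both fields into one word is what allows the cmpxchgq in the new spllower() below to swap level and pending bits atomically in a single step.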

diffstat:

 sys/arch/amd64/amd64/amd64_trap.S     |   16 +-
 sys/arch/amd64/amd64/genassym.cf      |    6 +-
 sys/arch/amd64/amd64/lock_stubs.S     |    6 +-
 sys/arch/amd64/amd64/locore.S         |    8 +-
 sys/arch/amd64/amd64/spl.S            |   62 +-
 sys/arch/amd64/amd64/vector.S         |  268 +++++--------
 sys/arch/i386/i386/genassym.cf        |    6 +-
 sys/arch/i386/i386/i386_trap.S        |   18 +-
 sys/arch/i386/i386/lock_stubs.S       |   31 +-
 sys/arch/i386/i386/locore.S           |   12 +-
 sys/arch/i386/i386/spl.S              |   80 +++-
 sys/arch/i386/i386/vector.S           |  662 ++++++++++-----------------------
 sys/arch/x86/include/cpu.h            |   23 +-
 sys/arch/x86/include/intr.h           |    5 +-
 sys/arch/x86/include/intrdefs.h       |   10 +-
 sys/arch/x86/x86/intr.c               |   19 +-
 sys/arch/x86/x86/lapic.c              |    6 +-
 sys/arch/x86/x86/x86_softintr.c       |   19 +-
 sys/arch/xen/include/hypervisor.h     |    4 +-
 sys/arch/xen/include/intr.h           |    4 +-
 sys/arch/xen/x86/hypervisor_machdep.c |   12 +-
 sys/arch/xen/xen/evtchn.c             |   10 +-
 sys/arch/xen/xen/xenevt.c             |    6 +-
 23 files changed, 523 insertions(+), 770 deletions(-)

diffs (truncated from 2387 to 300 lines):

diff -r 6d37c94c90d9 -r a5293fcb521b sys/arch/amd64/amd64/amd64_trap.S
--- a/sys/arch/amd64/amd64/amd64_trap.S Wed Sep 07 00:34:19 2022 +0000
+++ b/sys/arch/amd64/amd64/amd64_trap.S Wed Sep 07 00:40:18 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: amd64_trap.S,v 1.53 2020/06/29 23:04:56 riastradh Exp $        */
+/*     $NetBSD: amd64_trap.S,v 1.54 2022/09/07 00:40:18 knakahara Exp $        */
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -347,7 +347,7 @@
        ZTRAP_NJ(T_DNA)
        INTRENTRY
 #ifdef DIAGNOSTIC
-       movl    CPUVAR(ILEVEL),%ebx
+       movzbl  CPUVAR(ILEVEL),%ebx
 #endif
        movq    %rsp,%rdi
        call    _C_LABEL(fpudna)
@@ -439,7 +439,7 @@
        ZTRAP_NJ(T_ASTFLT)
        INTRENTRY
 #ifdef DIAGNOSTIC
-       movl    CPUVAR(ILEVEL),%ebx
+       movzbl  CPUVAR(ILEVEL),%ebx
 #endif
        jmp     .Lalltraps_checkusr
 IDTVEC_END(trap15)
@@ -457,7 +457,7 @@
        HANDLE_DEFERRED_FPU
 #endif /* XENPV */
 #ifdef DIAGNOSTIC
-       movl    CPUVAR(ILEVEL),%ebx
+       movzbl  CPUVAR(ILEVEL),%ebx
 #endif
        movq    %rsp,%rdi
        call    _C_LABEL(fputrap)
@@ -508,7 +508,7 @@
        ZTRAP_NJ(T_ASTFLT)
        INTRENTRY
 #ifdef DIAGNOSTIC
-       movl    CPUVAR(ILEVEL),%ebx
+       movzbl  CPUVAR(ILEVEL),%ebx
 #endif
        jmp     .Lalltraps_checkusr
 IDTVEC_END(intrspurious)
@@ -658,7 +658,7 @@
 
 calltrap:
 #ifdef DIAGNOSTIC
-       movl    CPUVAR(ILEVEL),%ebx
+       movzbl  CPUVAR(ILEVEL),%ebx
 #endif
        movq    %rsp,%rdi
        incq    CPUVAR(NTRAP)
@@ -688,7 +688,7 @@
 
 6:
 #ifdef DIAGNOSTIC
-       cmpl    CPUVAR(ILEVEL),%ebx
+       cmpb    CPUVAR(ILEVEL),%bl
        jne     .Lspl_error
 #endif
        INTRFASTEXIT
@@ -701,7 +701,7 @@
 .Lspl_error:
        STI(si)
        movabsq $4f,%rdi
-       movl    CPUVAR(ILEVEL),%esi
+       movzbl  CPUVAR(ILEVEL),%esi
        call    _C_LABEL(panic)
 4:     .asciz  "spl not lowered on trap exit, ilevel=%x"
 #endif
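
The amd64_trap.S hunks above are all the same mechanical change: with the IPL reduced to a single byte, 32-bit loads and compares (movl/cmpl) become zero-extending byte operations (movzbl/cmpb), since a dword read at the new ILEVEL offset would run past the ilevel byte into whatever follows ci_istate. A hedged C analogue of the DIAGNOSTIC trap-exit check (helper and parameter names assumed):

#include <stdint.h>

extern void panic(const char *, ...);	/* kernel panic() */

/* The saved IPL must equal the current one when leaving a trap. */
static inline void
check_spl_on_trap_exit(const volatile uint8_t *ilevelp, uint8_t saved)
{
	unsigned int cur = *ilevelp;	/* movzbl: byte load, zero-extended */

	if ((uint8_t)cur != saved)	/* cmpb CPUVAR(ILEVEL),%bl */
		panic("spl not lowered on trap exit, ilevel=%x", cur);
}
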
diff -r 6d37c94c90d9 -r a5293fcb521b sys/arch/amd64/amd64/genassym.cf
--- a/sys/arch/amd64/amd64/genassym.cf  Wed Sep 07 00:34:19 2022 +0000
+++ b/sys/arch/amd64/amd64/genassym.cf  Wed Sep 07 00:40:18 2022 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: genassym.cf,v 1.88 2022/08/20 23:48:50 riastradh Exp $
+#      $NetBSD: genassym.cf,v 1.89 2022/09/07 00:40:18 knakahara Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -247,9 +247,9 @@
 define CPU_INFO_CURPRIORITY    offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
 
 define CPU_INFO_GDT            offsetof(struct cpu_info, ci_gdt)
-define CPU_INFO_ILEVEL         offsetof(struct cpu_info, ci_ilevel)
+define CPU_INFO_ILEVEL         (offsetof(struct cpu_info, ci_istate) + 7)
 define CPU_INFO_IDEPTH         offsetof(struct cpu_info, ci_idepth)
-define CPU_INFO_IPENDING       offsetof(struct cpu_info, ci_ipending)
+define CPU_INFO_IPENDING       offsetof(struct cpu_info, ci_istate)
 define CPU_INFO_IMASKED        offsetof(struct cpu_info, ci_imasked)
 define CPU_INFO_IMASK          offsetof(struct cpu_info, ci_imask)
 define CPU_INFO_IUNMASK        offsetof(struct cpu_info, ci_iunmask)
diff -r 6d37c94c90d9 -r a5293fcb521b sys/arch/amd64/amd64/lock_stubs.S
--- a/sys/arch/amd64/amd64/lock_stubs.S Wed Sep 07 00:34:19 2022 +0000
+++ b/sys/arch/amd64/amd64/lock_stubs.S Wed Sep 07 00:40:18 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lock_stubs.S,v 1.36 2020/04/25 15:26:16 bouyer Exp $   */
+/*     $NetBSD: lock_stubs.S,v 1.37 2022/09/07 00:40:18 knakahara Exp $        */
 
 /*
  * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -91,11 +91,11 @@
  */
 ENTRY(mutex_spin_enter)
        movl    $1, %eax
-       movl    CPUVAR(ILEVEL), %esi
+       movzbl  CPUVAR(ILEVEL), %esi
        movzbl  MTX_IPL(%rdi), %ecx             /* new SPL */
        cmpl    %ecx, %esi                      /* higher? */
        cmovgl  %esi, %ecx
-       movl    %ecx, CPUVAR(ILEVEL)            /* splraiseipl() */
+       movb    %cl, CPUVAR(ILEVEL)             /* splraiseipl() */
        subl    %eax, CPUVAR(MTX_COUNT)         /* decl doesnt set CF */
        cmovncl CPUVAR(MTX_OLDSPL), %esi
        movl    %esi, CPUVAR(MTX_OLDSPL)
diff -r 6d37c94c90d9 -r a5293fcb521b sys/arch/amd64/amd64/locore.S
--- a/sys/arch/amd64/amd64/locore.S     Wed Sep 07 00:34:19 2022 +0000
+++ b/sys/arch/amd64/amd64/locore.S     Wed Sep 07 00:40:18 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.213 2022/06/12 11:36:42 bouyer Exp $      */
+/*     $NetBSD: locore.S,v 1.214 2022/09/07 00:40:18 knakahara Exp $   */
 
 /*
  * Copyright-o-rama!
@@ -1266,7 +1266,7 @@
 #ifndef XENPV
        /* Raise the IPL to IPL_HIGH. Dropping the priority is deferred until
         * mi_switch(), when cpu_switchto() returns. XXX Still needed? */
-       movl    $IPL_HIGH,CPUVAR(ILEVEL)
+       movb    $IPL_HIGH,CPUVAR(ILEVEL)
 
        /* The 32bit LWPs are handled differently. */
        testl   $PCB_COMPAT32,PCB_FLAGS(%r14)
@@ -1376,7 +1376,7 @@
        jnz     9f
 
 #ifdef DIAGNOSTIC
-       cmpl    $IPL_NONE,CPUVAR(ILEVEL)
+       cmpb    $IPL_NONE,CPUVAR(ILEVEL)
        jne     .Lspl_error
 #endif
 
@@ -1398,7 +1398,7 @@
 #ifdef DIAGNOSTIC
 .Lspl_error:
        movabsq $4f,%rdi
-       movl    CPUVAR(ILEVEL),%esi
+       movzbl  CPUVAR(ILEVEL),%esi
        call    _C_LABEL(panic)
 4:     .asciz  "spl not lowered on syscall, ilevel=%x"
 #endif
diff -r 6d37c94c90d9 -r a5293fcb521b sys/arch/amd64/amd64/spl.S
--- a/sys/arch/amd64/amd64/spl.S        Wed Sep 07 00:34:19 2022 +0000
+++ b/sys/arch/amd64/amd64/spl.S        Wed Sep 07 00:40:18 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: spl.S,v 1.47 2020/08/29 07:16:03 maxv Exp $    */
+/*     $NetBSD: spl.S,v 1.48 2022/09/07 00:40:18 knakahara Exp $       */
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
@@ -83,10 +83,10 @@
  * int splraise(int s);
  */
 ENTRY(splraise)
-       movl    CPUVAR(ILEVEL),%eax
+       movzbl  CPUVAR(ILEVEL),%eax
        cmpl    %edi,%eax
        cmoval  %eax,%edi
-       movl    %edi,CPUVAR(ILEVEL)
+       movb    %dil,CPUVAR(ILEVEL)
        KMSAN_INIT_RET(4)
        ret
 END(splraise)
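
splraise() can stay lock-free without any read-modify-write of the full word: it only ever raises the level, and the movb store touches nothing but the ilevel byte, so the pending bits sharing the quadword are preserved. Roughly, in C (names illustrative):

#include <stdint.h>

/* Sketch of the splraise() fast path; ilevelp stands for
 * CPUVAR(ILEVEL), i.e. byte 7 of the local CPU's ci_istate. */
static inline int
splraise_sketch(volatile uint8_t *ilevelp, int s)
{
	int old = *ilevelp;	/* movzbl CPUVAR(ILEVEL),%eax */

	if (s < old)
		s = old;	/* cmoval: never lower the IPL here */
	*ilevelp = (uint8_t)s;	/* movb: byte store, pending bits untouched */
	return old;		/* caller lowers again later */
}
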
@@ -111,7 +111,7 @@
        pushq   %r14
        pushq   %r15
 
-       movl    $IPL_HIGH,CPUVAR(ILEVEL)
+       movb    $IPL_HIGH,CPUVAR(ILEVEL)
        movq    CPUVAR(CURLWP),%r15
        movq    IS_LWP(%rax),%rdi       /* switch to handler LWP */
        movq    L_PCB(%rdi),%rdx
@@ -188,7 +188,9 @@
  * Software interrupt registration.
  */
 ENTRY(softint_trigger)
-       orl     %edi,CPUVAR(IPENDING)   /* atomic on local cpu */
+       shlq    $8,%rdi                 /* clear upper 8 bits */
+       shrq    $8,%rdi
+       orq     %rdi,CPUVAR(IPENDING)   /* atomic on local cpu */
        ret
 END(softint_trigger)
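
The shlq/shrq pair in softint_trigger() exists because the pending word now shares its top byte with the IPL: shifting the mask left and back right by 8 clears bits 56-63, so the following orq (unlocked, and therefore atomic only against interrupts on the local CPU) cannot corrupt the level. Approximately, in C:

#include <stdint.h>

#define ISTATE_PENDING_MASK	((UINT64_C(1) << 56) - 1)

/* Sketch of softint_trigger(); __atomic_fetch_or stands in for the
 * unlocked orq, which suffices when only the local CPU stores here. */
static inline void
softint_trigger_sketch(volatile uint64_t *istate, uint64_t mask)
{
	mask &= ISTATE_PENDING_MASK;	/* shlq $8; shrq $8 */
	__atomic_fetch_or(istate, mask, __ATOMIC_RELAXED);
}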
 
@@ -198,7 +200,7 @@
  * Handles preemption interrupts via Xspllower().
  */
 IDTVEC(recurse_preempt)
-       movl    $IPL_PREEMPT,CPUVAR(ILEVEL)
+       movb    $IPL_PREEMPT,CPUVAR(ILEVEL)
        STI(di)
        xorq    %rdi,%rdi
        KMSAN_INIT_ARG(8)
@@ -213,7 +215,7 @@
  * Handles preemption interrupts via Xdoreti().
  */
 IDTVEC(resume_preempt)
-       movl    $IPL_PREEMPT,CPUVAR(ILEVEL)
+       movb    $IPL_PREEMPT,CPUVAR(ILEVEL)
        STI(ax)
        testq   $SEL_RPL,TF_CS(%rsp)
        jnz     1f
@@ -230,34 +232,32 @@
 
 /*
  * void spllower(int s);
- *
- * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
- *
- * edx : eax = old level / old ipending
- * ecx : ebx = new level / old ipending
  */
 ENTRY(spllower)
-       movl    CPUVAR(ILEVEL),%edx
-       movq    %rbx,%r8
+       movzbl  CPUVAR(ILEVEL),%edx
        cmpl    %edx,%edi                       /* new level is lower? */
        jae     1f
+       xorq    %rcx,%rcx                       /* rcx: ci_ipending mask */
+       notq    %rcx
+       shrq    $8,%rcx
+       movq    %rdi,%r9                        /* r9: shifted new level */
+       shlq    $56,%r9
 0:
-       movl    CPUVAR(IPENDING),%eax
-       movl    %edi,%ecx
-       testl   %eax,CPUVAR(IUNMASK)(,%rcx,4)/* deferred interrupts? */
-       movl    %eax,%ebx
+       movq    CPUVAR(IPENDING),%rax
+       testq   %rax,CPUVAR(IUNMASK)(,%rdi,8)   /* deferred interrupts? */
        /*
         * On the P4 this jump is cheaper than patching in junk
         * using cmov.  Is cmpxchg expensive if it fails?
         */
        jnz     2f
-       cmpxchg8b CPUVAR(ISTATE)                /* swap in new ilevel */
+       movq    %rax,%r8
+       andq    %rcx,%r8
+       orq     %r9,%r8
+       cmpxchgq %r8,CPUVAR(ISTATE)             /* swap in new ilevel */
        jnz     0b
 1:
-       movq    %r8,%rbx
        ret
 2:
-       movq    %r8,%rbx
        jmp     _C_LABEL(Xspllower)
 END(spllower)
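
spllower() is where the packing pays off. The old code had to preserve %rbx and use cmpxchg8b to swap the level/pending pair; with both fields in one quadword a plain cmpxchgq does the job and the register shuffling disappears. A C sketch of the new fast path, with illustrative names (Xspllower is the existing slow path that processes deferred interrupts; iunmask[] stands for CPUVAR(IUNMASK), now an array of quadwords):

#include <stdbool.h>
#include <stdint.h>

#define ISTATE_PENDING_MASK	((UINT64_C(1) << 56) - 1)

extern void Xspllower(int);	/* slow path: handle deferred interrupts */

static void
spllower_sketch(volatile uint64_t *istate, const uint64_t *iunmask, int s)
{
	uint64_t old, nstate;

	if ((uint64_t)s >= (*istate >> 56))
		return;				/* new level not lower: done */
	do {
		old = *istate;			/* movq CPUVAR(IPENDING),%rax */
		if (old & iunmask[s]) {		/* deferred ints now unmasked? */
			Xspllower(s);
			return;
		}
		/* keep the pending bits, new level into the top byte */
		nstate = (old & ISTATE_PENDING_MASK) | ((uint64_t)s << 56);
	} while (!__atomic_compare_exchange_n(istate, &old, nstate, false,
	    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
}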
 
@@ -286,16 +286,16 @@
        leaq    1f(%rip),%r13           /* address to resume loop at */
 1:
        movl    %ebx,%eax               /* get cpl */
-       movl    CPUVAR(IUNMASK)(,%rax,4),%eax
+       movq    CPUVAR(IUNMASK)(,%rax,8),%rax
        CLI(si)
-       andl    CPUVAR(IPENDING),%eax   /* any non-masked bits left? */
+       andq    CPUVAR(IPENDING),%rax   /* any non-masked bits left? */
        jz      2f
-       bsrl    %eax,%eax
-       btrl    %eax,CPUVAR(IPENDING)
+       bsrq    %rax,%rax
+       btrq    %rax,CPUVAR(IPENDING)
        movq    CPUVAR(ISOURCES)(,%rax,8),%rax
        jmp     *IS_RECURSE(%rax)
 2:
-       movl    %ebx,CPUVAR(ILEVEL)
+       movb    %bl,CPUVAR(ILEVEL)
        STI(si)
        popq    %r12
        popq    %r13
@@ -318,16 +318,16 @@
        leaq    1f(%rip),%r13
 1:
        movl    %ebx,%eax
-       movl    CPUVAR(IUNMASK)(,%rax,4),%eax
+       movq    CPUVAR(IUNMASK)(,%rax,8),%rax
        CLI(si)
-       andl    CPUVAR(IPENDING),%eax
+       andq    CPUVAR(IPENDING),%rax
        jz      2f
-       bsrl    %eax,%eax               /* slow, but not worth optimizing */
-       btrl    %eax,CPUVAR(IPENDING)
+       bsrq    %rax,%rax               /* slow, but not worth optimizing */
+       btrq    %rax,CPUVAR(IPENDING)
        movq    CPUVAR(ISOURCES)(,%rax,8),%rax
        jmp     *IS_RESUME(%rax)
 2:     /* Check for ASTs on exit to user mode. */
-       movl    %ebx,CPUVAR(ILEVEL)
+       movb    %bl,CPUVAR(ILEVEL)


