Source-Changes-HG archive

[src/trunk]: src/sys/arch Remove the LazyFPU code, as posted 5 months ago on ...



details:   https://anonhg.NetBSD.org/src/rev/7ab6c94237f0
branches:  trunk
changeset: 459960:7ab6c94237f0
user:      maxv <maxv%NetBSD.org@localhost>
date:      Thu Oct 03 05:06:29 2019 +0000

description:
Remove the LazyFPU code, as posted 5 months ago on port-amd64@.
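
[Editorial note, not part of the commit message: with the LazyFPU code gone,
FPU state is switched eagerly and unconditionally. The context-switch paths in
the amd64 and i386 locore.S now call fpu_eagerswitch() without testing the old
x86_fpu_eager flag, the flag itself is deleted from fpu.c and cpu.h, and the
DNA (device-not-available) trap handler no longer has to lazily save another
lwp's state. Below is a minimal C sketch of the resulting fpudna(),
reconstructed from the hunks further down; the kernel-mode sanity check at the
top of the real function is elided here, and everything else uses only names
that appear in the diff.

	/* Sketch only -- mirrors the post-change fpudna() in
	 * sys/arch/x86/x86/fpu.c; not the committed source. */
	void
	fpudna(struct trapframe *frame)
	{
		struct cpu_info *ci = curcpu();
		int s;

		/*
		 * No lazy save path anymore: the previous lwp's FPU state
		 * was already dealt with at context-switch time by
		 * fpu_eagerswitch(), so only the current lwp's state needs
		 * to be installed here.
		 */
		s = splhigh();
		fpu_lwp_install(ci->ci_curlwp);
		splx(s);
	}

Since eager switching is now the only mode, the xcall-based mode-switch
machinery (eager_change(), eager_change_cpu()) and the machdep sysctl handler
for toggling it are removed as well, as the deleted block at the end of the
fpu.c hunks shows.]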

diffstat:

 sys/arch/amd64/amd64/locore.S  |    7 +-
 sys/arch/i386/i386/locore.S    |    9 +-
 sys/arch/x86/include/cpu.h     |    3 +-
 sys/arch/x86/x86/fpu.c         |  190 ++--------------------------------------
 sys/arch/x86/x86/identcpu.c    |    5 +-
 sys/arch/x86/x86/x86_machdep.c |    7 +-
 6 files changed, 21 insertions(+), 200 deletions(-)

diffs (truncated from 379 to 300 lines):

diff -r b4bce0890508 -r 7ab6c94237f0 sys/arch/amd64/amd64/locore.S
--- a/sys/arch/amd64/amd64/locore.S     Thu Oct 03 03:10:02 2019 +0000
+++ b/sys/arch/amd64/amd64/locore.S     Thu Oct 03 05:06:29 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.186 2019/08/21 16:35:10 maxv Exp $        */
+/*     $NetBSD: locore.S,v 1.187 2019/10/03 05:06:29 maxv Exp $        */
 
 /*
  * Copyright-o-rama!
@@ -1147,13 +1147,10 @@
        movq    %r12,%rsi
        callq   _C_LABEL(x86_dbregs_switch)
 
-       movb    _C_LABEL(x86_fpu_eager),%dl
-       testb   %dl,%dl
-       jz      .Lno_eagerfpu
+       /* Switch the FPU. */
        movq    %r13,%rdi
        movq    %r12,%rsi
        callq   _C_LABEL(fpu_eagerswitch)
-.Lno_eagerfpu:
 
        /* Don't bother with the rest if switching to a system process. */
        testl   $LW_SYSTEM,L_FLAG(%r12)
diff -r b4bce0890508 -r 7ab6c94237f0 sys/arch/i386/i386/locore.S
--- a/sys/arch/i386/i386/locore.S       Thu Oct 03 03:10:02 2019 +0000
+++ b/sys/arch/i386/i386/locore.S       Thu Oct 03 05:06:29 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.168 2019/05/19 08:17:02 maxv Exp $        */
+/*     $NetBSD: locore.S,v 1.169 2019/10/03 05:06:29 maxv Exp $        */
 
 /*
  * Copyright-o-rama!
@@ -128,7 +128,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.168 2019/05/19 08:17:02 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.169 2019/10/03 05:06:29 maxv Exp $");
 
 #include "opt_copy_symtab.h"
 #include "opt_ddb.h"
@@ -1137,15 +1137,12 @@
        call    _C_LABEL(x86_dbregs_switch)
        addl    $8,%esp
 
+       /* Switch the FPU. */
        pushl   %edx
-       movb    _C_LABEL(x86_fpu_eager),%dl
-       testb   %dl,%dl
-       jz      .Lno_eagerfpu
        pushl   %edi
        pushl   %esi
        call    _C_LABEL(fpu_eagerswitch)
        addl    $8,%esp
-.Lno_eagerfpu:
        popl    %edx
 
        /* Don't bother with the rest if switching to a system process. */
diff -r b4bce0890508 -r 7ab6c94237f0 sys/arch/x86/include/cpu.h
--- a/sys/arch/x86/include/cpu.h        Thu Oct 03 03:10:02 2019 +0000
+++ b/sys/arch/x86/include/cpu.h        Thu Oct 03 05:06:29 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.h,v 1.108 2019/08/07 06:23:48 maxv Exp $   */
+/*     $NetBSD: cpu.h,v 1.109 2019/10/03 05:06:29 maxv Exp $   */
 
 /*
  * Copyright (c) 1990 The Regents of the University of California.
@@ -462,7 +462,6 @@
 extern size_t x86_xsave_offsets[];
 extern size_t x86_xsave_sizes[];
 extern uint32_t x86_fpu_mxcsr_mask;
-extern bool x86_fpu_eager;
 
 extern void (*x86_cpu_idle)(void);
 #define        cpu_idle() (*x86_cpu_idle)()
diff -r b4bce0890508 -r 7ab6c94237f0 sys/arch/x86/x86/fpu.c
--- a/sys/arch/x86/x86/fpu.c    Thu Oct 03 03:10:02 2019 +0000
+++ b/sys/arch/x86/x86/fpu.c    Thu Oct 03 05:06:29 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fpu.c,v 1.55 2019/07/05 17:08:56 maxv Exp $    */
+/*     $NetBSD: fpu.c,v 1.56 2019/10/03 05:06:29 maxv Exp $    */
 
 /*
  * Copyright (c) 2008 The NetBSD Foundation, Inc.  All
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.55 2019/07/05 17:08:56 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.56 2019/10/03 05:06:29 maxv Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -127,7 +127,6 @@
 #endif
 
 uint32_t x86_fpu_mxcsr_mask __read_mostly = 0;
-bool x86_fpu_eager __read_mostly = false;
 
 static inline union savefpu *
 lwp_fpuarea(struct lwp *l)
@@ -440,20 +439,13 @@
 }
 
 /*
- * Implement device not available (DNA) exception.
- *
- * If we were the last lwp to use the FPU, we can simply return.
- * Otherwise, we save the previous state, if necessary, and restore
- * our last saved state.
- *
- * Called directly from the trap 0x13 entry with interrupts still disabled.
+ * Implement device not available (DNA) exception. Called with interrupts still
+ * disabled.
  */
 void
 fpudna(struct trapframe *frame)
 {
        struct cpu_info *ci = curcpu();
-       struct lwp *l, *fl;
-       struct pcb *pcb;
        int s;
 
        if (!USERMODE(frame->tf_cs)) {
@@ -461,55 +453,9 @@
                    (void *)X86_TF_RIP(frame), frame);
        }
 
+       /* Install the LWP's FPU state. */
        s = splhigh();
-
-       /* Save state on current CPU. */
-       l = ci->ci_curlwp;
-       pcb = lwp_getpcb(l);
-       fl = ci->ci_fpcurlwp;
-       if (fl != NULL) {
-               if (__predict_false(x86_fpu_eager)) {
-                       panic("%s: FPU busy with EagerFPU enabled",
-                           __func__);
-               }
-
-               /*
-                * It seems we can get here on Xen even if we didn't
-                * switch lwp.  In this case do nothing
-                */
-               if (fl == l) {
-                       KASSERT(pcb->pcb_fpcpu == ci);
-                       clts();
-                       splx(s);
-                       return;
-               }
-               fpusave_cpu(true);
-       }
-
-       /* Save our state if on a remote CPU. */
-       if (pcb->pcb_fpcpu != NULL) {
-               if (__predict_false(x86_fpu_eager)) {
-                       panic("%s: LWP busy with EagerFPU enabled",
-                           __func__);
-               }
-
-               /* Explicitly disable preemption before dropping spl. */
-               kpreempt_disable();
-               splx(s);
-
-               /* Actually enable interrupts */
-               x86_enable_intr();
-
-               fpusave_lwp(l, true);
-               KASSERT(pcb->pcb_fpcpu == NULL);
-               s = splhigh();
-               kpreempt_enable();
-       }
-
-       /* Install the LWP's FPU state. */
-       fpu_lwp_install(l);
-
-       KASSERT(ci == curcpu());
+       fpu_lwp_install(ci->ci_curlwp);
        splx(s);
 }
 
@@ -625,14 +571,9 @@
        pcb = lwp_getpcb(l);
 
        s = splhigh();
-       if (x86_fpu_eager) {
-               KASSERT(pcb->pcb_fpcpu == NULL ||
-                   pcb->pcb_fpcpu == curcpu());
-               fpusave_cpu(false);
-       } else {
-               splx(s);
-               fpusave_lwp(l, false);
-       }
+
+       KASSERT(pcb->pcb_fpcpu == NULL || pcb->pcb_fpcpu == curcpu());
+       fpusave_cpu(false);
        KASSERT(pcb->pcb_fpcpu == NULL);
 
        switch (x86_fpu_save) {
@@ -667,10 +608,8 @@
 
        pcb->pcb_fpu_dflt_cw = x87_cw;
 
-       if (x86_fpu_eager) {
-               fpu_lwp_install(l);
-               splx(s);
-       }
+       fpu_lwp_install(l);
+       splx(s);
 }
 
 void
@@ -1073,110 +1012,3 @@
 
        return 0;
 }
-
-/* -------------------------------------------------------------------------- */
-
-static volatile unsigned long eagerfpu_cpu_barrier1 __cacheline_aligned;
-static volatile unsigned long eagerfpu_cpu_barrier2 __cacheline_aligned;
-
-static void
-eager_change_cpu(void *arg1, void *arg2)
-{
-       struct cpu_info *ci = curcpu();
-       bool enabled = (bool)arg1;
-       int s;
-
-       s = splhigh();
-
-       /* Rendez-vous 1. */
-       atomic_dec_ulong(&eagerfpu_cpu_barrier1);
-       while (atomic_cas_ulong(&eagerfpu_cpu_barrier1, 0, 0) != 0) {
-               x86_pause();
-       }
-
-       fpusave_cpu(true);
-       if (ci == &cpu_info_primary) {
-               x86_fpu_eager = enabled;
-       }
-
-       /* Rendez-vous 2. */
-       atomic_dec_ulong(&eagerfpu_cpu_barrier2);
-       while (atomic_cas_ulong(&eagerfpu_cpu_barrier2, 0, 0) != 0) {
-               x86_pause();
-       }
-
-       splx(s);
-}
-
-static int
-eager_change(bool enabled)
-{
-       struct cpu_info *ci = NULL;
-       CPU_INFO_ITERATOR cii;
-       uint64_t xc;
-
-       mutex_enter(&cpu_lock);
-
-       /*
-        * We expect all the CPUs to be online.
-        */
-       for (CPU_INFO_FOREACH(cii, ci)) {
-               struct schedstate_percpu *spc = &ci->ci_schedstate;
-               if (spc->spc_flags & SPCF_OFFLINE) {
-                       printf("[!] cpu%d offline, EagerFPU not changed\n",
-                           cpu_index(ci));
-                       mutex_exit(&cpu_lock);
-                       return EOPNOTSUPP;
-               }
-       }
-
-       /* Initialize the barriers */
-       eagerfpu_cpu_barrier1 = ncpu;
-       eagerfpu_cpu_barrier2 = ncpu;
-
-       printf("[+] %s EagerFPU...",
-           enabled ? "Enabling" : "Disabling");
-       xc = xc_broadcast(0, eager_change_cpu,
-           (void *)enabled, NULL);
-       xc_wait(xc);
-       printf(" done!\n");
-
-       mutex_exit(&cpu_lock);
-
-       return 0;
-}
-
-static int
-sysctl_machdep_fpu_eager(SYSCTLFN_ARGS)
-{
-       struct sysctlnode node;
-       int error;
-       bool val;
-
-       val = *(bool *)rnode->sysctl_data;
-