Source-Changes-HG archive


[src/trunk]: src Issue 64-bit versions of *XSAVE* for 64-bit amd64 programs



details:   https://anonhg.NetBSD.org/src/rev/7d3e85a919a1
branches:  trunk
changeset: 941563:7d3e85a919a1
user:      mgorny <mgorny%NetBSD.org@localhost>
date:      Sat Oct 24 07:14:29 2020 +0000

description:
Issue 64-bit versions of *XSAVE* for 64-bit amd64 programs

When calling FXSAVE, XSAVE, FXRSTOR, ... for 64-bit programs on amd64,
use the 64-suffixed variants in order to include the complete FIP/FDP
registers in the x87 area.

The difference between the two variants is that the new FXSAVE64 variant
represents FIP/FDP as 64-bit fields (union fp_addr.fa_64), while the
legacy FXSAVE variant splits them into a 32-bit offset, a 16-bit segment,
and a 16-bit reserved field (union fp_addr.fa_32).  The latter truncates
the actual addresses to 32 bits, which is insufficient for modern 64-bit
programs.
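
For reference, the two layouts look roughly like this (a minimal sketch
modeled on NetBSD's union fp_addr; the inner field names are
illustrative, not necessarily verbatim):

	union fp_addr {
		uint64_t fa_64;	/* FXSAVE64: full 64-bit linear address */
		struct {
			uint32_t fa_off;	/* FXSAVE: 32-bit offset only */
			uint16_t fa_seg;	/* segment selector */
			uint16_t fa_rsvd;	/* reserved */
		} fa_32;
	};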

The change applies only to 64-bit programs on amd64.  Plain i386 and
compat32 continue to use plain FXSAVE.  NVMM is likewise left unchanged,
as I am not familiar with that code.

This is a potentially breaking change.  However, I don't think it is
likely to break anything in practice, because the data provided by the
old variant were not meaningful (the pointers were truncated).
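
To make the truncation concrete, a hypothetical userland test could read
FIP straight out of the FXSAVE area (a sketch, not part of this commit;
the lost upper bits only matter when the program text is mapped above
4 GiB, e.g. in a PIE):

	#include <inttypes.h>
	#include <stdio.h>
	#include <string.h>

	/* FXSAVE needs a 16-byte-aligned 512-byte area; FIP lives at
	 * byte offset 8 (32-bit offset + selector in the legacy form,
	 * full 64-bit address in the FXSAVE64 form). */
	static uint8_t area[512] __attribute__((aligned(16)));

	int
	main(void)
	{
		uint32_t fip32;
		uint64_t fip64;

		/* Execute an x87 instruction so FIP gets set. */
		__asm volatile ("fld1; fstp %%st(0)" ::: "st");

		__asm volatile ("fxsave %0" : "=m" (area) :: "memory");
		memcpy(&fip32, area + 8, sizeof(fip32));
		printf("legacy FIP offset: %#010" PRIx32 "\n", fip32);

		__asm volatile ("fxsave64 %0" : "=m" (area) :: "memory");
		memcpy(&fip64, area + 8, sizeof(fip64));
		printf("64-bit FIP:        %#018" PRIx64 "\n", fip64);
		return 0;
	}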

diffstat:

 sys/arch/x86/include/cpufunc.h         |  78 +++++++++++++++++++++++++++++++++-
 sys/arch/x86/include/fpu.h             |   6 +-
 sys/arch/x86/x86/fpu.c                 |  34 +++++++++-----
 sys/dev/nvmm/x86/nvmm_x86_svm.c        |  10 ++-
 sys/dev/nvmm/x86/nvmm_x86_vmx.c        |  10 ++-
 tests/lib/libc/sys/t_ptrace_x86_wait.h |   4 +-
 6 files changed, 114 insertions(+), 28 deletions(-)

diffs (truncated from 323 to 300 lines):

diff -r 96a8add7135f -r 7d3e85a919a1 sys/arch/x86/include/cpufunc.h
--- a/sys/arch/x86/include/cpufunc.h    Sat Oct 24 07:08:22 2020 +0000
+++ b/sys/arch/x86/include/cpufunc.h    Sat Oct 24 07:14:29 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.h,v 1.41 2020/06/15 09:09:23 msaitoh Exp $     */
+/*     $NetBSD: cpufunc.h,v 1.42 2020/10/24 07:14:29 mgorny Exp $      */
 
 /*
  * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -485,6 +485,82 @@
        );
 }
 
+#ifdef __x86_64__
+static inline void
+fxsave64(void *addr)
+{
+       uint8_t *area = addr;
+
+       __asm volatile (
+               "fxsave64       %[area]"
+               : [area] "=m" (*area)
+               :
+               : "memory"
+       );
+}
+
+static inline void
+fxrstor64(const void *addr)
+{
+       const uint8_t *area = addr;
+
+       __asm volatile (
+               "fxrstor64 %[area]"
+               :
+               : [area] "m" (*area)
+               : "memory"
+       );
+}
+
+static inline void
+xsave64(void *addr, uint64_t mask)
+{
+       uint8_t *area = addr;
+       uint32_t low, high;
+
+       low = mask;
+       high = mask >> 32;
+       __asm volatile (
+               "xsave64        %[area]"
+               : [area] "=m" (*area)
+               : "a" (low), "d" (high)
+               : "memory"
+       );
+}
+
+static inline void
+xsaveopt64(void *addr, uint64_t mask)
+{
+       uint8_t *area = addr;
+       uint32_t low, high;
+
+       low = mask;
+       high = mask >> 32;
+       __asm volatile (
+               "xsaveopt64 %[area]"
+               : [area] "=m" (*area)
+               : "a" (low), "d" (high)
+               : "memory"
+       );
+}
+
+static inline void
+xrstor64(const void *addr, uint64_t mask)
+{
+       const uint8_t *area = addr;
+       uint32_t low, high;
+
+       low = mask;
+       high = mask >> 32;
+       __asm volatile (
+               "xrstor64 %[area]"
+               :
+               : [area] "m" (*area), "a" (low), "d" (high)
+               : "memory"
+       );
+}
+#endif
+
 /* -------------------------------------------------------------------------- */
 
 #ifdef XENPV
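
A usage note on the helpers above: the XSAVE-family instructions take the
requested-feature bitmap in EDX:EAX, which is why the 64-bit mask is split
into low and high halves, and the save area must be 64-byte aligned
(16-byte for FXSAVE).  A hypothetical kernel-side caller might look like
this (mask bits are the architectural XCR0 x87/SSE bits; the area size is
the 512-byte legacy region plus the 64-byte XSAVE header):

	static uint8_t gfpu_area[512 + 64] __aligned(64);

	xsave64(gfpu_area, 0x3);	/* bit 0 = x87, bit 1 = SSE */
	/* ... inspect or modify the area ... */
	xrstor64(gfpu_area, 0x3);
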
diff -r 96a8add7135f -r 7d3e85a919a1 sys/arch/x86/include/fpu.h
--- a/sys/arch/x86/include/fpu.h        Sat Oct 24 07:08:22 2020 +0000
+++ b/sys/arch/x86/include/fpu.h        Sat Oct 24 07:14:29 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fpu.h,v 1.22 2020/10/15 17:40:14 mgorny Exp $  */
+/*     $NetBSD: fpu.h,v 1.23 2020/10/24 07:14:29 mgorny Exp $  */
 
 #ifndef        _X86_FPU_H_
 #define        _X86_FPU_H_
@@ -14,8 +14,8 @@
 void fpuinit(struct cpu_info *);
 void fpuinit_mxcsr_mask(void);
 
-void fpu_area_save(void *, uint64_t);
-void fpu_area_restore(const void *, uint64_t);
+void fpu_area_save(void *, uint64_t, bool);
+void fpu_area_restore(const void *, uint64_t, bool);
 
 void fpu_save(void);
 
diff -r 96a8add7135f -r 7d3e85a919a1 sys/arch/x86/x86/fpu.c
--- a/sys/arch/x86/x86/fpu.c    Sat Oct 24 07:08:22 2020 +0000
+++ b/sys/arch/x86/x86/fpu.c    Sat Oct 24 07:14:29 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fpu.c,v 1.75 2020/10/15 17:40:14 mgorny Exp $  */
+/*     $NetBSD: fpu.c,v 1.76 2020/10/24 07:14:30 mgorny Exp $  */
 
 /*
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.  All
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.75 2020/10/15 17:40:14 mgorny Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.76 2020/10/24 07:14:30 mgorny Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -156,7 +156,7 @@
        s = splvm();
        if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
                KASSERT((l->l_flag & LW_SYSTEM) == 0);
-               fpu_area_save(area, x86_xsave_features);
+               fpu_area_save(area, x86_xsave_features, !(l->l_proc->p_flag & PK_32));
                l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
        }
        splx(s);
@@ -246,21 +246,27 @@
        fldummy();
 }
 
+#ifdef __x86_64__
+#define XS64(x) (is_64bit ? x##64 : x)
+#else
+#define XS64(x) x
+#endif
+
 void
-fpu_area_save(void *area, uint64_t xsave_features)
+fpu_area_save(void *area, uint64_t xsave_features, bool is_64bit)
 {
        switch (x86_fpu_save) {
        case FPU_SAVE_FSAVE:
                fnsave(area);
                break;
        case FPU_SAVE_FXSAVE:
-               fxsave(area);
+               XS64(fxsave)(area);
                break;
        case FPU_SAVE_XSAVE:
-               xsave(area, xsave_features);
+               XS64(xsave)(area, xsave_features);
                break;
        case FPU_SAVE_XSAVEOPT:
-               xsaveopt(area, xsave_features);
+               XS64(xsaveopt)(area, xsave_features);
                break;
        }
 
@@ -268,7 +274,7 @@
 }
 
 void
-fpu_area_restore(const void *area, uint64_t xsave_features)
+fpu_area_restore(const void *area, uint64_t xsave_features, bool is_64bit)
 {
        clts();
 
@@ -279,13 +285,13 @@
        case FPU_SAVE_FXSAVE:
                if (cpu_vendor == CPUVENDOR_AMD)
                        fpu_errata_amd();
-               fxrstor(area);
+               XS64(fxrstor)(area);
                break;
        case FPU_SAVE_XSAVE:
        case FPU_SAVE_XSAVEOPT:
                if (cpu_vendor == CPUVENDOR_AMD)
                        fpu_errata_amd();
-               xrstor(area, xsave_features);
+               XS64(xrstor)(area, xsave_features);
                break;
        }
 }
@@ -294,7 +300,8 @@
 fpu_handle_deferred(void)
 {
        struct pcb *pcb = lwp_getpcb(curlwp);
-       fpu_area_restore(&pcb->pcb_savefpu, x86_xsave_features);
+       fpu_area_restore(&pcb->pcb_savefpu, x86_xsave_features,
+           !(curlwp->l_proc->p_flag & PK_32));
 }
 
 void
@@ -309,7 +316,8 @@
        if (oldlwp->l_md.md_flags & MDL_FPU_IN_CPU) {
                KASSERT(!(oldlwp->l_flag & LW_SYSTEM));
                pcb = lwp_getpcb(oldlwp);
-               fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features);
+               fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features,
+                   !(oldlwp->l_proc->p_flag & PK_32));
                oldlwp->l_md.md_flags &= ~MDL_FPU_IN_CPU;
        }
        KASSERT(!(newlwp->l_md.md_flags & MDL_FPU_IN_CPU));
@@ -413,7 +421,7 @@
         * through Spectre-class attacks to userland, even if there are
         * no bugs in fpu state management.
         */
-       fpu_area_restore(&zero_fpu, x86_xsave_features);
+       fpu_area_restore(&zero_fpu, x86_xsave_features, false);
 
        /*
         * Set CR0_TS again so that the kernel can't accidentally use
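
For clarity, the XS64() macro above dispatches by token-pasting the "64"
suffix, so on amd64 a call such as XS64(fxsave)(area) expands to:

	(is_64bit ? fxsave64 : fxsave)(area);
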
diff -r 96a8add7135f -r 7d3e85a919a1 sys/dev/nvmm/x86/nvmm_x86_svm.c
--- a/sys/dev/nvmm/x86/nvmm_x86_svm.c   Sat Oct 24 07:08:22 2020 +0000
+++ b/sys/dev/nvmm/x86/nvmm_x86_svm.c   Sat Oct 24 07:14:29 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvmm_x86_svm.c,v 1.81 2020/09/08 17:02:03 maxv Exp $   */
+/*     $NetBSD: nvmm_x86_svm.c,v 1.82 2020/10/24 07:14:30 mgorny Exp $ */
 
 /*
  * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.81 2020/09/08 17:02:03 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.82 2020/10/24 07:14:30 mgorny Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1351,7 +1351,8 @@
        struct svm_cpudata *cpudata = vcpu->cpudata;
 
        fpu_kern_enter();
-       fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);
+       /* TODO: should we use *XSAVE64 here? */
+       fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask, false);
 
        if (svm_xcr0_mask != 0) {
                cpudata->hxcr0 = rdxcr(0);
@@ -1369,7 +1370,8 @@
                wrxcr(0, cpudata->hxcr0);
        }
 
-       fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
+       /* TODO: should we use *XSAVE64 here? */
+       fpu_area_save(&cpudata->gfpu, svm_xcr0_mask, false);
        fpu_kern_leave();
 }
 
diff -r 96a8add7135f -r 7d3e85a919a1 sys/dev/nvmm/x86/nvmm_x86_vmx.c
--- a/sys/dev/nvmm/x86/nvmm_x86_vmx.c   Sat Oct 24 07:08:22 2020 +0000
+++ b/sys/dev/nvmm/x86/nvmm_x86_vmx.c   Sat Oct 24 07:14:29 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvmm_x86_vmx.c,v 1.80 2020/09/08 17:02:03 maxv Exp $   */
+/*     $NetBSD: nvmm_x86_vmx.c,v 1.81 2020/10/24 07:14:30 mgorny Exp $ */
 
 /*
  * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.80 2020/09/08 17:02:03 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.81 2020/10/24 07:14:30 mgorny Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -2014,7 +2014,8 @@
        struct vmx_cpudata *cpudata = vcpu->cpudata;
 
        fpu_kern_enter();
-       fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
+       /* TODO: should we use *XSAVE64 here? */
+       fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask, false);
 
        if (vmx_xcr0_mask != 0) {
                cpudata->hxcr0 = rdxcr(0);
@@ -2032,7 +2033,8 @@
                wrxcr(0, cpudata->hxcr0);
        }
 
-       fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
+       /* TODO: should we use *XSAVE64 here? */
+       fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask, false);
        fpu_kern_leave();
 }


