Source-Changes-HG archive


[src/trunk]: src/sys/dev/nvmm/x86 Optimize nvmm-intel: use inline GCC assembly rather than function calls



details:   https://anonhg.NetBSD.org/src/rev/eac83faefa54
branches:  trunk
changeset: 456211:eac83faefa54
user:      maxv <maxv%NetBSD.org@localhost>
date:      Sat Apr 27 08:16:19 2019 +0000

description:
Optimize nvmm-intel: use inline GCC assembly rather than function calls.
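
The patch replaces the out-of-line assembly stubs in nvmm_x86_vmxfunc.S,
which returned an error code that every call site then tested, with static
inline wrappers around GCC extended asm, so each VMX instruction is emitted
directly at its call site. For readers unfamiliar with the extended-asm
syntax used in the diff below, here is a minimal standalone sketch of the
same pattern (RDTSC is chosen only because, unlike the VMX instructions, it
can be executed anywhere; nothing in this sketch is from the patch itself):

	#include <stdint.h>

	/*
	 * GCC extended asm: template string, then outputs, inputs, and
	 * clobbers. "=a"/"=d" pin the outputs to EAX/EDX, which is where
	 * RDTSC writes. The VMX wrappers in the diff instead use "r"
	 * (any register) and "m" (memory operand) constraints, plus a
	 * "cc" clobber to declare that the flags are modified.
	 */
	static inline uint64_t
	rdtsc(void)
	{
		uint32_t lo, hi;

		asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
		return ((uint64_t)hi << 32) | lo;
	}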

diffstat:

 sys/dev/nvmm/x86/nvmm_x86_vmx.c     |  293 +++++++++++++++++++++--------------
 sys/dev/nvmm/x86/nvmm_x86_vmxfunc.S |  133 ++--------------
 2 files changed, 192 insertions(+), 234 deletions(-)

diffs (truncated from 747 to 300 lines):

diff -r 6a4db03a595f -r eac83faefa54 sys/dev/nvmm/x86/nvmm_x86_vmx.c
--- a/sys/dev/nvmm/x86/nvmm_x86_vmx.c   Sat Apr 27 06:18:15 2019 +0000
+++ b/sys/dev/nvmm/x86/nvmm_x86_vmx.c   Sat Apr 27 08:16:19 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvmm_x86_vmx.c,v 1.27 2019/04/24 18:19:28 maxv Exp $   */
+/*     $NetBSD: nvmm_x86_vmx.c,v 1.28 2019/04/27 08:16:19 maxv Exp $   */
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.27 2019/04/24 18:19:28 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.28 2019/04/27 08:16:19 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -56,13 +56,6 @@
 
 int _vmx_vmxon(paddr_t *pa);
 int _vmx_vmxoff(void);
-int _vmx_invept(uint64_t op, void *desc);
-int _vmx_invvpid(uint64_t op, void *desc);
-int _vmx_vmread(uint64_t op, uint64_t *val);
-int _vmx_vmwrite(uint64_t op, uint64_t val);
-int _vmx_vmptrld(paddr_t *pa);
-int _vmx_vmptrst(paddr_t *pa);
-int _vmx_vmclear(paddr_t *pa);
 int vmx_vmlaunch(uint64_t *gprs);
 int vmx_vmresume(uint64_t *gprs);
 
@@ -74,34 +67,113 @@
        if (__predict_false(_vmx_vmxoff() != 0)) { \
                panic("%s: VMXOFF failed", __func__); \
        }
-#define vmx_invept(a, b) \
-       if (__predict_false(_vmx_invept(a, b) != 0)) { \
-               panic("%s: INVEPT failed", __func__); \
-       }
-#define vmx_invvpid(a, b) \
-       if (__predict_false(_vmx_invvpid(a, b) != 0)) { \
-               panic("%s: INVVPID failed", __func__); \
-       }
-#define vmx_vmread(a, b) \
-       if (__predict_false(_vmx_vmread(a, b) != 0)) { \
-               panic("%s: VMREAD failed", __func__); \
-       }
-#define vmx_vmwrite(a, b) \
-       if (__predict_false(_vmx_vmwrite(a, b) != 0)) { \
-               panic("%s: VMWRITE failed", __func__); \
-       }
-#define vmx_vmptrld(a) \
-       if (__predict_false(_vmx_vmptrld(a) != 0)) { \
-               panic("%s: VMPTRLD failed", __func__); \
-       }
-#define vmx_vmptrst(a) \
-       if (__predict_false(_vmx_vmptrst(a) != 0)) { \
-               panic("%s: VMPTRST failed", __func__); \
-       }
-#define vmx_vmclear(a) \
-       if (__predict_false(_vmx_vmclear(a) != 0)) { \
-               panic("%s: VMCLEAR failed", __func__); \
-       }
+
+struct ept_desc {
+       uint64_t eptp;
+       uint64_t mbz;
+} __packed;
+
+struct vpid_desc {
+       uint64_t vpid;
+       uint64_t addr;
+} __packed;
+
+static inline void
+vmx_invept(uint64_t op, struct ept_desc *desc)
+{
+       asm volatile (
+               "invept         %[desc],%[op];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               :
+               : [desc] "m" (*desc), [op] "r" (op)
+               : "memory", "cc"
+       );
+}
+
+static inline void
+vmx_invvpid(uint64_t op, struct vpid_desc *desc)
+{
+       asm volatile (
+               "invvpid        %[desc],%[op];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               :
+               : [desc] "m" (*desc), [op] "r" (op)
+               : "memory", "cc"
+       );
+}
+
+static inline uint64_t
+vmx_vmread(uint64_t field)
+{
+       uint64_t value;
+
+       asm volatile (
+               "vmread         %[field],%[value];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               : [value] "=r" (value)
+               : [field] "r" (field)
+               : "cc"
+       );
+
+       return value;
+}
+
+static inline void
+vmx_vmwrite(uint64_t field, uint64_t value)
+{
+       asm volatile (
+               "vmwrite        %[value],%[field];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               :
+               : [field] "r" (field), [value] "r" (value)
+               : "cc"
+       );
+}
+
+static inline paddr_t
+vmx_vmptrst(void)
+{
+       paddr_t pa;
+
+       asm volatile (
+               "vmptrst        %[pa];"
+               :
+               : [pa] "m" (*(paddr_t *)&pa)
+               : "memory"
+       );
+
+       return pa;
+}
+
+static inline void
+vmx_vmptrld(paddr_t *pa)
+{
+       asm volatile (
+               "vmptrld        %[pa];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               :
+               : [pa] "m" (*pa)
+               : "memory", "cc"
+       );
+}
+
+static inline void
+vmx_vmclear(paddr_t *pa)
+{
+       asm volatile (
+               "vmclear        %[pa];"
+               "jz             vmx_insn_failvalid;"
+               "jc             vmx_insn_failinvalid;"
+               :
+               : [pa] "m" (*pa)
+               : "memory", "cc"
+       );
+}
 
 #define MSR_IA32_FEATURE_CONTROL       0x003A
 #define                IA32_FEATURE_CONTROL_LOCK       __BIT(0)
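
A note on the jz/jc pairs above: the Intel SDM specifies that a VMX
instruction which fails with no current VMCS sets CF (VMfailInvalid), while
one that fails with a current VMCS sets ZF (VMfailValid) and stores an error
number in the VM-instruction error field. The wrappers therefore branch to
the shared labels vmx_insn_failvalid and vmx_insn_failinvalid, defined in
nvmm_x86_vmxfunc.S (in the portion of this diff truncated below), which
presumably panic, preserving the behavior of the removed macros. An
alternative sketch, not the committed code, that keeps the panic in C by
capturing CF|ZF with SETBE at the cost of an extra instruction and a branch
per call site:

	static inline uint64_t
	vmx_vmread_checked(uint64_t field)
	{
		uint64_t value;
		uint8_t failed;

		asm volatile (
			"vmread		%[field],%[value];"
			"setbe		%[failed];"	/* failed = CF | ZF */
			: [value] "=r" (value), [failed] "=q" (failed)
			: [field] "r" (field)
			: "cc"
		);
		if (__predict_false(failed)) {
			panic("%s: VMREAD failed", __func__);
		}
		return value;
	}
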
@@ -526,16 +598,6 @@
        uint64_t val;
 } __packed;
 
-struct ept_desc {
-       uint64_t eptp;
-       uint64_t mbz;
-} __packed;
-
-struct vpid_desc {
-       uint64_t vpid;
-       uint64_t addr;
-} __packed;
-
 #define VPID_MAX       0xFFFF
 
 /* Make sure we never run out of VPIDs. */
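
The ept_desc and vpid_desc structures removed here were moved above the new
inline functions, where they serve as the in-memory descriptors that INVEPT
and INVVPID take through their "m" operands. Per the Intel SDM, the second
quadword of the INVEPT descriptor is reserved and must be zero, hence the
field name mbz. A hypothetical usage sketch (the invalidation type 1,
single-context, is from the SDM; nvmm's own constant names for the type
operand are not visible in this diff):

	static void
	example_flush_ept(uint64_t eptp)
	{
		/* Descriptor for single-context (type 1) invalidation. */
		struct ept_desc desc = { .eptp = eptp, .mbz = 0 };

		vmx_invept(1, &desc);
	}
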
@@ -805,7 +867,7 @@
        if (cpudata->vmcs_refcnt > 1) {
 #ifdef DIAGNOSTIC
                KASSERT(kpreempt_disabled());
-               vmx_vmptrst(&oldpa);
+               oldpa = vmx_vmptrst();
                KASSERT(oldpa == cpudata->vmcs_pa);
 #endif
                return;
@@ -835,12 +897,10 @@
 vmx_vmcs_leave(struct nvmm_cpu *vcpu)
 {
        struct vmx_cpudata *cpudata = vcpu->cpudata;
-       paddr_t oldpa __diagused;
 
        KASSERT(kpreempt_disabled());
 #ifdef DIAGNOSTIC
-       vmx_vmptrst(&oldpa);
-       KASSERT(oldpa == cpudata->vmcs_pa);
+       KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
 #endif
        KASSERT(cpudata->vmcs_refcnt > 0);
        cpudata->vmcs_refcnt--;
@@ -857,12 +917,10 @@
 vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
 {
        struct vmx_cpudata *cpudata = vcpu->cpudata;
-       paddr_t oldpa __diagused;
 
        KASSERT(kpreempt_disabled());
 #ifdef DIAGNOSTIC
-       vmx_vmptrst(&oldpa);
-       KASSERT(oldpa == cpudata->vmcs_pa);
+       KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
 #endif
        KASSERT(cpudata->vmcs_refcnt == 1);
        cpudata->vmcs_refcnt--;
@@ -879,7 +937,7 @@
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t ctls1;
 
-       vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);
+       ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);
 
        if (nmi) {
                // XXX INT_STATE_NMI?
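
Because this hunk and the following ones only show the changed vmread line,
the full idiom is easy to miss: with vmx_vmread() now returning the value, a
VMCS control-field update becomes a plain read-modify-write. Restated as a
sketch (the set/clear of the bit happens in context lines truncated from the
hunks):

	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);
	ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;	/* or &= ~... to disable */
	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
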
@@ -899,7 +957,7 @@
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t ctls1;
 
-       vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);
+       ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);
 
        if (nmi) {
                ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
@@ -950,7 +1008,7 @@
                if (event->vector == 2) {
                        type = INTR_TYPE_NMI;
                }
-               vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
+               intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
                if (type == INTR_TYPE_NMI) {
                        if (cpudata->nmi_window_exit) {
                                ret = EAGAIN;
@@ -958,7 +1016,7 @@
                        }
                        vmx_event_waitexit_enable(vcpu, true);
                } else {
-                       vmx_vmread(VMCS_GUEST_RFLAGS, &rflags);
+                       rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
                        if ((rflags & PSL_I) == 0 ||
                            (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0) {
                                vmx_event_waitexit_enable(vcpu, false);
@@ -1041,10 +1099,10 @@
         * Matters for guest-ring3, because it can execute 'cpuid' under a
         * debugger.
         */
-       vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
-       vmx_vmread(VMCS_GUEST_RIP, &rip);
+       inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
+       rip = vmx_vmread(VMCS_GUEST_RIP);
        vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
-       vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
+       intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
        vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
            intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
 }
@@ -1055,7 +1113,7 @@
 {
        uint64_t qual;
 
-       vmx_vmread(VMCS_EXIT_INTR_INFO, &qual);
+       qual = vmx_vmread(VMCS_EXIT_INTR_INFO);
 
        if ((qual & INTR_INFO_VALID) == 0) {
                goto error;
@@ -1091,7 +1149,7 @@
                cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;
 
                /* CPUID2_OSXSAVE depends on CR4. */
-               vmx_vmread(VMCS_GUEST_CR4, &cr4);
+               cr4 = vmx_vmread(VMCS_GUEST_CR4);
                if (!(cr4 & CR4_OSXSAVE)) {
                        cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
                }
@@ -1207,7 +1265,7 @@
        uint64_t rflags;
 
        if (cpudata->int_window_exit) {


