Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86/x86 Several changes:
details: https://anonhg.NetBSD.org/src/rev/2e1a231cf5b9
branches: trunk
changeset: 322887:2e1a231cf5b9
user: maxv <maxv%NetBSD.org@localhost>
date: Tue May 22 09:25:58 2018 +0000
description:
Several changes:
- Move the sysctl initialization code into spectre.c, so that each
  variable can be made local. Rename the variables and use shorter
  names.
- Handle SpectreV4 with mitigation methods, the same way as SpectreV2.
  Several methods are available on AMD (not supported yet). Add a
  "method" leaf (a sketch of such a sysctl setup follows this
  description).
- Make SSB_NO a mitigation method in its own right, so that a CPU not
  affected by SpectreV4 is reported as "mitigated". In that case the
  user naturally cannot enable or disable the mitigation. Drop the
  "affected" sysctl leaf.
diffstat:
sys/arch/x86/x86/spectre.c | 304 ++++++++++++++++++++++++++++------------
sys/arch/x86/x86/x86_machdep.c | 85 +----------
2 files changed, 216 insertions(+), 173 deletions(-)
diffs (truncated from 643 to 300 lines):
diff -r 5153ce884ebd -r 2e1a231cf5b9 sys/arch/x86/x86/spectre.c
--- a/sys/arch/x86/x86/spectre.c Tue May 22 08:15:26 2018 +0000
+++ b/sys/arch/x86/x86/spectre.c Tue May 22 09:25:58 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: spectre.c,v 1.13 2018/05/22 08:15:26 maxv Exp $ */
+/* $NetBSD: spectre.c,v 1.14 2018/05/22 09:25:58 maxv Exp $ */
/*
* Copyright (c) 2018 NetBSD Foundation, Inc.
@@ -30,11 +30,11 @@
*/
/*
- * Mitigations for the Spectre V2 CPU flaw.
+ * Mitigations for the SpectreV2 and SpectreV4 CPU flaws.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.13 2018/05/22 08:15:26 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.14 2018/05/22 09:25:58 maxv Exp $");
#include "opt_spectre.h"
@@ -51,18 +51,31 @@
#include <x86/cputypes.h>
-enum spec_mitigation {
- MITIGATION_NONE,
- MITIGATION_AMD_DIS_IND,
- MITIGATION_INTEL_IBRS
+enum v2_mitigation {
+ V2_MITIGATION_NONE,
+ V2_MITIGATION_AMD_DIS_IND,
+ V2_MITIGATION_INTEL_IBRS
+};
+
+enum v4_mitigation {
+ V4_MITIGATION_NONE,
+ V4_MITIGATION_INTEL_SSBD,
+ V4_MITIGATION_INTEL_SSB_NO
};
-bool spec_v2_mitigation_enabled __read_mostly = false;
-static enum spec_mitigation mitigation_v2_method = MITIGATION_NONE;
-char spec_v2_mitigation_name[64] = "(none)";
+static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE;
+static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE;
+
+static bool v2_mitigation_enabled __read_mostly = false;
+static bool v4_mitigation_enabled __read_mostly = false;
+
+static char v2_mitigation_name[64] = "(none)";
+static char v4_mitigation_name[64] = "(none)";
+
+/* --------------------------------------------------------------------- */
static void
-spec_v2_set_name(void)
+v2_set_name(void)
{
char name[64] = "";
size_t nmitig = 0;
@@ -72,17 +85,17 @@
nmitig++;
#endif
- if (!spec_v2_mitigation_enabled) {
+ if (!v2_mitigation_enabled) {
if (nmitig == 0)
strlcat(name, "(none)", sizeof(name));
} else {
if (nmitig)
strlcat(name, " + ", sizeof(name));
- switch (mitigation_v2_method) {
- case MITIGATION_AMD_DIS_IND:
+ switch (v2_mitigation_method) {
+ case V2_MITIGATION_AMD_DIS_IND:
strlcat(name, "[AMD DIS_IND]", sizeof(name));
break;
- case MITIGATION_INTEL_IBRS:
+ case V2_MITIGATION_INTEL_IBRS:
strlcat(name, "[Intel IBRS]", sizeof(name));
break;
default:
@@ -90,12 +103,12 @@
}
}
- strlcpy(spec_v2_mitigation_name, name,
- sizeof(spec_v2_mitigation_name));
+ strlcpy(v2_mitigation_name, name,
+ sizeof(v2_mitigation_name));
}
static void
-spec_v2_detect_method(void)
+v2_detect_method(void)
{
struct cpu_info *ci = curcpu();
u_int descs[4];
@@ -106,15 +119,15 @@
if (descs[3] & CPUID_SEF_IBRS) {
/* descs[3] = %edx */
#ifdef __x86_64__
- mitigation_v2_method = MITIGATION_INTEL_IBRS;
+ v2_mitigation_method = V2_MITIGATION_INTEL_IBRS;
#else
/* IBRS not supported on i386. */
- mitigation_v2_method = MITIGATION_NONE;
+ v2_mitigation_method = V2_MITIGATION_NONE;
#endif
return;
}
}
- mitigation_v2_method = MITIGATION_NONE;
+ v2_mitigation_method = V2_MITIGATION_NONE;
} else if (cpu_vendor == CPUVENDOR_AMD) {
/*
* The AMD Family 10h manual documents the IC_CFG.DIS_IND bit.
@@ -127,14 +140,14 @@
case 0x10:
case 0x12:
case 0x16:
- mitigation_v2_method = MITIGATION_AMD_DIS_IND;
+ v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND;
break;
default:
- mitigation_v2_method = MITIGATION_NONE;
+ v2_mitigation_method = V2_MITIGATION_NONE;
break;
}
} else {
- mitigation_v2_method = MITIGATION_NONE;
+ v2_mitigation_method = V2_MITIGATION_NONE;
}
}
@@ -208,10 +221,10 @@
{
uint64_t msr;
- switch (mitigation_v2_method) {
- case MITIGATION_NONE:
+ switch (v2_mitigation_method) {
+ case V2_MITIGATION_NONE:
panic("impossible");
- case MITIGATION_INTEL_IBRS:
+ case V2_MITIGATION_INTEL_IBRS:
/* cpu0 is the one that does the hotpatch job */
if (ci == &cpu_info_primary) {
if (enabled) {
@@ -224,7 +237,7 @@
wrmsr(MSR_IA32_SPEC_CTRL, 0);
}
break;
- case MITIGATION_AMD_DIS_IND:
+ case V2_MITIGATION_AMD_DIS_IND:
msr = rdmsr(MSR_IC_CFG);
if (enabled) {
msr |= IC_CFG_DIS_IND;
@@ -247,7 +260,7 @@
u_long psl = 0;
/* Rendez-vous 1 (IBRS only). */
- if (mitigation_v2_method == MITIGATION_INTEL_IBRS) {
+ if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
psl = x86_read_psl();
x86_disable_intr();
@@ -260,7 +273,7 @@
mitigation_v2_apply_cpu(ci, enabled);
/* Rendez-vous 2 (IBRS only). */
- if (mitigation_v2_method == MITIGATION_INTEL_IBRS) {
+ if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
atomic_dec_ulong(&ibrs_cpu_barrier2);
while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) {
x86_pause();
@@ -281,7 +294,7 @@
CPU_INFO_ITERATOR cii;
uint64_t xc;
- spec_v2_detect_method();
+ v2_detect_method();
mutex_enter(&cpu_lock);
@@ -298,13 +311,13 @@
}
}
- switch (mitigation_v2_method) {
- case MITIGATION_NONE:
+ switch (v2_mitigation_method) {
+ case V2_MITIGATION_NONE:
printf("[!] No mitigation available\n");
mutex_exit(&cpu_lock);
return EOPNOTSUPP;
- case MITIGATION_AMD_DIS_IND:
- case MITIGATION_INTEL_IBRS:
+ case V2_MITIGATION_AMD_DIS_IND:
+ case V2_MITIGATION_INTEL_IBRS:
/* Initialize the barriers */
ibrs_cpu_barrier1 = ncpu;
ibrs_cpu_barrier2 = ncpu;
@@ -315,18 +328,16 @@
(void *)enabled, NULL);
xc_wait(xc);
printf(" done!\n");
- spec_v2_mitigation_enabled = enabled;
+ v2_mitigation_enabled = enabled;
mutex_exit(&cpu_lock);
- spec_v2_set_name();
+ v2_set_name();
return 0;
default:
panic("impossible");
}
}
-int sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS);
-
-int
+static int
sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS)
{
struct sysctlnode node;
@@ -343,12 +354,12 @@
return error;
if (val == 0) {
- if (!spec_v2_mitigation_enabled)
+ if (!v2_mitigation_enabled)
error = 0;
else
error = mitigation_v2_change(false);
} else {
- if (spec_v2_mitigation_enabled)
+ if (v2_mitigation_enabled)
error = 0;
else
error = mitigation_v2_change(true);
@@ -359,43 +370,60 @@
/* -------------------------------------------------------------------------- */
-bool spec_v4_mitigation_enabled __read_mostly = false;
-bool spec_v4_affected __read_mostly = true;
-
-int sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS);
-
-static bool ssbd_needed(void)
+static void
+v4_set_name(void)
{
- uint64_t msr;
+ char name[64] = "";
- if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) {
- msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
- if (msr & IA32_ARCH_SSB_NO) {
- /*
- * The processor indicates it is not vulnerable to the
- * Speculative Store Bypass (SpectreV4) flaw.
- */
- return false;
+ if (!v4_mitigation_enabled) {
+ strlcat(name, "(none)", sizeof(name));
+ } else {
+ switch (v4_mitigation_method) {
+ case V4_MITIGATION_INTEL_SSBD:
+ strlcat(name, "[Intel SSBD]", sizeof(name));
+ break;
+ case V4_MITIGATION_INTEL_SSB_NO:
+ strlcat(name, "[Intel SSB_NO]", sizeof(name));
+ break;
+ default:
+ panic("%s: impossible", __func__);
}
}
- return true;
+ strlcpy(v4_mitigation_name, name,
+ sizeof(v4_mitigation_name));
}
-static bool ssbd_supported(void)
+static void
+v4_detect_method(void)
{
u_int descs[4];
+ uint64_t msr;
if (cpu_vendor == CPUVENDOR_INTEL) {
+ if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) {
+ msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
+ if (msr & IA32_ARCH_SSB_NO) {
+ /*
+ * The processor indicates it is not vulnerable
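
The diff above is cut off inside v4_detect_method(). As a rough guide
only, the sketch below shows the general shape of the detection logic
that the commit description implies; it is NOT the elided remainder of
the diff. SSB_NO in IA32_ARCH_CAPABILITIES marks the CPU as unaffected
and therefore "mitigated" by itself, SSBD (if advertised) is the
user-toggleable method, and anything else reports no mitigation
available. The bit name CPUID_SEF_SSBD and the function name are
assumptions; the other identifiers appear in the visible part of the
diff.

/*
 * Illustrative sketch of the SpectreV4 detection logic; not the
 * committed code.
 */
static void
v4_detect_method_sketch(void)
{
	uint64_t msr;

	if (cpu_vendor == CPUVENDOR_INTEL) {
		if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) {
			msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
			if (msr & IA32_ARCH_SSB_NO) {
				/*
				 * The CPU is not vulnerable to SpectreV4:
				 * SSB_NO is itself the mitigation method,
				 * and the user cannot toggle it.
				 */
				v4_mitigation_method =
				    V4_MITIGATION_INTEL_SSB_NO;
				return;
			}
		}
		if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_SSBD) {
			/* SSBD is available and can be enabled/disabled. */
			v4_mitigation_method = V4_MITIGATION_INTEL_SSBD;
			return;
		}
	}

	/* AMD has its own methods, but they are not supported yet. */
	v4_mitigation_method = V4_MITIGATION_NONE;
}

When the SSBD method is selected, enabling the mitigation presumably
sets the SSBD bit in MSR_IA32_SPEC_CTRL on every CPU, mirroring what
mitigation_v2_apply_cpu() does for IBRS; with SSB_NO there is nothing
to toggle, which is why the separate "affected" leaf could be dropped.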