[src-draft/trunk]: src/sys/arch Draft fpu_kthread_enter/leave on x86.
details: https://anonhg.NetBSD.org/src-all/rev/08a86cf7e9ff
branches: trunk
changeset: 934739:08a86cf7e9ff
user: Taylor R Campbell <riastradh%NetBSD.org@localhost>
date: Thu Jun 04 03:34:45 2020 +0000
description:
Draft fpu_kthread_enter/leave on x86.
Only fit for kthreads, not user lwps. Preemptible, nestable.
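
For illustration only (not part of the commit): a minimal sketch, in the kernel's C style, of how a kthread might bracket its FPU use with the new calls. The kthread body and the example_do_simd_work() helper are hypothetical; the int returned by fpu_kthread_enter() records the previous state, which is what makes nesting safe.

/*
 * Hypothetical usage sketch, not from the commit: a kthread enables
 * the FPU for itself, does some SIMD work, and restores the previous
 * state.  Preemption stays enabled throughout, unlike with
 * fpu_kern_enter/leave.
 */
static void
example_kthread(void *cookie)
{
	int s;

	for (;;) {
		s = fpu_kthread_enter();	/* allow FPU use in this kthread */
		example_do_simd_work(cookie);	/* hypothetical FPU/SIMD work */
		fpu_kthread_leave(s);		/* restore the previous FPU state */
	}
}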
diffstat:
sys/arch/amd64/include/proc.h | 1 +
sys/arch/i386/include/proc.h | 1 +
sys/arch/x86/include/fpu.h | 3 +
sys/arch/x86/x86/fpu.c | 85 +++++++++++++++++++++++++++++++++++++++++-
4 files changed, 87 insertions(+), 3 deletions(-)
diffs (161 lines):
diff -r 4a0394d9dc15 -r 08a86cf7e9ff sys/arch/amd64/include/proc.h
--- a/sys/arch/amd64/include/proc.h Wed Jun 17 20:00:14 2020 +0000
+++ b/sys/arch/amd64/include/proc.h Thu Jun 04 03:34:45 2020 +0000
@@ -55,6 +55,7 @@
#define MDL_COMPAT32 0x0008 /* i386, always return via iret */
#define MDL_IRET 0x0010 /* force return via iret, not sysret */
#define MDL_FPU_IN_CPU 0x0020 /* the FPU state is in the CPU */
+#define MDL_SYSTEM_FPU 0x0040 /* system thread is allowed FPU use */
struct mdproc {
int md_flags;
diff -r 4a0394d9dc15 -r 08a86cf7e9ff sys/arch/i386/include/proc.h
--- a/sys/arch/i386/include/proc.h Wed Jun 17 20:00:14 2020 +0000
+++ b/sys/arch/i386/include/proc.h Thu Jun 04 03:34:45 2020 +0000
@@ -44,6 +44,7 @@
struct vm_page;
#define MDL_FPU_IN_CPU 0x0020 /* the FPU state is in the CPU */
+#define MDL_SYSTEM_FPU 0x0040 /* system thread is allowed FPU use */
struct mdlwp {
volatile uint64_t md_tsc; /* last TSC reading */
diff -r 4a0394d9dc15 -r 08a86cf7e9ff sys/arch/x86/include/fpu.h
--- a/sys/arch/x86/include/fpu.h Wed Jun 17 20:00:14 2020 +0000
+++ b/sys/arch/x86/include/fpu.h Thu Jun 04 03:34:45 2020 +0000
@@ -33,6 +33,9 @@
void fpu_kern_enter(void);
void fpu_kern_leave(void);
+int fpu_kthread_enter(void);
+void fpu_kthread_leave(int);
+
void process_write_fpregs_xmm(struct lwp *, const struct fxsave *);
void process_write_fpregs_s87(struct lwp *, const struct save87 *);
diff -r 4a0394d9dc15 -r 08a86cf7e9ff sys/arch/x86/x86/fpu.c
--- a/sys/arch/x86/x86/fpu.c Wed Jun 17 20:00:14 2020 +0000
+++ b/sys/arch/x86/x86/fpu.c Thu Jun 04 03:34:45 2020 +0000
@@ -137,7 +137,8 @@
struct pcb *pcb = lwp_getpcb(l);
union savefpu *area = &pcb->pcb_savefpu;
- KASSERT((l->l_flag & LW_SYSTEM) == 0);
+ KASSERT((l->l_flag & LW_SYSTEM) == 0 ||
+ (l->l_md.md_flags & MDL_SYSTEM_FPU));
if (l == curlwp) {
fpu_save();
}
@@ -154,7 +155,8 @@
kpreempt_disable();
if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
- KASSERT((l->l_flag & LW_SYSTEM) == 0);
+ KASSERT((l->l_flag & LW_SYSTEM) == 0 ||
+ (l->l_md.md_flags & MDL_SYSTEM_FPU));
fpu_area_save(area, x86_xsave_features);
l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
}
@@ -343,6 +345,75 @@
/* -------------------------------------------------------------------------- */
+static const union savefpu zero_fpu __aligned(64);
+
+/*
+ * s = fpu_kthread_enter()
+ *
+ * Allow the current kthread to use the FPU without disabling
+ * preemption as fpu_kern_enter/leave do. Must not be used in a
+ * user lwp. When done, call fpu_kthread_leave(s). May be
+ * recursively nested.
+ *
+ * Must not be invoked while in a fpu_kern_enter/leave block.
+ */
+int
+fpu_kthread_enter(void)
+{
+ struct lwp *l = curlwp;
+ int system_fpu = l->l_md.md_flags & MDL_SYSTEM_FPU;
+
+ KASSERTMSG(l->l_flag & LW_SYSTEM,
+ "fpu_kthread_enter is allowed only in kthreads");
+ KASSERTMSG(curcpu()->ci_kfpu_spl == -1,
+ "fpu_kthread_enter is not allowed between fpu_kern_enter/leave");
+
+ if (!system_fpu) {
+ /*
+ * Notify the FPU fault handler to save the FPU state
+ * for us.
+ */
+ l->l_md.md_flags |= MDL_SYSTEM_FPU;
+
+ /* Clear CR0_TS to enable the FPU. */
+ clts();
+ }
+
+ return system_fpu;
+}
+
+/*
+ * fpu_kthread_leave(s)
+ *
+ * Return to the previous state of whether the current kthread can
+ * use the FPU without disabling preemption.
+ */
+void
+fpu_kthread_leave(int system_fpu)
+{
+ struct lwp *l = curlwp;
+
+ KASSERTMSG(l->l_flag & LW_SYSTEM,
+ "fpu_kthread_leave is allowed only in kthreads");
+ KASSERTMSG(l->l_md.md_flags & MDL_SYSTEM_FPU,
+ "fpu_kthread_leave without fpu_kthread_enter");
+
+ if (!system_fpu) {
+ /*
+ * Zero the fpu registers; otherwise we might leak
+ * secrets through Spectre-class attacks to userland,
+ * even if there are no bugs in fpu state management.
+ */
+ fpu_area_restore(&zero_fpu, x86_xsave_features);
+
+ /* Set CR0_TS to disable use of the FPU. */
+ stts();
+
+ /* Stop asking to save our FPU state. */
+ l->l_md.md_flags &= ~MDL_SYSTEM_FPU;
+ }
+}
+
/*
* fpu_kern_enter()
*
@@ -359,6 +430,10 @@
struct cpu_info *ci;
int s;
+ /* Nothing needed if we're in a kthread with FPU enabled. */
+ if (l->l_md.md_flags & MDL_SYSTEM_FPU)
+ return;
+
s = splhigh();
ci = curcpu();
@@ -392,10 +467,14 @@
void
fpu_kern_leave(void)
{
- static const union savefpu zero_fpu __aligned(64);
+ struct lwp *l = curlwp;
struct cpu_info *ci = curcpu();
int s;
+ /* Nothing needed if we're in a kthread with FPU enabled. */
+ if (l->l_md.md_flags & MDL_SYSTEM_FPU)
+ return;
+
KASSERT(ci->ci_ilevel == IPL_HIGH);
KASSERT(ci->ci_kfpu_spl != -1);
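
For illustration only (not part of the commit): because fpu_kern_enter/leave now return early when MDL_SYSTEM_FPU is set (see the hunks above), code shared between such kthreads and other contexts can keep bracketing its FPU use with the kern pair unchanged. A hedged sketch; example_shared_fpu_helper() is hypothetical.

/*
 * Hypothetical sketch, not from the commit: a helper that uses
 * fpu_kern_enter/leave as before.  Called from a kthread inside an
 * fpu_kthread_enter/leave section, both calls are no-ops because
 * MDL_SYSTEM_FPU is set; elsewhere they behave as they always did.
 */
static void
example_shared_fpu_helper(void)
{
	fpu_kern_enter();	/* no-op if MDL_SYSTEM_FPU is set */
	/* ... short FPU work ... */
	fpu_kern_leave();
}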