Source-Changes-HG archive
[src-draft/trunk]: src/sys/arch/aarch64 Draft fpu_kern_enter/leave on aarch64.
details: https://anonhg.NetBSD.org/src-all/rev/1c320bff3ccc
branches: trunk
changeset: 934605:1c320bff3ccc
user: Taylor R Campbell <riastradh@NetBSD.org>
date: Thu Jun 04 03:23:00 2020 +0000
description:
Draft fpu_kern_enter/leave on aarch64.
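For context: any in-kernel FPU/SIMD use must be bracketed by the new
fpu_kern_enter()/fpu_kern_leave() pair. Here is a minimal sketch of a
hypothetical caller (the function name and body are illustrative only,
not part of this changeset); the bracketed section runs at splhigh(),
so it must be short and must not sleep. A condensed userland model of
the IPL bookkeeping follows the full diff below.

    #include <sys/types.h>
    #include <aarch64/machdep.h>    /* fpu_kern_enter, fpu_kern_leave */

    /* Hypothetical example caller, not from this changeset. */
    static void
    example_simd_copy(uint8_t *dst, const uint8_t *src, size_t len)
    {
        fpu_kern_enter();   /* raise IPL, save any live user FPU state */
        /* ... NEON loads/stores on dst/src would go here ... */
        fpu_kern_leave();   /* zero and disable the FPU, restore IPL */
    }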
diffstat:
sys/arch/aarch64/aarch64/cpu.c | 2 +
sys/arch/aarch64/aarch64/fpu.c | 66 ++++++++++++++++++++++++++++++++++++++
sys/arch/aarch64/include/cpu.h | 2 +
sys/arch/aarch64/include/machdep.h | 5 ++-
4 files changed, 74 insertions(+), 1 deletions(-)
diffs (120 lines):
diff -r 9b8cc827186b -r 1c320bff3ccc sys/arch/aarch64/aarch64/cpu.c
--- a/sys/arch/aarch64/aarch64/cpu.c Sun Jun 14 18:06:43 2020 +0000
+++ b/sys/arch/aarch64/aarch64/cpu.c Thu Jun 04 03:23:00 2020 +0000
@@ -133,6 +133,8 @@
ci->ci_dev = dv;
dv->dv_private = ci;
+ ci->ci_kfpu_spl = -1;
+
arm_cpu_do_topology(ci);
cpu_identify(ci->ci_dev, ci);
diff -r 9b8cc827186b -r 1c320bff3ccc sys/arch/aarch64/aarch64/fpu.c
--- a/sys/arch/aarch64/aarch64/fpu.c Sun Jun 14 18:06:43 2020 +0000
+++ b/sys/arch/aarch64/aarch64/fpu.c Thu Jun 04 03:23:00 2020 +0000
@@ -38,6 +38,7 @@
#include <sys/lwp.h>
#include <sys/evcnt.h>
+#include <aarch64/locore.h>
#include <aarch64/reg.h>
#include <aarch64/pcb.h>
#include <aarch64/armreg.h>
@@ -172,3 +173,68 @@
reg_cpacr_el1_write(CPACR_FPEN_NONE);
__asm __volatile ("isb");
}
+
+void
+fpu_kern_enter(void)
+{
+ struct lwp *l = curlwp;
+ struct cpu_info *ci;
+ int s;
+
+ /*
+ * Block all interrupts. We must block preemption too: if this
+ * is a user thread, there is nowhere to save the kernel fpu
+ * state, and if this is to be usable from interrupt handlers,
+ * interrupts must not touch the fpu state in use, since there
+ * is nowhere for them to save it either.
+ */
+ s = splhigh();
+ ci = curcpu();
+ KASSERT(ci->ci_kfpu_spl == -1);
+ ci->ci_kfpu_spl = s;
+
+ /*
+ * If we are in a softint and have a pinned lwp, the fpu state
+ * is that of the pinned lwp, so save it there.
+ */
+ if ((l->l_pflag & LP_INTR) && (l->l_switchto != NULL))
+ l = l->l_switchto;
+ if (fpu_used_p(l))
+ fpu_save(l);
+
+ /*
+ * Enable the fpu, and wait until it is enabled before
+ * executing any further instructions.
+ */
+ reg_cpacr_el1_write(CPACR_FPEN_ALL);
+ arm_isb();
+}
+
+void
+fpu_kern_leave(void)
+{
+ static const struct fpreg zero_fpreg;
+ struct cpu_info *ci = curcpu();
+ int s;
+
+ KASSERT(ci->ci_cpl == IPL_HIGH);
+ KASSERT(ci->ci_kfpu_spl != -1);
+
+ /*
+ * Zero the fpu registers; otherwise we might leak secrets
+ * through Spectre-class attacks to userland, even if there are
+ * no bugs in fpu state management.
+ */
+ load_fpregs(&zero_fpreg);
+
+ /*
+ * Disable the fpu so that the kernel can't accidentally use
+ * it again.
+ */
+ reg_cpacr_el1_write(CPACR_FPEN_NONE);
+ arm_isb();
+
+ s = ci->ci_kfpu_spl;
+ ci->ci_kfpu_spl = -1;
+ splx(s);
+}
diff -r 9b8cc827186b -r 1c320bff3ccc sys/arch/aarch64/include/cpu.h
--- a/sys/arch/aarch64/include/cpu.h Sun Jun 14 18:06:43 2020 +0000
+++ b/sys/arch/aarch64/include/cpu.h Thu Jun 04 03:23:00 2020 +0000
@@ -89,6 +89,8 @@
volatile u_int ci_astpending;
volatile u_int ci_intr_depth;
+ int ci_kfpu_spl;
+
/* event counters */
struct evcnt ci_vfp_use;
struct evcnt ci_vfp_reuse;
diff -r 9b8cc827186b -r 1c320bff3ccc sys/arch/aarch64/include/machdep.h
--- a/sys/arch/aarch64/include/machdep.h Sun Jun 14 18:06:43 2020 +0000
+++ b/sys/arch/aarch64/include/machdep.h Thu Jun 04 03:23:00 2020 +0000
@@ -142,8 +142,11 @@
/* fpu.c */
void fpu_attach(struct cpu_info *);
struct fpreg;
-void load_fpregs(struct fpreg *);
+void load_fpregs(const struct fpreg *);
void save_fpregs(struct fpreg *);
+void fpu_kern_enter(void);
+void fpu_kern_leave(void);
+
#ifdef TRAP_SIGDEBUG
#define do_trapsignal(l, signo, code, addr, trap) \
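As promised above, a condensed userland model of the ci_kfpu_spl protocol
this change introduces: the field doubles as the saved IPL and an in-use
flag, with -1 meaning "no kernel FPU section active". splhigh()/splx() and
the per-CPU state are stubbed here; everything below is an illustration,
not kernel code.

    #include <assert.h>
    #include <stdio.h>

    #define IPL_NONE 0
    #define IPL_HIGH 7

    static int cpl = IPL_NONE;      /* mock current IPL of this CPU */
    static int ci_kfpu_spl = -1;    /* saved IPL; -1 when no section open */

    static int splhigh(void) { int s = cpl; cpl = IPL_HIGH; return s; }
    static void splx(int s) { cpl = s; }

    static void
    mock_fpu_kern_enter(void)
    {
        int s = splhigh();

        assert(ci_kfpu_spl == -1);  /* sections may not nest or race */
        ci_kfpu_spl = s;
    }

    static void
    mock_fpu_kern_leave(void)
    {
        int s;

        assert(cpl == IPL_HIGH);    /* caller must still be in the section */
        assert(ci_kfpu_spl != -1);
        s = ci_kfpu_spl;
        ci_kfpu_spl = -1;
        splx(s);                    /* drop back to the IPL enter saved */
    }

    int
    main(void)
    {
        mock_fpu_kern_enter();
        /* FPU work would happen here, at IPL_HIGH. */
        mock_fpu_kern_leave();
        printf("IPL after leave: %d (expected %d)\n", cpl, IPL_NONE);
        return 0;
    }

Because the saved IPL lives in per-CPU state rather than on the stack, an
enter/leave pair must begin and end on the same CPU; running the whole
section at splhigh() is what guarantees that.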