Source-Changes-HG archive
[src-draft/trunk]: src/sys/arch/aarch64/aarch64 Add kthread_fpu_enter/exit support to aarch64.
details: https://anonhg.NetBSD.org/src-all/rev/3a9b1ca3581a
branches: trunk
changeset: 936593:3a9b1ca3581a
user: Taylor R Campbell <riastradh%NetBSD.org@localhost>
date: Fri Jul 31 03:10:13 2020 +0000
description:
Add kthread_fpu_enter/exit support to aarch64.
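For context, a kernel thread brackets its FPU use with the MI wrappers whose machine-dependent hooks this change supplies. A minimal usage sketch, assuming the kthread_fpu_enter()/kthread_fpu_exit() API from <sys/kthread.h>; the thread body and its name are hypothetical:

#include <sys/kthread.h>

static void
example_fpu_thread(void *arg)	/* hypothetical kthread body */
{
	int s;

	/* Mark this kthread as owning private, persistent FPU state. */
	s = kthread_fpu_enter();

	/* FPU/SIMD instructions may be used here. */

	/* Zero and release the FPU state. */
	kthread_fpu_exit(s);

	kthread_exit(0);
}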
diffstat:
 sys/arch/aarch64/aarch64/fpu.c  |  46 +++++++++++++++++++++++++++++++++++++++-
 sys/arch/aarch64/aarch64/trap.c |   6 +++++
 2 files changed, 50 insertions(+), 2 deletions(-)
diffs (98 lines):
diff -r 9f9b4e466042 -r 3a9b1ca3581a sys/arch/aarch64/aarch64/fpu.c
--- a/sys/arch/aarch64/aarch64/fpu.c	Fri Jul 31 03:06:02 2020 +0000
+++ b/sys/arch/aarch64/aarch64/fpu.c	Fri Jul 31 03:10:13 2020 +0000
@@ -35,6 +35,8 @@
 
 #include <sys/param.h>
 #include <sys/types.h>
+#include <sys/cpu.h>
+#include <sys/kthread.h>
 #include <sys/lwp.h>
 #include <sys/evcnt.h>
 
@@ -176,12 +178,30 @@
 	__asm __volatile ("isb");
 }
 
+static const struct fpreg zero_fpreg;
+
+/*
+ * True if this is a system thread with its own private FPU state.
+ */
+static inline bool
+lwp_system_fpu_p(struct lwp *l)
+{
+
+	return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
+	    (LW_SYSTEM|LW_SYSTEM_FPU);
+}
+
 void
 fpu_kern_enter(void)
 {
 	struct cpu_info *ci;
 	int s;
 
+	if (lwp_system_fpu_p(curlwp) && !cpu_intr_p()) {
+		KASSERT(!cpu_softintr_p());
+		return;
+	}
+
 	/*
 	 * Block interrupts up to IPL_VM.  We must block preemption
 	 * since -- if this is a user thread -- there is nowhere to
@@ -209,10 +229,16 @@
 
 void
 fpu_kern_leave(void)
 {
-	static const struct fpreg zero_fpreg;
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 	int s;
 
+	if (lwp_system_fpu_p(curlwp) && !cpu_intr_p()) {
+		KASSERT(!cpu_softintr_p());
+		return;
+	}
+
+	ci = curcpu();
+
 	KASSERT(ci->ci_cpl == IPL_VM);
 	KASSERT(ci->ci_kfpu_spl != -1);
@@ -234,3 +260,19 @@
 	ci->ci_kfpu_spl = -1;
 	splx(s);
 }
+
+void
+kthread_fpu_enter_md(void)
+{
+
+	fpu_load(curlwp);
+}
+
+void
+kthread_fpu_exit_md(void)
+{
+
+	/* XXX Should fpu_state_release zero the registers itself? */
+	load_fpregs(&zero_fpreg);
+	fpu_discard(curlwp, 0);
+}
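For reference, the MI wrappers that call the _md hooks above look roughly like the following. This is a simplified sketch, not the verbatim kern_kthread.c code: locking and assertions are omitted, and only the flag handling is shown.

int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	/* Remember whether the flag was already set, then set it. */
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	if (!s)
		kthread_fpu_enter_md();	/* eagerly load the FPU */
	return s;
}

void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	/* Restore the flag to its previous state. */
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	if (!s)
		kthread_fpu_exit_md();	/* zero and unload the FPU */
}

Once LW_SYSTEM_FPU is set, the early returns added to fpu_kern_enter() and fpu_kern_leave() above make those calls no-ops for the thread outside interrupt context, since the thread already owns the FPU.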
diff -r 9f9b4e466042 -r 3a9b1ca3581a sys/arch/aarch64/aarch64/trap.c
--- a/sys/arch/aarch64/aarch64/trap.c	Fri Jul 31 03:06:02 2020 +0000
+++ b/sys/arch/aarch64/aarch64/trap.c	Fri Jul 31 03:10:13 2020 +0000
@@ -242,6 +242,12 @@
 		break;
 
 	case ESR_EC_FP_ACCESS:
+		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
+		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
+			fpu_load(curlwp);
+			break;
+		}
+		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
 	case ESR_EC_PC_ALIGNMENT:
 	case ESR_EC_SP_ALIGNMENT:
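The trap.c hunk is the lazy half of the scheme: kthread_fpu_enter_md() loads the FPU eagerly, but if the thread's FPU state is later unloaded while it is off-CPU, the next FPU instruction faults with ESR_EC_FP_ACCESS and the handler reloads the state instead of treating the trap as fatal. Informally, the lifecycle looks like this (illustrative comment only, not code from the change):

/*
 * Lifecycle of a kthread running with LW_SYSTEM_FPU:
 *
 *	kthread_fpu_enter()  -> kthread_fpu_enter_md(): fpu_load()
 *	...FPU state may be unloaded while the thread is off-CPU...
 *	next FPU instruction -> ESR_EC_FP_ACCESS trap -> fpu_load()
 *	kthread_fpu_exit()   -> kthread_fpu_exit_md(): zero + discard
 */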