Source-Changes-HG archive
[src/trunk]: src/sys/arch/alpha Implement fast soft interrupts for Alpha. It...
details: https://anonhg.NetBSD.org/src/rev/843f24850fb9
branches: trunk
changeset: 955043:843f24850fb9
user: thorpej <thorpej%NetBSD.org@localhost>
date: Wed Sep 16 04:07:32 2020 +0000
description:
Implement fast soft interrupts for Alpha. It's not yet enabled, because
there is a bug lurking that causes problems when user space starts up,
so we'll stick with the slow path for now.
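For readers coming to this cold: NetBSD's "fast" soft interrupts (__HAVE_FAST_SOFTINTS) have the machine-dependent code record pending soft interrupt levels in a per-CPU word (ci_ssir here) via softint_trigger(), and drain that word highest-priority-first when the IPL is about to drop. The fragment below is a rough userspace model of that idea only, not NetBSD code; the printing handler and the simplified bookkeeping are invented for illustration, and the real code added in this change runs at splhigh() and switches to dedicated softint LWPs instead of calling handlers directly.

/*
 * Userspace model of the per-CPU pending-softint word and the
 * priority-ordered drain loop.  A sketch of the idea only.
 */
#include <stdio.h>

enum { SOFTINT_CLOCK, SOFTINT_BIO, SOFTINT_NET, SOFTINT_SERIAL, SOFTINT_COUNT };

static unsigned long ssir;		/* models ci_ssir */

static void
trigger(int level)			/* models softint_trigger() */
{
	ssir |= 1UL << level;
}

static void
dispatch(void)				/* models alpha_softint_dispatch() */
{
	/* Serial and net outrank bio and clock; always recheck from the top. */
	static const int order[] = {
		SOFTINT_SERIAL, SOFTINT_NET, SOFTINT_BIO, SOFTINT_CLOCK,
	};
	static const char *name[] = { "clock", "bio", "net", "serial" };

	while (ssir != 0) {
		for (int i = 0; i < SOFTINT_COUNT; i++) {
			const int level = order[i];
			if (ssir & (1UL << level)) {
				ssir &= ~(1UL << level);
				printf("run softint %s\n", name[level]);
				break;	/* restart so higher levels go first */
			}
		}
	}
}

int
main(void)
{
	trigger(SOFTINT_BIO);
	trigger(SOFTINT_SERIAL);
	dispatch();			/* serial runs before bio */
	return 0;
}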
diffstat:
sys/arch/alpha/alpha/genassym.cf | 4 +-
sys/arch/alpha/alpha/interrupt.c | 157 +++++++++++++++++++++++++------
sys/arch/alpha/alpha/locore.s | 193 ++++++++++++++++++++++++++++++++++----
sys/arch/alpha/include/cpu.h | 5 +-
sys/arch/alpha/include/intr.h | 21 +++-
sys/arch/alpha/include/types.h | 3 +-
6 files changed, 319 insertions(+), 64 deletions(-)
diffs (truncated from 599 to 300 lines):
diff -r 42bc315153bd -r 843f24850fb9 sys/arch/alpha/alpha/genassym.cf
--- a/sys/arch/alpha/alpha/genassym.cf Tue Sep 15 23:40:03 2020 +0000
+++ b/sys/arch/alpha/alpha/genassym.cf Wed Sep 16 04:07:32 2020 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.25 2020/09/05 18:01:42 thorpej Exp $
+# $NetBSD: genassym.cf,v 1.26 2020/09/16 04:07:32 thorpej Exp $
#
# Copyright (c) 1982, 1990, 1993
@@ -130,6 +130,7 @@
define ALPHA_PSL_IPL_MASK ALPHA_PSL_IPL_MASK
define ALPHA_PSL_IPL_0 ALPHA_PSL_IPL_0
define ALPHA_PSL_IPL_SOFT_LO ALPHA_PSL_IPL_SOFT_LO
+define ALPHA_PSL_IPL_SOFT_HI ALPHA_PSL_IPL_SOFT_HI
define ALPHA_PSL_IPL_HIGH ALPHA_PSL_IPL_HIGH
# pte bits
@@ -189,4 +190,5 @@
define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp)
define CPU_INFO_IDLE_LWP offsetof(struct cpu_info, ci_data.cpu_idlelwp)
define CPU_INFO_SSIR offsetof(struct cpu_info, ci_ssir)
+define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
define CPU_INFO_SIZEOF sizeof(struct cpu_info)
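The genassym.cf additions exist so the assembly in locore.s can refer to the new PSL constant and to struct cpu_info fields by numeric offset: each "define" line is evaluated at kernel build time and emitted as a constant into assym.h. Below is a minimal userspace illustration of what such an offset is, using a mock structure rather than the real struct cpu_info.

/* Mock illustration only: the field layout below is invented, not the
 * real struct cpu_info.  genassym turns expressions like
 * offsetof(struct cpu_info, ci_ssir) into assym.h constants that
 * locore.s then uses as literal displacements (e.g. CPU_INFO_SSIR(t1)). */
#include <stdio.h>
#include <stddef.h>

struct mock_cpu_info {
	void		*ci_curlwp;
	unsigned long	 ci_ssir;
	int		 ci_mtx_count;
};

int
main(void)
{
	printf("#define CPU_INFO_SSIR %zu\n",
	    offsetof(struct mock_cpu_info, ci_ssir));
	printf("#define CPU_INFO_MTX_COUNT %zu\n",
	    offsetof(struct mock_cpu_info, ci_mtx_count));
	return 0;
}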
diff -r 42bc315153bd -r 843f24850fb9 sys/arch/alpha/alpha/interrupt.c
--- a/sys/arch/alpha/alpha/interrupt.c Tue Sep 15 23:40:03 2020 +0000
+++ b/sys/arch/alpha/alpha/interrupt.c Wed Sep 16 04:07:32 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: interrupt.c,v 1.85 2020/09/16 04:07:32 thorpej Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.85 2020/09/16 04:07:32 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -450,6 +450,124 @@
return (rv);
}
+#ifdef __HAVE_FAST_SOFTINTS
+
+#define SOFTINT_CLOCK_MASK __BIT(SOFTINT_CLOCK)
+#define SOFTINT_BIO_MASK __BIT(SOFTINT_BIO)
+#define SOFTINT_NET_MASK __BIT(SOFTINT_NET)
+#define SOFTINT_SERIAL_MASK __BIT(SOFTINT_SERIAL)
+
+#define ALPHA_IPL1_SOFTINTS (SOFTINT_CLOCK_MASK | SOFTINT_BIO_MASK)
+#define ALPHA_IPL2_SOFTINTS (SOFTINT_NET_MASK | SOFTINT_SERIAL_MASK)
+
+#define ALPHA_ALL_SOFTINTS (ALPHA_IPL1_SOFTINTS | ALPHA_IPL2_SOFTINTS)
+
+#define SOFTINT_TO_IPL(si) \
+ (ALPHA_PSL_IPL_SOFT_LO + ((ALPHA_IPL2_SOFTINTS >> (si)) & 1))
+
+#define SOFTINTS_ELIGIBLE(ipl) \
+ ((ALPHA_ALL_SOFTINTS << ((ipl) << 1)) & ALPHA_ALL_SOFTINTS)
+
+/* Validate some assumptions the code makes. */
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_CLOCK) == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_BIO) == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_NET) == ALPHA_PSL_IPL_SOFT_HI);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_SERIAL) == ALPHA_PSL_IPL_SOFT_HI);
+
+__CTASSERT(IPL_SOFTCLOCK == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(IPL_SOFTBIO == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(IPL_SOFTNET == ALPHA_PSL_IPL_SOFT_HI);
+__CTASSERT(IPL_SOFTSERIAL == ALPHA_PSL_IPL_SOFT_HI);
+
+__CTASSERT(SOFTINT_CLOCK_MASK & 0x3);
+__CTASSERT(SOFTINT_BIO_MASK & 0x3);
+__CTASSERT(SOFTINT_NET_MASK & 0xc);
+__CTASSERT(SOFTINT_SERIAL_MASK & 0xc);
+__CTASSERT(SOFTINT_COUNT == 4);
+
+__CTASSERT((ALPHA_ALL_SOFTINTS & ~0xfUL) == 0);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_NONE) == ALPHA_ALL_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTCLOCK) == ALPHA_IPL2_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTBIO) == ALPHA_IPL2_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTNET) == 0);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTSERIAL) == 0);
+
+/*
+ * softint_trigger:
+ *
+ * Trigger a soft interrupt.
+ */
+void
+softint_trigger(uintptr_t const machdep)
+{
+ /* No need for an atomic; called at splhigh(). */
+ KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_HIGH);
+ curcpu()->ci_ssir |= machdep;
+}
+
+/*
+ * softint_init_md:
+ *
+ * Machine-dependent initialization for a fast soft interrupt thread.
+ */
+void
+softint_init_md(lwp_t * const l, u_int const level, uintptr_t * const machdep)
+{
+ lwp_t ** lp = &l->l_cpu->ci_silwps[level];
+ KASSERT(*lp == NULL || *lp == l);
+ *lp = l;
+
+ const uintptr_t si_bit = __BIT(level);
+ KASSERT(si_bit & ALPHA_ALL_SOFTINTS);
+ *machdep = si_bit;
+}
+
+/*
+ * Helper macro.
+ *
+ * Dispatch a softint and then restart the loop so that higher
+ * priority softints are always done first.
+ */
+#define DOSOFTINT(level) \
+ if (ssir & SOFTINT_##level##_MASK) { \
+ ci->ci_ssir &= ~SOFTINT_##level##_MASK; \
+ alpha_softint_switchto(l, IPL_SOFT##level, \
+ ci->ci_silwps[SOFTINT_##level]); \
+ KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == \
+ ALPHA_PSL_IPL_HIGH); \
+ continue; \
+ } \
+
+/*
+ * alpha_softint_dispatch:
+ *
+ * Process pending soft interrupts that are eligible to run
+ * at the specified new IPL. Must be called at splhigh().
+ */
+void
+alpha_softint_dispatch(int const ipl)
+{
+ struct lwp * const l = curlwp;
+ struct cpu_info * const ci = l->l_cpu;
+ unsigned long ssir;
+ const unsigned long eligible = SOFTINTS_ELIGIBLE(ipl);
+
+ KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_HIGH);
+
+ for (;;) {
+ ssir = ci->ci_ssir & eligible;
+ if (ssir == 0)
+ break;
+
+ DOSOFTINT(SERIAL);
+ DOSOFTINT(NET);
+ DOSOFTINT(BIO);
+ DOSOFTINT(CLOCK);
+ }
+}
+
+#endif /* __HAVE_FAST_SOFTINTS */
+
/*
* spllower:
*
@@ -457,42 +575,19 @@
* interrupts.
*/
void
-spllower(int ipl)
+spllower(int const ipl)
{
- if (ipl == ALPHA_PSL_IPL_0 && curcpu()->ci_ssir) {
- (void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT_LO);
- softintr_dispatch();
+#ifdef __HAVE_FAST_SOFTINTS
+ if (ipl < ALPHA_PSL_IPL_SOFT_HI && curcpu()->ci_ssir) {
+ (void) alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
+ alpha_softint_dispatch(ipl);
}
+#endif /* __HAVE_FAST_SOFTINTS */
(void) alpha_pal_swpipl(ipl);
}
/*
- * softintr_dispatch:
- *
- * Process pending software interrupts.
- */
-void
-softintr_dispatch(void)
-{
-
- /* XXX Nothing until alpha gets __HAVE_FAST_SOFTINTS */
-}
-
-#ifdef __HAVE_FAST_SOFTINTS
-/*
- * softint_trigger:
- *
- * Trigger a soft interrupt.
- */
-void
-softint_trigger(uintptr_t machdep)
-{
- atomic_or_ulong(&curcpu()->ci_ssir, 1 << (x))
-}
-#endif
-
-/*
* cpu_intr_p:
*
* Return non-zero if executing in interrupt context.
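The SOFTINT_TO_IPL() and SOFTINTS_ELIGIBLE() macros added above fold the four software interrupt levels onto the two Alpha software IPLs with plain bit arithmetic, and the __CTASSERTs pin down the assumptions that requires. The standalone check below re-derives the same facts with assert(); the concrete numeric values (SOFTINT_CLOCK = 0 through SOFTINT_SERIAL = 3, ALPHA_PSL_IPL_SOFT_LO = 1, ALPHA_PSL_IPL_SOFT_HI = 2) are the usual NetBSD/alpha ones and are assumed here rather than taken from this diff.

/*
 * Standalone re-check of the SOFTINT_TO_IPL() and SOFTINTS_ELIGIBLE()
 * bit arithmetic from interrupt.c.  The numeric values are assumed;
 * the kernel verifies the same facts at compile time with __CTASSERT().
 */
#include <assert.h>
#include <stdio.h>

#define SOFTINT_CLOCK		0	/* assumed values */
#define SOFTINT_BIO		1
#define SOFTINT_NET		2
#define SOFTINT_SERIAL		3

#define ALPHA_PSL_IPL_SOFT_LO	1	/* assumed values */
#define ALPHA_PSL_IPL_SOFT_HI	2

#define __BIT(n)		(1UL << (n))

#define SOFTINT_CLOCK_MASK	__BIT(SOFTINT_CLOCK)
#define SOFTINT_BIO_MASK	__BIT(SOFTINT_BIO)
#define SOFTINT_NET_MASK	__BIT(SOFTINT_NET)
#define SOFTINT_SERIAL_MASK	__BIT(SOFTINT_SERIAL)

#define ALPHA_IPL1_SOFTINTS	(SOFTINT_CLOCK_MASK | SOFTINT_BIO_MASK)
#define ALPHA_IPL2_SOFTINTS	(SOFTINT_NET_MASK | SOFTINT_SERIAL_MASK)
#define ALPHA_ALL_SOFTINTS	(ALPHA_IPL1_SOFTINTS | ALPHA_IPL2_SOFTINTS)

#define SOFTINT_TO_IPL(si) \
	(ALPHA_PSL_IPL_SOFT_LO + ((ALPHA_IPL2_SOFTINTS >> (si)) & 1))

#define SOFTINTS_ELIGIBLE(ipl) \
	((ALPHA_ALL_SOFTINTS << ((ipl) << 1)) & ALPHA_ALL_SOFTINTS)

int
main(void)
{
	/* CLOCK/BIO map to the low software IPL, NET/SERIAL to the high one. */
	assert(SOFTINT_TO_IPL(SOFTINT_CLOCK)  == ALPHA_PSL_IPL_SOFT_LO);
	assert(SOFTINT_TO_IPL(SOFTINT_BIO)    == ALPHA_PSL_IPL_SOFT_LO);
	assert(SOFTINT_TO_IPL(SOFTINT_NET)    == ALPHA_PSL_IPL_SOFT_HI);
	assert(SOFTINT_TO_IPL(SOFTINT_SERIAL) == ALPHA_PSL_IPL_SOFT_HI);

	/* Set of softints that become runnable when the IPL drops to "ipl". */
	assert(SOFTINTS_ELIGIBLE(0)                     == ALPHA_ALL_SOFTINTS);
	assert(SOFTINTS_ELIGIBLE(ALPHA_PSL_IPL_SOFT_LO) == ALPHA_IPL2_SOFTINTS);
	assert(SOFTINTS_ELIGIBLE(ALPHA_PSL_IPL_SOFT_HI) == 0);

	printf("all softint IPL mapping checks passed\n");
	return 0;
}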
diff -r 42bc315153bd -r 843f24850fb9 sys/arch/alpha/alpha/locore.s
--- a/sys/arch/alpha/alpha/locore.s Tue Sep 15 23:40:03 2020 +0000
+++ b/sys/arch/alpha/alpha/locore.s Wed Sep 16 04:07:32 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: locore.s,v 1.133 2020/09/16 04:07:32 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.133 2020/09/16 04:07:32 thorpej Exp $");
#include "assym.h"
@@ -243,19 +243,28 @@
br pv, 1f
1: LDGP(pv)
- ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */
- and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */
- bne t0, 5f /* != 0: can't do AST or SIR */
+ ldq s1, (FRAME_PS * 8)(sp) /* s1 = new PSL */
+ and s1, ALPHA_PSL_IPL_MASK, s3 /* s3 = new ipl */
+
+ /* --- BEGIN inline spllower() --- */
+
+ cmpult s3, ALPHA_PSL_IPL_SOFT_HI, t1 /* new IPL < SOFT_HI? */
+ beq t1, 5f /* no, can't do AST or SI */
+ /* yes */
/* GET_CURLWP clobbers v0, t0, t8...t11. */
GET_CURLWP
mov v0, s0 /* s0 = curlwp */
+#ifdef __HAVE_FAST_SOFTINTS
/* see if a soft interrupt is pending. */
2: ldq t1, L_CPU(s0) /* t1 = curlwp->l_cpu */
ldq t1, CPU_INFO_SSIR(t1) /* soft int pending? */
bne t1, 6f /* yes */
/* no */
+#endif /* __HAVE_FAST_SOFTINTS */
+
+ /* --- END inline spllower() --- */
and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
beq t0, 5f /* no: just return */
@@ -282,16 +291,19 @@
.set at
/* NOTREACHED */
- /* We've got a SIR */
-6: ldiq a0, ALPHA_PSL_IPL_SOFT_LO
+#ifdef __HAVE_FAST_SOFTINTS
+ /* We've got a softint */
+6: ldiq a0, ALPHA_PSL_IPL_HIGH
call_pal PAL_OSF1_swpipl
mov v0, s2 /* remember old IPL */
- CALL(softintr_dispatch)
+ mov s3, a0 /* pass new ipl */
+ CALL(alpha_softint_dispatch)
- /* SIR handled; restore IPL and check again */
+ /* SI handled; restore IPL and check again */
mov s2, a0
call_pal PAL_OSF1_swpipl
br 2b
+#endif /* __HAVE_FAST_SOFTINTS */
/* We've got an AST */
7: stl zero, L_MD_ASTPENDING(s0) /* no AST pending */
@@ -643,13 +655,117 @@
/**************************************************************************/
+#ifdef __HAVE_FAST_SOFTINTS
+/*
+ * void alpha_softint_switchto(struct lwp *current, int ipl, struct lwp *next)
+ * Switch away from the current LWP to the specified softint LWP, and
+ * dispatch to softint processing.
+ * Arguments:
+ * a0 'struct lwp *' of the LWP to switch from
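The locore.s hunks above inline, on the exception-return path, the same check that the rewritten spllower() performs in C: if the IPL being restored is below ALPHA_PSL_IPL_SOFT_HI and ci_ssir has bits set, raise to the highest IPL, dispatch, and only then drop to the target level. Below is a compact userspace model of that control flow; the helper names and numeric levels are stand-ins, not kernel interfaces.

/*
 * Userspace model of the rewritten spllower() fast path: before dropping
 * the IPL, run any pending softints that become eligible at the new level.
 * swpipl()/dispatch_softints() are stand-ins, not real kernel interfaces.
 */
#include <stdio.h>

#define IPL_SOFT_HI	2		/* assumed, as in the checks above */
#define IPL_HIGH	7		/* arbitrary "block everything" level */

static int cur_ipl = IPL_HIGH;
static unsigned long ssir = 0x5;	/* some pending softint bits */

static int
swpipl(int newipl)			/* models alpha_pal_swpipl() */
{
	int old = cur_ipl;
	cur_ipl = newipl;
	return old;
}

static void
dispatch_softints(int ipl)		/* models alpha_softint_dispatch() */
{
	printf("dispatching softints eligible at IPL %d\n", ipl);
	ssir = 0;			/* drained */
}

static void
spllower_model(int ipl)
{
	if (ipl < IPL_SOFT_HI && ssir != 0) {
		(void)swpipl(IPL_HIGH);	/* block everything while dispatching */
		dispatch_softints(ipl);
	}
	(void)swpipl(ipl);		/* finally drop to the requested IPL */
}

int
main(void)
{
	spllower_model(0);		/* lowering to IPL 0 drains all pending softints */
	return 0;
}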