Port-i386 archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[PATCH] Add intr_mask() / intr_unmask() interface to mask / unmask individual interrupt sources



Folks --

A patch I need reviewed by x86 interrupt handling experts.  It adds two new routines:

void intr_mask(void *ih);
void intr_unmask(void *ih);

These mask and unmask, respectively, individual interrupt sources.  The "ih" argument is the interrupt cookie that's returned by intr_establish().

There are some corresponding changes to the ACPI interrupt functions that I haven't included here, but suffice to say they're basic wrappers.

These changes are needed in order to be able to defer processing of a hard interrupt to a softint in some circumstances, specifically in cases where it's not actually safe to access the interrupting device's registers while in interrupt context.  Real world example: ihidev (hid-over-i2c driver).  That driver currently does i2c access from the hardware interrupt, but it plays some unsavory tricks in order to do so, and it can't work in all cases as-written.  Unfortunately, the hid-over-i2c spec specifies a level-triggered interrupt, hence the need to be able to mask it off after scheduling a softint.  The interrupt can then be unmasked after processing all outstanding data from the input device.

Unfortunately, I don't have an ihidev device I can test this against directly.  I know bouyer@ does, and I am hoping he can test the larger set of changes I'm working on.  I have tested these patches at least in the sense that the system boots multi-user ... i.e. I didn't break anything :-)

I would appreciate review of these changes by someone who is very familiar with the x86 interrupt handling code.  I am pretty sure I got this right, but it's a critical section of code so I want to be sure.  The interrupt vector changes essentially check the "mask count" of the intrsource and treat it as an interrupt masked by splXXX() in two places:

1- Upon entry to an interrupt vector (either by the "interrupted" path or by the "resume" path).

2- Upon return from invoking all of the handlers for an intrsource ... because the hard interrupt handler will be the thing requesting the interrupt to be masked, we check here so we can avoid re-enabling the source, thus avoiding an extra interrupt just to mark it as pending.

Note that it's important for a masked interrupt source to always be marked as "pending" so that it can be reliably serviced again when unmasked.

Diff attached.  Please let me know if it looks OK or if you have any questions.

Thanks!

diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index 168cae5cb8b5..270a9118d840 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -320,7 +320,8 @@ define	IS_FLAGS	offsetof(struct intrsource, is_flags)
 define	IS_PIN		offsetof(struct intrsource, is_pin)
 define	IS_TYPE		offsetof(struct intrsource, is_type)
 define	IS_MAXLEVEL	offsetof(struct intrsource, is_maxlevel)
-define	IS_LWP			offsetof(struct intrsource, is_lwp)
+define	IS_LWP		offsetof(struct intrsource, is_lwp)
+define	IS_MASK_COUNT	offsetof(struct intrsource, is_mask_count)
 
 define	IPL_NONE		IPL_NONE
 define	IPL_PREEMPT		IPL_PREEMPT
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 2ef3a7ba3b08..a0ac3fed6866 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -387,6 +387,8 @@ IDTVEC(handle_ ## name ## num)						;\
 	sti								;\
 	incl	CPUVAR(IDEPTH)						;\
 	movq	IS_HANDLERS(%r14),%rbx					;\
+	cmpl	$0,IS_MASK_COUNT(%r14)	/* source currently masked? */	;\
+	jne	7f			/* yes, hold it */		;\
 6:									\
 	movl	IH_LEVEL(%rbx),%r12d					;\
 	cmpl	%r13d,%r12d						;\
@@ -399,6 +401,8 @@ IDTVEC(handle_ ## name ## num)						;\
 	testq	%rbx,%rbx						;\
 	jnz	6b							;\
 5:									\
+	cmpl	$0,IS_MASK_COUNT(%r14)	/* source now masked? */	;\
+	jne	7f			/* yes, deal */			;\
 	cli								;\
 	unmask(num)			/* unmask it in hardware */	;\
 	late_ack(num)							;\
diff --git a/sys/arch/i386/i386/genassym.cf b/sys/arch/i386/i386/genassym.cf
index 479c5319fc2f..0002e23d6a5f 100644
--- a/sys/arch/i386/i386/genassym.cf
+++ b/sys/arch/i386/i386/genassym.cf
@@ -319,6 +319,7 @@ define	IS_PIN			offsetof(struct intrsource, is_pin)
 define	IS_TYPE			offsetof(struct intrsource, is_type)
 define	IS_MAXLEVEL		offsetof(struct intrsource, is_maxlevel)
 define	IS_LWP			offsetof(struct intrsource, is_lwp)
+define	IS_MASK_COUNT		offsetof(struct intrsource, is_mask_count)
 
 define	IPL_NONE		IPL_NONE
 define	IPL_PREEMPT		IPL_PREEMPT
diff --git a/sys/arch/i386/i386/vector.S b/sys/arch/i386/i386/vector.S
index 5ef99df2713c..3c5a625cef49 100644
--- a/sys/arch/i386/i386/vector.S
+++ b/sys/arch/i386/i386/vector.S
@@ -408,6 +408,8 @@ IDTVEC(intr_ ## name ## num)						;\
 	IDEPTH_INCR							;\
 	sti								;\
 	movl	IS_HANDLERS(%ebp),%ebx					;\
+	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source currently masked? */	;\
+	jne	7f			/* yes, hold it */		;\
 6:									\
 	movl	IH_LEVEL(%ebx),%edi					;\
 	cmpl	%esi,%edi						;\
@@ -420,6 +422,8 @@ IDTVEC(intr_ ## name ## num)						;\
 	addl	$4,%esp			/* toss the arg */		;\
 	testl	%ebx,%ebx						;\
 	jnz	6b							;\
+	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source now masked? */	;\
+	jne	7f			/* yes, deal */			;\
 	cli								;\
 	unmask(num)			/* unmask it in hardware */	;\
 	late_ack(num)							;\
diff --git a/sys/arch/x86/include/intr.h b/sys/arch/x86/include/intr.h
index 6fb4e9545338..296e9748b688 100644
--- a/sys/arch/x86/include/intr.h
+++ b/sys/arch/x86/include/intr.h
@@ -95,6 +95,7 @@ struct intrsource {
 	u_long ipl_evt_mask2[NR_EVENT_CHANNELS];
 #endif
 	struct evcnt is_evcnt;		/* interrupt counter per cpu */
+	u_int is_mask_count;		/* masked? (nested) */
 	int is_flags;			/* see below */
 	int is_type;			/* level, edge */
 	int is_idtvec;
@@ -215,6 +216,8 @@ void x86_nmi(void);
 void *intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *),
 			   void *, bool, const char *);
 void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool);
+void intr_mask(struct intrhand *);
+void intr_unmask(struct intrhand *);
 void intr_disestablish(struct intrhand *);
 void intr_add_pcibus(struct pcibus_attach_args *);
 const char *intr_string(intr_handle_t, char *, size_t);
diff --git a/sys/arch/x86/x86/intr.c b/sys/arch/x86/x86/intr.c
index 3722436e5886..94316df2fef1 100644
--- a/sys/arch/x86/x86/intr.c
+++ b/sys/arch/x86/x86/intr.c
@@ -976,6 +976,90 @@ intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
 	    level, handler, arg, known_mpsafe, "unknown");
 }
 
+/*
+ * Called on bound CPU to handle intr_mask() / intr_unmask().
+ *
+ * => caller (on initiating CPU) holds cpu_lock on our behalf
+ * => arg1: struct intrhand *ih
+ * => arg2: true -> mask, false -> unmask.
+ */
+static void
+intr_mask_xcall(void *arg1, void *arg2)
+{
+	struct intrhand * const ih = arg1;
+	const uintptr_t mask = (uintptr_t)arg2;
+	struct cpu_info * const ci = ih->ih_cpu;
+	bool force_pending = false;
+
+	KASSERT(ci == curcpu() || !mp_online);
+
+	const u_long psl = x86_read_psl();
+	x86_disable_intr();
+	struct intrsource * const source = ci->ci_isources[ih->ih_slot];
+	struct pic * const pic = source->is_pic;
+
+	if (mask) {
+		source->is_mask_count++;
+		KASSERT(source->is_mask_count != 0);
+		(*pic->pic_hwmask)(pic, ih->ih_pin);
+	} else {
+		KASSERT(source->is_mask_count != 0);
+		if (--source->is_mask_count == 0) {
+			(*pic->pic_hwunmask)(pic, ih->ih_pin);
+			force_pending = true;
+		}
+	}
+
+	/* Re-enable interrupts. */
+	x86_write_psl(psl);
+
+	if (force_pending) {
+		/* Force processing of any pending interrupts. */
+		splx(splhigh());
+	}
+}
+
+static void
+intr_mask_internal(struct intrhand * const ih, const bool mask)
+{
+
+	/*
+	 * Call out to the remote CPU to update its interrupt state.
+	 * Only make RPCs if the APs are up and running.
+	 */
+	mutex_enter(&cpu_lock);
+	struct cpu_info * const ci = ih->ih_cpu;
+	void * const mask_arg = (void *)(uintptr_t)mask;
+	if (ci == curcpu() || !mp_online) {
+		intr_mask_xcall(ih, mask_arg);
+	} else {
+		const uint64_t where =
+		    xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
+		xc_wait(where);
+	}
+	mutex_exit(&cpu_lock);
+}
+
+void
+intr_mask(struct intrhand *ih)
+{
+
+	intr_mask_internal(ih, true);
+}
+
+void
+intr_unmask(struct intrhand *ih)
+{
+
+	/*
+	 * This is not safe to call from an interrupt context because
+	 * we don't want to accidentally unmask an interrupt source
+	 * that's masked because it's being serviced.
+	 */
+	KASSERT(!cpu_intr_p());
+	intr_mask_internal(ih, false);
+}
+
 /*
  * Called on bound CPU to handle intr_disestablish().
  *
-- thorpej



Home | Main Index | Thread Index | Old Index