Port-i386 archive


Re: [PATCH] Add intr_mask() / intr_unmask() interface to mask / unmask individual interrupt sources




> On Aug 12, 2019, at 7:54 AM, Jason Thorpe <thorpej@me.com> wrote:
> 
>> Seems like pic_hwmask only needs to be called when is_mask_count==0.
> 
> Yes, good catch.  I also noticed a few other places where I should be checking is_mask_count before calling pic_hwunmask, and I realized that I needed to add some code to handle the set_affinity flow.  It's complicated by the fact that an intr_mask()'d interrupt will always be "pending", so some care is needed.  Specifically, before waiting for "pending" to drain out, I now mark the interrupt source as "distribution-pending"; when intr_unmask() comes along and sees that flag, it won't re-enable the source at the hardware, but it will still force processing of the source if it was marked interrupt-pending.

Ok, I've made some tweaks -- I still need to do more testing, but I think the x86 version of the changes is basically ready.  Updated diff attached.
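
In outline, the set_affinity interplay described above reduces to the following (condensed from the intr_set_affinity() changes in the attached diff; locking and error handling elided):

	/* Keep intr_unmask() from re-enabling the source at the hw. */
	isp->is_distribute_pending = true;

	(*pic->pic_hwmask)(pic, pin);	/* stop new interrupts */
	while (oldci->ci_ipending & (1 << oldslot)) {
		/* wait for "pending" to drain out */
		(void)kpause("intrdist", false, 1, &cpu_lock);
	}

	/* ... move the source to its new CPU ... */

	isp->is_distribute_pending = false;
	if (isp->is_mask_count == 0)	/* not intr_mask()'d? */
		(*pic->pic_hwunmask)(pic, pin);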

> 
>> Note that XENINTRSTUB will also need the same changes.
> 
> Thanks, I'll take a look at those, too.

So, I took a look at this, and I'm not entirely sure what can or should be done with them.  They seem only superficially related to the regular interrupt stubs, and they don't seem to have any notion of "pending".  Can someone explain to me how these work?

-- thorpej

diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index 168cae5cb8b5..270a9118d840 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -320,7 +320,8 @@ define	IS_FLAGS	offsetof(struct intrsource, is_flags)
 define	IS_PIN		offsetof(struct intrsource, is_pin)
 define	IS_TYPE		offsetof(struct intrsource, is_type)
 define	IS_MAXLEVEL	offsetof(struct intrsource, is_maxlevel)
-define	IS_LWP			offsetof(struct intrsource, is_lwp)
+define	IS_LWP		offsetof(struct intrsource, is_lwp)
+define	IS_MASK_COUNT	offsetof(struct intrsource, is_mask_count)
 
 define	IPL_NONE		IPL_NONE
 define	IPL_PREEMPT		IPL_PREEMPT
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 2ef3a7ba3b08..a0ac3fed6866 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -387,6 +387,8 @@ IDTVEC(handle_ ## name ## num)						;\
 	sti								;\
 	incl	CPUVAR(IDEPTH)						;\
 	movq	IS_HANDLERS(%r14),%rbx					;\
+	cmpl	$0,IS_MASK_COUNT(%r14)	/* source currently masked? */	;\
+	jne	7f			/* yes, hold it */		;\
 6:									\
 	movl	IH_LEVEL(%rbx),%r12d					;\
 	cmpl	%r13d,%r12d						;\
@@ -399,6 +401,8 @@ IDTVEC(handle_ ## name ## num)						;\
 	testq	%rbx,%rbx						;\
 	jnz	6b							;\
 5:									\
+	cmpl	$0,IS_MASK_COUNT(%r14)	/* source now masked? */	;\
+	jne	7f			/* yes, deal */			;\
 	cli								;\
 	unmask(num)			/* unmask it in hardware */	;\
 	late_ack(num)							;\
diff --git a/sys/arch/i386/i386/genassym.cf b/sys/arch/i386/i386/genassym.cf
index 479c5319fc2f..0002e23d6a5f 100644
--- a/sys/arch/i386/i386/genassym.cf
+++ b/sys/arch/i386/i386/genassym.cf
@@ -319,6 +319,7 @@ define	IS_PIN			offsetof(struct intrsource, is_pin)
 define	IS_TYPE			offsetof(struct intrsource, is_type)
 define	IS_MAXLEVEL		offsetof(struct intrsource, is_maxlevel)
 define	IS_LWP			offsetof(struct intrsource, is_lwp)
+define	IS_MASK_COUNT		offsetof(struct intrsource, is_mask_count)
 
 define	IPL_NONE		IPL_NONE
 define	IPL_PREEMPT		IPL_PREEMPT
diff --git a/sys/arch/i386/i386/vector.S b/sys/arch/i386/i386/vector.S
index 5ef99df2713c..3c5a625cef49 100644
--- a/sys/arch/i386/i386/vector.S
+++ b/sys/arch/i386/i386/vector.S
@@ -408,6 +408,8 @@ IDTVEC(intr_ ## name ## num)						;\
 	IDEPTH_INCR							;\
 	sti								;\
 	movl	IS_HANDLERS(%ebp),%ebx					;\
+	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source currently masked? */	;\
+	jne	7f			/* yes, hold it */		;\
 6:									\
 	movl	IH_LEVEL(%ebx),%edi					;\
 	cmpl	%esi,%edi						;\
@@ -420,6 +422,8 @@ IDTVEC(intr_ ## name ## num)						;\
 	addl	$4,%esp			/* toss the arg */		;\
 	testl	%ebx,%ebx						;\
 	jnz	6b							;\
+	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source now masked? */	;\
+	jne	7f			/* yes, deal */			;\
 	cli								;\
 	unmask(num)			/* unmask it in hardware */	;\
 	late_ack(num)							;\
diff --git a/sys/arch/x86/include/intr.h b/sys/arch/x86/include/intr.h
index 6fb4e9545338..e3d56b962e25 100644
--- a/sys/arch/x86/include/intr.h
+++ b/sys/arch/x86/include/intr.h
@@ -95,6 +95,8 @@ struct intrsource {
 	u_long ipl_evt_mask2[NR_EVENT_CHANNELS];
 #endif
 	struct evcnt is_evcnt;		/* interrupt counter per cpu */
+	u_int is_mask_count;		/* masked? (nested) [cpu_lock] */
+	int is_distribute_pending;	/* ci<->ci move pending [cpu_lock] */
 	int is_flags;			/* see below */
 	int is_type;			/* level, edge */
 	int is_idtvec;
@@ -215,6 +217,8 @@ void x86_nmi(void);
 void *intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *),
 			   void *, bool, const char *);
 void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool);
+void intr_mask(struct intrhand *);
+void intr_unmask(struct intrhand *);
 void intr_disestablish(struct intrhand *);
 void intr_add_pcibus(struct pcibus_attach_args *);
 const char *intr_string(intr_handle_t, char *, size_t);
diff --git a/sys/arch/x86/x86/intr.c b/sys/arch/x86/x86/intr.c
index 3722436e5886..b1663c1a7b2a 100644
--- a/sys/arch/x86/x86/intr.c
+++ b/sys/arch/x86/x86/intr.c
@@ -955,7 +955,8 @@ intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type,
 
 	/* All set up, so add a route for the interrupt and unmask it. */
 	(*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
-	(*pic->pic_hwunmask)(pic, pin);
+	if (ci->ci_isources[slot]->is_mask_count == 0)
+		(*pic->pic_hwunmask)(pic, pin);
 	mutex_exit(&cpu_lock);
 
 	if (bootverbose || cpu_index(ci) != 0)
@@ -976,6 +977,103 @@ intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
 	    level, handler, arg, known_mpsafe, "unknown");
 }
 
+/*
+ * Called on bound CPU to handle intr_mask() / intr_unmask().
+ *
+ * => caller (on initiating CPU) holds cpu_lock on our behalf
+ * => arg1: struct intrhand *ih
+ * => arg2: true -> mask, false -> unmask.
+ */
+static void
+intr_mask_xcall(void *arg1, void *arg2)
+{
+	struct intrhand * const ih = arg1;
+	const uintptr_t mask = (uintptr_t)arg2;
+	struct cpu_info * const ci = ih->ih_cpu;
+	bool force_pending = false;
+
+	KASSERT(ci == curcpu() || !mp_online);
+
+	/*
+	 * cpu_lock is held to hold off other readers of is_mask_count,
+	 * but we need to disable interrupts to hold off the interrupt
+	 * vectors.
+	 */
+	const u_long psl = x86_read_psl();
+	x86_disable_intr();
+
+	struct intrsource * const source = ci->ci_isources[ih->ih_slot];
+	struct pic * const pic = source->is_pic;
+
+	if (mask) {
+		source->is_mask_count++;
+		KASSERT(source->is_mask_count != 0);
+		if (source->is_mask_count == 1) {
+			(*pic->pic_hwmask)(pic, ih->ih_pin);
+		}
+	} else {
+		KASSERT(source->is_mask_count != 0);
+		if (--source->is_mask_count == 0) {
+			/*
+			 * If this interrupt source is being moved, don't
+			 * unmask it at the hw.
+			 */
+			if (! source->is_distribute_pending)
+				(*pic->pic_hwunmask)(pic, ih->ih_pin);
+			force_pending = true;
+		}
+	}
+
+	/* Re-enable interrupts. */
+	x86_write_psl(psl);
+
+	if (force_pending) {
+		/* Force processing of any pending interrupts. */
+		splx(splhigh());
+	}
+}
+
+static void
+intr_mask_internal(struct intrhand * const ih, const bool mask)
+{
+
+	/*
+	 * Call out to the remote CPU to update its interrupt state.
+	 * Only make RPCs if the APs are up and running.
+	 */
+	mutex_enter(&cpu_lock);
+	struct cpu_info * const ci = ih->ih_cpu;
+	void * const mask_arg = (void *)(uintptr_t)mask;
+	if (ci == curcpu() || !mp_online) {
+		intr_mask_xcall(ih, mask_arg);
+	} else {
+		const uint64_t where =
+		    xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
+		xc_wait(where);
+	}
+	mutex_exit(&cpu_lock);
+}
+
+void
+intr_mask(struct intrhand *ih)
+{
+
+	intr_mask_internal(ih, true);
+}
+
+void
+intr_unmask(struct intrhand *ih)
+{
+
+	/*
+	 * This is not safe to call from an interrupt context because
+	 * we don't want to accidentally unmask an interrupt source
+	 * that's masked because it's being serviced.
+	 */
+	KASSERT(!cpu_intr_p());
+	intr_mask_internal(ih, false);
+}
+
 /*
  * Called on bound CPU to handle intr_disestablish().
  *
@@ -1036,7 +1134,7 @@ intr_disestablish_xcall(void *arg1, void *arg2)
 	if (source->is_handlers == NULL)
 		(*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec,
 		    source->is_type);
-	else
+	else if (source->is_mask_count == 0)
 		(*pic->pic_hwunmask)(pic, ih->ih_pin);
 
 	/* Re-enable interrupts. */
@@ -1851,10 +1949,14 @@ intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
 		return err;
 	}
 
+	/* Prevent intr_unmask() from reenabling the source at the hw. */
+	isp->is_distribute_pending = true;
+
 	pin = isp->is_pin;
 	(*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */
-	while (oldci->ci_ipending & (1 << oldslot))
+	while (oldci->ci_ipending & (1 << oldslot)) {
 		(void)kpause("intrdist", false, 1, &cpu_lock);
+	}
 
 	kpreempt_disable();
 
@@ -1891,7 +1993,9 @@ intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
 
 	kpreempt_enable();
 
-	(*pic->pic_hwunmask)(pic, pin);
+	isp->is_distribute_pending = false;
+	if (isp->is_mask_count == 0)
+		(*pic->pic_hwunmask)(pic, pin);
 
 	return err;
 }
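
For reference, a consumer of the interface added above might look something like this.  The foo_* names are hypothetical; only intr_mask() and intr_unmask() come from the patch, and both calls are made from thread context, since both paths take cpu_lock in this version:

	static void
	foo_reconfigure(struct foo_softc *sc)
	{

		/* Quiesce the source; it simply stays "pending" while masked. */
		intr_mask(sc->sc_ih);

		foo_reprogram_hw(sc);

		/*
		 * Re-enable the source.  If it became pending while
		 * masked, intr_unmask() forces it to be processed via
		 * splx(splhigh()).  Note the KASSERT: this must not be
		 * called from interrupt context.
		 */
		intr_unmask(sc->sc_ih);
	}

Calls nest via is_mask_count, so paired mask/unmask from independent code paths compose; the hardware is only unmasked again once the count drops back to zero.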

