Port-xen archive


Interrupt codepath review



Hello,

I'm wondering if there'd be any objections to the approach I'm taking in
the attached patch. This will take us closer to unifying the interrupt
path with native.

A rationale is included in this discussion:
http://mail-index.netbsd.org/tech-kern/2018/11/18/msg024198.html
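
In outline, the patch reworks the callback flow as sketched below. This is
just a condensed restatement of the code in the attached patch (same kernel
symbols; the interrupt-enable/PSL bookkeeping is omitted here), not a
separate implementation:

	/*
	 * 1. While scanning the pending-event bitmap, each active port is
	 *    now only recorded as pending at its handlers' IPLs, instead of
	 *    running the handlers inline via evtchn_do_event():
	 */
	hypervisor_set_ipending(evtsource[port]->ev_imask,
	    port >> LONG_SHIFT, port & LONG_MASK);

	/*
	 * 2. Once the scan is done, the queued work is drained by raising
	 *    to the new IPL_HYPERVISOR and walking the levels back down,
	 *    letting the regular spl(9) machinery run the pending handlers,
	 *    as on native x86:
	 */
	spl = splraise(IPL_HYPERVISOR);
	for (i = IPL_HYPERVISOR; ci->ci_ipending && i >= spl; i--)
		spllower(i);
	spllower(spl);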


-- 
~cherry

diff -r 901942712a0f sys/arch/x86/include/intrdefs.h
--- a/sys/arch/x86/include/intrdefs.h	Sun Nov 18 16:05:19 2018 +0530
+++ b/sys/arch/x86/include/intrdefs.h	Sun Nov 18 16:25:42 2018 +0530
@@ -13,7 +13,9 @@
 #define	IPL_VM		0x6	/* low I/O, memory allocation */
 #define IPL_SCHED	0x7	/* medium I/O, scheduler, clock */
 #define	IPL_HIGH	0x8	/* high I/O, statclock, IPIs */
-#define	NIPL		9
+#define	IPL_HYPERVISOR	0x9	/* Exclusively used by hypervisor callback */
+#define	NIPL		10
+
 
 /* Interrupt sharing types. */
 #define	IST_NONE	0	/* none */
diff -r 901942712a0f sys/arch/xen/x86/hypervisor_machdep.c
--- a/sys/arch/xen/x86/hypervisor_machdep.c	Sun Nov 18 16:05:19 2018 +0530
+++ b/sys/arch/xen/x86/hypervisor_machdep.c	Sun Nov 18 16:25:42 2018 +0530
@@ -211,6 +211,38 @@
 	return (ret);
 }
 
+/* This is essentially a despatch function for queued interrupts */
+static void
+xen_despatch_events(struct cpu_info *ci)
+{
+	int i, spl;
+	u_long psl;
+
+	spl = splraise(IPL_HYPERVISOR);
+	/*
+	 * This bit uses spl(9) magic to brute force calling every
+	 * pending handler at every SPL level down to the interruptee.
+	 */
+
+	psl = x86_read_psl();
+	for (i = IPL_HYPERVISOR; ci->ci_ipending && i >= spl; i--) {
+		x86_enable_intr();
+		spllower(i);
+	}
+	x86_enable_intr();
+	spllower(spl);
+	x86_write_psl(psl);
+}
+
+static void
+xen_queue_event(struct cpu_info *ci, unsigned long port)
+{
+	/* Set pending bit for spl code */
+	hypervisor_set_ipending(evtsource[port]->ev_imask,
+	    port >> LONG_SHIFT, port & LONG_MASK);
+
+}
+
 /* Iterate through pending events and call the event handler */
 
 static inline void
@@ -228,7 +260,7 @@
 #endif
 	if (evtsource[port]) {
 		ci->ci_idepth++;
-		evtchn_do_event(port, regs);
+		xen_queue_event(ci, port);
 		ci->ci_idepth--;
 	}
 #ifdef DOM0OPS
@@ -280,6 +312,9 @@
 		evt_iterate_bits(&vci->evtchn_pending_sel,
 		    s->evtchn_pending, s->evtchn_mask,
 		    evt_do_hypervisor_callback, regs);
+
+		xen_despatch_events(ci);
+
 	}
 
 #ifdef DIAGNOSTIC
@@ -389,7 +424,6 @@
 	 * for its lowest IPL, and pending IPLs are processed high to low,
 	 * we know that all callback for this event have been processed.
 	 */
-
 	evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1,
 	    ci->ci_isources[ipl]->ipl_evt_mask2, NULL, 
 	    evt_enable_event, NULL);
diff -r 901942712a0f sys/arch/xen/xen/evtchn.c
--- a/sys/arch/xen/xen/evtchn.c	Sun Nov 18 16:05:19 2018 +0530
+++ b/sys/arch/xen/xen/evtchn.c	Sun Nov 18 16:25:42 2018 +0530
@@ -305,136 +305,6 @@
 	return true;
 }
 
-
-unsigned int
-evtchn_do_event(int evtch, struct intrframe *regs)
-{
-	struct cpu_info *ci;
-	int ilevel;
-	struct intrhand *ih;
-	int	(*ih_fun)(void *, void *);
-	uint32_t iplmask;
-	int i;
-	uint32_t iplbit;
-
-	KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
-	KASSERTMSG(evtch < NR_EVENT_CHANNELS,
-	    "evtch number %d > NR_EVENT_CHANNELS", evtch);
-
-#ifdef IRQ_DEBUG
-	if (evtch == IRQ_DEBUG)
-		printf("evtchn_do_event: evtch %d\n", evtch);
-#endif
-	ci = curcpu();
-
-	/*
-	 * Shortcut for the debug handler, we want it to always run,
-	 * regardless of the IPL level.
-	 */
-	if (__predict_false(evtch == debug_port)) {
-		xen_debug_handler(NULL);
-		hypervisor_unmask_event(debug_port);
-#if NPCI > 0 || NISA > 0
-		hypervisor_ack_pirq_event(debug_port);
-#endif /* NPCI > 0 || NISA > 0 */		
-		return 0;
-	}
-
-	KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch);
-	ci->ci_data.cpu_nintr++;
-	evtsource[evtch]->ev_evcnt.ev_count++;
-	ilevel = ci->ci_ilevel;
-
-	if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) {
-		hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch);
-		return 0;
-	}
-
-	if (evtsource[evtch]->ev_maxlevel <= ilevel) {
-#ifdef IRQ_DEBUG
-		if (evtch == IRQ_DEBUG)
-		    printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
-		    evtch, evtsource[evtch]->ev_maxlevel, ilevel);
-#endif
-		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
-					evtch >> LONG_SHIFT,
-					evtch & LONG_MASK);
-
-		/* leave masked */
-
-		return 0;
-	}
-	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
-	iplmask = evtsource[evtch]->ev_imask;
-	sti();
-	mutex_spin_enter(&evtlock[evtch]);
-	ih = evtsource[evtch]->ev_handlers;
-	while (ih != NULL) {
-		if (ih->ih_cpu != ci) {
-			hypervisor_send_event(ih->ih_cpu, evtch);
-			iplmask &= ~IUNMASK(ci, ih->ih_level);
-			ih = ih->ih_evt_next;
-			continue;
-		}
-		if (ih->ih_level <= ilevel) {
-#ifdef IRQ_DEBUG
-		if (evtch == IRQ_DEBUG)
-		    printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
-#endif
-			cli();
-			hypervisor_set_ipending(iplmask,
-			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
-			/* leave masked */
-			mutex_spin_exit(&evtlock[evtch]);
-			goto splx;
-		}
-		iplmask &= ~IUNMASK(ci, ih->ih_level);
-		ci->ci_ilevel = ih->ih_level;
-		ih_fun = (void *)ih->ih_fun;
-		ih_fun(ih->ih_arg, regs);
-		ih = ih->ih_evt_next;
-	}
-	mutex_spin_exit(&evtlock[evtch]);
-	cli();
-	hypervisor_unmask_event(evtch);
-#if NPCI > 0 || NISA > 0
-	hypervisor_ack_pirq_event(evtch);
-#endif /* NPCI > 0 || NISA > 0 */		
-
-splx:
-	/*
-	 * C version of spllower(). ASTs will be checked when
-	 * hypevisor_callback() exits, so no need to check here.
-	 */
-	iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
-	while (iplmask != 0) {
-		iplbit = 1 << (NIPL - 1);
-		i = (NIPL - 1);
-		while (iplmask != 0 && i > ilevel) {
-			while (iplmask & iplbit) {
-				ci->ci_ipending &= ~iplbit;
-				ci->ci_ilevel = i;
-				for (ih = ci->ci_isources[i]->is_handlers;
-				    ih != NULL; ih = ih->ih_next) {
-					KASSERT(ih->ih_cpu == ci);
-					sti();
-					ih_fun = (void *)ih->ih_fun;
-					ih_fun(ih->ih_arg, regs);
-					cli();
-				}
-				hypervisor_enable_ipl(i);
-				/* more pending IPLs may have been registered */
-				iplmask =
-				    (IUNMASK(ci, ilevel) & ci->ci_ipending);
-			}
-			i--;
-			iplbit >>= 1;
-		}
-	}
-	ci->ci_ilevel = ilevel;
-	return 0;
-}
-
 #define PRIuCPUID	"lu" /* XXX: move this somewhere more appropriate */
 
 /* PIC callbacks */

