Source-Changes-HG archive

[src/cherry-xenmp]: src/sys/arch/xen first cut at per-cpu event handling
details:   https://anonhg.NetBSD.org/src/rev/1e215a9952e9
branches:  cherry-xenmp
changeset: 765622:1e215a9952e9
user:      cherry <cherry%NetBSD.org@localhost>
date:      Thu Aug 04 09:07:46 2011 +0000

description:
first cut at per-cpu event handling
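
The core of the change factors the two-level Xen pending-event bitmap scan (a
level-1 selector word plus level-2 pending/mask word arrays) out of
stipending() and do_hypervisor_callback() into evt_iterate_pending(), which
walks the bitmaps for a given struct cpu_info and hands each pending port to a
callback such as evt_set_pending(). A minimal stand-alone C sketch of that
scan pattern follows; the demo_* names, the non-atomic bit operations, and
__builtin_ffsl() standing in for xen_ffs()/xen_atomic_xchg() are illustrative
assumptions, not the kernel's code.

#include <stdio.h>

#define LONG_SHIFT 6            /* log2(bits per long) on LP64; assumption */
#define NLONGS     2            /* small demo arrays */

/* callback invoked for every pending, unmasked port */
typedef void (*demo_iterate_func_t)(unsigned int port,
                                    unsigned int l1i,
                                    unsigned int l2i,
                                    void *arg);

static void
demo_iterate_pending(unsigned long *pendingl1,
                     unsigned long *pendingl2,
                     unsigned long *mask,
                     demo_iterate_func_t fn, void *arg)
{
        unsigned long l1, l2;
        unsigned int l1i, l2i, port;

        /* Grab and clear the level-1 selector word. */
        l1 = *pendingl1;
        *pendingl1 = 0;

        while ((l1i = __builtin_ffsl(l1)) != 0) {
                l1i--;
                l1 &= ~(1UL << l1i);

                /* Level-2 word, with already-masked events filtered out. */
                l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
                if (mask != NULL)
                        mask[l1i] |= l2;        /* mask what we will handle */
                pendingl2[l1i] &= ~l2;          /* clear the pending bits */

                while ((l2i = __builtin_ffsl(l2)) != 0) {
                        l2i--;
                        l2 &= ~(1UL << l2i);

                        port = (l1i << LONG_SHIFT) + l2i;
                        fn(port, l1i, l2i, arg);
                }
        }
}

static void
demo_handler(unsigned int port, unsigned int l1i, unsigned int l2i, void *arg)
{
        (*(int *)arg)++;
        printf("event port %u (l1=%u, l2=%u)\n", port, l1i, l2i);
}

int
main(void)
{
        unsigned long sel = 0x3;                        /* both L1 bits set */
        unsigned long pending[NLONGS] = { 0x5, 0x2 };   /* ports 0, 2, 65 */
        unsigned long mask[NLONGS]    = { 0x4, 0x0 };   /* port 2 is masked */
        int n = 0;

        demo_iterate_pending(&sel, pending, mask, demo_handler, &n);
        printf("%d events dispatched\n", n);            /* expect 2 */
        return 0;
}

Passing a callback plus an opaque argument lets the same walk serve both the
"record as pending for later" path (stipending() with evt_set_pending()) and
the "dispatch now" path (do_hypervisor_callback() with
evt_do_hypervisor_callback()), as the diff below shows.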

diffstat:

 sys/arch/xen/include/evtchn.h         |    6 +-
 sys/arch/xen/include/hypervisor.h     |    6 +-
 sys/arch/xen/include/intr.h           |    9 +-
 sys/arch/xen/x86/hypervisor_machdep.c |  272 ++++++++++++++++++++-------------
 sys/arch/xen/xen/evtchn.c             |   37 +++-
 sys/arch/xen/xen/xenevt.c             |    9 +-
 6 files changed, 206 insertions(+), 133 deletions(-)

diffs (truncated from 577 to 300 lines):

diff -r 2dd8f42d7cac -r 1e215a9952e9 sys/arch/xen/include/evtchn.h
--- a/sys/arch/xen/include/evtchn.h     Sun Jul 31 20:55:22 2011 +0000
+++ b/sys/arch/xen/include/evtchn.h     Thu Aug 04 09:07:46 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: evtchn.h,v 1.18.10.1 2011/06/03 13:27:40 cherry Exp $  */
+/*     $NetBSD: evtchn.h,v 1.18.10.2 2011/08/04 09:07:46 cherry Exp $  */
 
 /*
  *
@@ -42,7 +42,9 @@
 int event_remove_handler(int, int (*func)(void *), void *);
 
 struct intrhand;
-void event_set_iplhandler(struct intrhand *, int);
+void event_set_iplhandler(struct cpu_info *ci,
+                         struct intrhand *, 
+                         int);
 
 extern int debug_port;
 extern int xen_debug_handler(void *);
diff -r 2dd8f42d7cac -r 1e215a9952e9 sys/arch/xen/include/hypervisor.h
--- a/sys/arch/xen/include/hypervisor.h Sun Jul 31 20:55:22 2011 +0000
+++ b/sys/arch/xen/include/hypervisor.h Thu Aug 04 09:07:46 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: hypervisor.h,v 1.31 2009/10/19 18:41:10 bouyer Exp $   */
+/*     $NetBSD: hypervisor.h,v 1.31.10.1 2011/08/04 09:07:46 cherry Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -91,6 +91,7 @@
 #include <xen/xen3-public/io/netif.h>
 #include <xen/xen3-public/io/blkif.h>
 
+#include <machine/cpu.h>
 #include <machine/hypercalls.h>
 
 #undef u8
@@ -136,7 +137,8 @@
 void hypervisor_mask_event(unsigned int);
 void hypervisor_clear_event(unsigned int);
 void hypervisor_enable_ipl(unsigned int);
-void hypervisor_set_ipending(uint32_t, int, int);
+void hypervisor_set_ipending(struct cpu_info *, 
+                            uint32_t, int, int);
 void hypervisor_machdep_attach(void);
 
 /* 
diff -r 2dd8f42d7cac -r 1e215a9952e9 sys/arch/xen/include/intr.h
--- a/sys/arch/xen/include/intr.h       Sun Jul 31 20:55:22 2011 +0000
+++ b/sys/arch/xen/include/intr.h       Thu Aug 04 09:07:46 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: intr.h,v 1.31.10.2 2011/06/26 12:56:32 cherry Exp $    */
+/*     $NetBSD: intr.h,v 1.31.10.3 2011/08/04 09:07:46 cherry Exp $    */
 /*     NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp       */
 
 /*-
@@ -58,13 +58,8 @@
        struct intrhand *ev_handlers;   /* handler chain */
        struct evcnt ev_evcnt;          /* interrupt counter */
        char ev_evname[32];             /* event counter name */
+       struct cpu_info *ev_cpu;        /* cpu on which this event is bound */
        struct simplelock ev_lock;      /* protects this structure */
-
-       /* 
-        * XXX: The lock is quite coursegrained ( for the entire
-        * handler list ), but contention is expected to be low. See
-        * how this performs and revisit.
-        */
 };
 
 /*
diff -r 2dd8f42d7cac -r 1e215a9952e9 sys/arch/xen/x86/hypervisor_machdep.c
--- a/sys/arch/xen/x86/hypervisor_machdep.c     Sun Jul 31 20:55:22 2011 +0000
+++ b/sys/arch/xen/x86/hypervisor_machdep.c     Thu Aug 04 09:07:46 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: hypervisor_machdep.c,v 1.14.2.1 2011/06/03 13:27:41 cherry Exp $       */
+/*     $NetBSD: hypervisor_machdep.c,v 1.14.2.2 2011/08/04 09:07:47 cherry Exp $       */
 
 /*
  *
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.14.2.1 2011/06/03 13:27:41 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.14.2.2 2011/08/04 09:07:47 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -86,13 +86,101 @@
 // #define PORT_DEBUG 4
 // #define EARLY_DEBUG_EVENT
 
+static inline unsigned int
+evt_bitstr_to_port(unsigned long l1, unsigned long l2)
+{
+       unsigned int l1i, l2i, port;
+
+       l1i = xen_ffs(l1) - 1;
+       l2i = xen_ffs(l2) - 1;
+
+       port = (l1i << LONG_SHIFT) + l2i;
+       return port;
+}
+
+/* callback function type */
+typedef void (*iterate_func_t)(struct cpu_info *,
+                              unsigned int,
+                              unsigned int,
+                              unsigned int,
+                              void *);
+
+
+static inline void
+evt_iterate_pending(struct cpu_info *ci,
+                   volatile unsigned long *pendingl1,
+                   volatile unsigned long *pendingl2,
+                   volatile unsigned long *mask,
+                   iterate_func_t iterate_pending,
+                   void *iterate_args)
+{
+
+       KASSERT(pendingl1 != NULL);
+       KASSERT(pendingl2 != NULL);
+       
+       unsigned long l1, l2;
+       unsigned int l1i, l2i, port;
+
+       l1 = xen_atomic_xchg(pendingl1, 0);
+       while ((l1i = xen_ffs(l1)) != 0) {
+               l1i--;
+               l1 &= ~(1UL << l1i);
+
+               l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
+
+               if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
+               xen_atomic_clearbits_l(&pendingl2[l1i], l2);
+
+               while ((l2i = xen_ffs(l2)) != 0) {
+                       l2i--;
+                       l2 &= ~(1UL << l2i);
+
+                       port = (l1i << LONG_SHIFT) + l2i;
+
+                       iterate_pending(ci, port, l1i, l2i, iterate_args);
+               }
+       }
+}
+
+/*
+ * Set per-cpu "pending" information for outstanding events that
+ * cannot be processed now.
+ */
+   
+static inline void
+evt_set_pending(struct cpu_info *ci,
+               unsigned int port,
+               unsigned int l1i,
+               unsigned int l2i,
+               void *args)
+{
+
+       KASSERT(args != NULL);
+       KASSERT(ci != NULL);
+
+       int *ret = args;
+
+       if (evtsource[port]) {
+               hypervisor_set_ipending(ci,
+                       evtsource[port]->ev_imask,
+                       l1i, l2i);
+               evtsource[port]->ev_evcnt.ev_count++;
+               if (*ret == 0 && ci->ci_ilevel <
+                   evtsource[port]->ev_maxlevel)
+                       *ret = 1;
+       }
+#ifdef DOM0OPS
+       else  {
+               /* set pending event */
+               xenevt_setipending(l1i, l2i);
+       }
+#endif
+}
+
 int stipending(void);
 int
 stipending(void)
 {
-       unsigned long l1;
-       unsigned long l2;
-       unsigned int l1i, l2i, port;
        volatile shared_info_t *s = HYPERVISOR_shared_info;
        struct cpu_info *ci;
        volatile struct vcpu_info *vci;
@@ -120,45 +208,19 @@
         * we're only called after STIC, so we know that we'll have to
         * STI at the end
         */
+
        while (vci->evtchn_upcall_pending) {
                cli();
+
                vci->evtchn_upcall_pending = 0;
-               /* NB. No need for a barrier here -- XCHG is a barrier
-                * on x86. */
-               l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
-               while ((l1i = xen_ffs(l1)) != 0) {
-                       l1i--;
-                       l1 &= ~(1UL << l1i);
-
-                       l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
-                       /*
-                        * mask and clear event. More efficient than calling
-                        * hypervisor_mask/clear_event for each event.
-                        */
-                       xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
-                       xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
-                       while ((l2i = xen_ffs(l2)) != 0) {
-                               l2i--;
-                               l2 &= ~(1UL << l2i);
 
-                               port = (l1i << LONG_SHIFT) + l2i;
-                               if (evtsource[port]) {
-                                       hypervisor_set_ipending(
-                                           evtsource[port]->ev_imask,
-                                           l1i, l2i);
-                                       evtsource[port]->ev_evcnt.ev_count++;
-                                       if (ret == 0 && ci->ci_ilevel <
-                                           evtsource[port]->ev_maxlevel)
-                                               ret = 1;
-                               }
-#ifdef DOM0OPS
-                               else  {
-                                       /* set pending event */
-                                       xenevt_setipending(l1i, l2i);
-                               }
-#endif
-                       }
-               }
+               evt_iterate_pending(ci,
+                                   &vci->evtchn_pending_sel,
+                                   s->evtchn_pending,
+                                   s->evtchn_mask,
+                                   evt_set_pending,
+                                   &ret);
+
                sti();
        }
 
@@ -173,12 +235,45 @@
        return (ret);
 }
 
+/* Iterate through pending events and call the event handler */
+
+static inline void
+evt_do_hypervisor_callback(struct cpu_info *ci,
+                          unsigned int port,
+                          unsigned int l1i,
+                          unsigned int l2i,
+                          void *args)
+{
+       KASSERT(args != NULL);
+       KASSERT(ci == curcpu());
+
+       struct intrframe *regs = args;
+
+#ifdef PORT_DEBUG
+       if (port == PORT_DEBUG)
+               printf("do_hypervisor_callback event %d\n", port);
+#endif
+       if (evtsource[port])
+               call_evtchn_do_event(port, regs);
+#ifdef DOM0OPS
+       else  {
+               if (ci->ci_ilevel < IPL_HIGH) {
+                       /* fast path */
+                       int oipl = ci->ci_ilevel;
+                       ci->ci_ilevel = IPL_HIGH;
+                       call_xenevt_event(port);
+                       ci->ci_ilevel = oipl;
+               } else {
+                       /* set pending event */
+                       xenevt_setipending(l1i, l2i);
+               }
+       }
+#endif
+}
+
 void
 do_hypervisor_callback(struct intrframe *regs)
 {
-       unsigned long l1;
-       unsigned long l2;
-       unsigned int l1i, l2i, port;
        volatile shared_info_t *s = HYPERVISOR_shared_info;
        struct cpu_info *ci;
        volatile struct vcpu_info *vci;
@@ -199,51 +294,13 @@
 


