Source-Changes-HG archive


[src/netbsd-9]: src/sys/arch/x86/pci Pull up following revision(s) (requested by knakahara in ticket #99)



details:   https://anonhg.NetBSD.org/src/rev/a65ab0af9438
branches:  netbsd-9
changeset: 962852:a65ab0af9438
user:      martin <martin%NetBSD.org@localhost>
date:      Tue Aug 20 11:31:46 2019 +0000

description:
Pull up following revision(s) (requested by knakahara in ticket #99):

        sys/arch/x86/pci/if_vmx.c: revision 1.46
        sys/arch/x86/pci/if_vmx.c: revision 1.47
        sys/arch/x86/pci/if_vmx.c: revision 1.48

vmx(4) now distributes its per-queue interrupts across CPUs, in the same way ixg(4) does.
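
The mechanism (visible in the diff below) is the kernel's interrupt_distribute() / kcpuset(9) API: each queue's established handler is bound to a CPU chosen round-robin from its interrupt index. A condensed sketch of the new code, taken from the hunk below (sc, intr_idx, ihs and intrstr are the driver's own locals there):

        kcpuset_t *affinity;

        /* Bind this queue's interrupt to a CPU, round-robin over ncpu. */
        kcpuset_create(&affinity, true);
        kcpuset_set(affinity, intr_idx % ncpu);
        error = interrupt_distribute(*ihs, affinity, NULL);
        if (error) {
                /* Not fatal: the interrupt simply stays on the default CPU. */
                aprint_normal_dev(sc->vmx_dev,
                    "%s: affinity cannot be changed, using default CPU\n",
                    intrstr);
        }
        kcpuset_destroy(affinity);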

Fix a panic when vmx(4) is detached.
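
My reading of the hunk below: softints are established once per queue, but the old teardown walked the interrupt-vector count (which can exceed the queue count because of the extra event interrupt) and disestablished a softint on every iteration, touching queues that never had one. The fix splits the teardown into two loops, roughly:

        /* First tear down the per-queue softints... */
        for (i = 0; i < sc->vmx_ntxqueues; i++) {
                struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];

                softint_disestablish(vmxq->vxq_si);
                vmxq->vxq_si = NULL;
        }
        /* ...then release every established interrupt vector. */
        for (i = 0; i < sc->vmx_nintrs; i++)
                pci_intr_disestablish(pc, sc->vmx_ihs[i]);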

Add basic vmx(4) statistics counters.
Sorry, I had forgotten this TODO from the r1.40 commit message.
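
The counters are plain evcnt(9) event counters: attached in the new vmxnet3_setup_stats(), bumped inline on the interrupt, softint/workqueue and transmit paths, and released again in vmxnet3_teardown_stats() on detach. The pattern, reduced to a single counter (the others in the diff differ only in field name and description string):

        /* in struct vmxnet3_txqueue */
        struct evcnt vxtxq_intr;

        /* attach time, from vmxnet3_setup_stats() */
        evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
            NULL, txq->vxtxq_name, "Interrupt on queue");

        /* hot path, in the queue interrupt handler */
        txq->vxtxq_intr.ev_count++;

        /* detach time, from vmxnet3_teardown_stats() */
        evcnt_detach(&txq->vxtxq_intr);

Once attached, the counters show up per queue and per device in vmstat -e output.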

diffstat:

 sys/arch/x86/pci/if_vmx.c |  161 ++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 156 insertions(+), 5 deletions(-)

diffs (truncated from 333 to 300 lines):

diff -r f2d5ff5baad6 -r a65ab0af9438 sys/arch/x86/pci/if_vmx.c
--- a/sys/arch/x86/pci/if_vmx.c Mon Aug 19 16:12:04 2019 +0000
+++ b/sys/arch/x86/pci/if_vmx.c Tue Aug 20 11:31:46 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: if_vmx.c,v 1.45 2019/07/30 11:16:15 knakahara Exp $    */
+/*     $NetBSD: if_vmx.c,v 1.45.2.1 2019/08/20 11:31:46 martin Exp $   */
 /*     $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $        */
 
 /*
@@ -19,7 +19,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.45 2019/07/30 11:16:15 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.45.2.1 2019/08/20 11:31:46 martin Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -31,6 +31,7 @@
 #include <sys/sockio.h>
 #include <sys/pcq.h>
 #include <sys/workqueue.h>
+#include <sys/interrupt.h>
 
 #include <net/bpf.h>
 #include <net/if.h>
@@ -199,6 +200,13 @@
        char vxtxq_name[16];
 
        void *vxtxq_si;
+
+       struct evcnt vxtxq_intr;
+       struct evcnt vxtxq_defer;
+       struct evcnt vxtxq_deferreq;
+       struct evcnt vxtxq_pcqdrop;
+       struct evcnt vxtxq_transmitdef;
+       struct evcnt vxtxq_watchdogto;
 };
 
 struct vmxnet3_rxq_stats {
@@ -218,6 +226,10 @@
        struct vmxnet3_rxq_stats vxrxq_stats;
        struct vmxnet3_rxq_shared *vxrxq_rs;
        char vxrxq_name[16];
+
+       struct evcnt vxrxq_intr;
+       struct evcnt vxrxq_defer;
+       struct evcnt vxrxq_deferreq;
 };
 
 struct vmxnet3_queue {
@@ -270,6 +282,12 @@
        int vmx_max_rxsegs;
 
        struct vmxnet3_statistics vmx_stats;
+       struct evcnt vmx_event_intr;
+       struct evcnt vmx_event_link;
+       struct evcnt vmx_event_txqerror;
+       struct evcnt vmx_event_rxqerror;
+       struct evcnt vmx_event_dic;
+       struct evcnt vmx_event_debug;
 
        int vmx_intr_type;
        int vmx_intr_mask_mode;
@@ -348,6 +366,9 @@
 int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
 int vmxnet3_setup_sysctl(struct vmxnet3_softc *);
 
+int vmxnet3_setup_stats(struct vmxnet3_softc *);
+void vmxnet3_teardown_stats(struct vmxnet3_softc *);
+
 int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
 int vmxnet3_init_txq(struct vmxnet3_softc *, int);
 int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
@@ -630,6 +651,10 @@
        if (error)
                return;
 
+       error = vmxnet3_setup_stats(sc);
+       if (error)
+               return;
+
        sc->vmx_flags |= VMXNET3_FLAG_ATTACHED;
 }
 
@@ -654,6 +679,7 @@
                if_detach(ifp);
        }
 
+       vmxnet3_teardown_stats(sc);
        sysctl_teardown(&sc->vmx_sysctllog);
 
        vmxnet3_free_interrupts(sc);
@@ -858,11 +884,13 @@
        int i;
 
        workqueue_destroy(sc->vmx_queue_wq);
-       for (i = 0; i < sc->vmx_nintrs; i++) {
+       for (i = 0; i < sc->vmx_ntxqueues; i++) {
                struct vmxnet3_queue *vmxq =  &sc->vmx_queue[i];
 
                softint_disestablish(vmxq->vxq_si);
                vmxq->vxq_si = NULL;
+       }
+       for (i = 0; i < sc->vmx_nintrs; i++) {
                pci_intr_disestablish(pc, sc->vmx_ihs[i]);
        }
        pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs);
@@ -876,6 +904,7 @@
        pci_intr_handle_t *intr;
        void **ihs;
        int intr_idx, i, use_queues, error;
+       kcpuset_t *affinity;
        const char *intrstr;
        char intrbuf[PCI_INTRSTR_LEN];
        char xnamebuf[32];
@@ -903,6 +932,16 @@
                }
                aprint_normal_dev(sc->vmx_dev, "txrx interrupting at %s\n", intrstr);
 
+               kcpuset_create(&affinity, true);
+               kcpuset_set(affinity, intr_idx % ncpu);
+               error = interrupt_distribute(*ihs, affinity, NULL);
+               if (error) {
+                       aprint_normal_dev(sc->vmx_dev,
+                           "%s: affinity cannot be changed, using default CPU\n",
+                           intrstr);
+               }
+               kcpuset_destroy(affinity);
+
                vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
                    vmxnet3_handle_queue, vmxq);
                if (vmxq->vxq_si == NULL) {
@@ -1909,6 +1948,93 @@
        return error;
 }
 
+int
+vmxnet3_setup_stats(struct vmxnet3_softc *sc)
+{
+       struct vmxnet3_queue *vmxq;
+       struct vmxnet3_txqueue *txq;
+       struct vmxnet3_rxqueue *rxq;
+       int i;
+
+       for (i = 0; i < sc->vmx_ntxqueues; i++) {
+               vmxq = &sc->vmx_queue[i];
+               txq = &vmxq->vxq_txqueue;
+               evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
+                   NULL, txq->vxtxq_name, "Interrupt on queue");
+               evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC,
+                   NULL, txq->vxtxq_name, "Handled queue in softint/workqueue");
+               evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC,
+                   NULL, txq->vxtxq_name, "Requested in softint/workqueue");
+               evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC,
+                   NULL, txq->vxtxq_name, "Dropped in pcq");
+               evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC,
+                   NULL, txq->vxtxq_name, "Deferred transmit");
+               evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC,
+                   NULL, txq->vxtxq_name, "Watchdog timeout");
+       }
+
+       for (i = 0; i < sc->vmx_nrxqueues; i++) {
+               vmxq = &sc->vmx_queue[i];
+               rxq = &vmxq->vxq_rxqueue;
+               evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR,
+                   NULL, rxq->vxrxq_name, "Interrupt on queue");
+               evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC,
+                   NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue");
+               evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC,
+                   NULL, rxq->vxrxq_name, "Requested in softint/workqueue");
+       }
+
+       evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR,
+           NULL, device_xname(sc->vmx_dev), "Interrupt for other events");
+       evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC,
+           NULL, device_xname(sc->vmx_dev), "Link status event");
+       evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC,
+           NULL, device_xname(sc->vmx_dev), "Tx queue error event");
+       evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC,
+           NULL, device_xname(sc->vmx_dev), "Rx queue error event");
+       evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC,
+           NULL, device_xname(sc->vmx_dev), "Device impl change event");
+       evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC,
+           NULL, device_xname(sc->vmx_dev), "Debug event");
+
+       return 0;
+}
+
+void
+vmxnet3_teardown_stats(struct vmxnet3_softc *sc)
+{
+       struct vmxnet3_queue *vmxq;
+       struct vmxnet3_txqueue *txq;
+       struct vmxnet3_rxqueue *rxq;
+       int i;
+
+       for (i = 0; i < sc->vmx_ntxqueues; i++) {
+               vmxq = &sc->vmx_queue[i];
+               txq = &vmxq->vxq_txqueue;
+               evcnt_detach(&txq->vxtxq_intr);
+               evcnt_detach(&txq->vxtxq_defer);
+               evcnt_detach(&txq->vxtxq_deferreq);
+               evcnt_detach(&txq->vxtxq_pcqdrop);
+               evcnt_detach(&txq->vxtxq_transmitdef);
+               evcnt_detach(&txq->vxtxq_watchdogto);
+       }
+
+       for (i = 0; i < sc->vmx_nrxqueues; i++) {
+               vmxq = &sc->vmx_queue[i];
+               rxq = &vmxq->vxq_rxqueue;
+               evcnt_detach(&rxq->vxrxq_intr);
+               evcnt_detach(&rxq->vxrxq_defer);
+               evcnt_detach(&rxq->vxrxq_deferreq);
+       }
+
+       evcnt_detach(&sc->vmx_event_intr);
+       evcnt_detach(&sc->vmx_event_link);
+       evcnt_detach(&sc->vmx_event_txqerror);
+       evcnt_detach(&sc->vmx_event_rxqerror);
+       evcnt_detach(&sc->vmx_event_dic);
+       evcnt_detach(&sc->vmx_event_debug);
+}
+
 void
 vmxnet3_evintr(struct vmxnet3_softc *sc)
 {
@@ -1928,12 +2054,18 @@
        vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
 
        if (event & VMXNET3_EVENT_LINK) {
+               sc->vmx_event_link.ev_count++;
                vmxnet3_link_status(sc);
                if (sc->vmx_link_active != 0)
                        if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
        }
 
        if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
+               if (event & VMXNET3_EVENT_TQERROR)
+                       sc->vmx_event_txqerror.ev_count++;
+               if (event & VMXNET3_EVENT_RQERROR)
+                       sc->vmx_event_rxqerror.ev_count++;
+
                reset = 1;
                vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
                ts = sc->vmx_queue[0].vxq_txqueue.vxtxq_ts;
@@ -1945,10 +2077,14 @@
                device_printf(dev, "Rx/Tx queue error event ... resetting\n");
        }
 
-       if (event & VMXNET3_EVENT_DIC)
+       if (event & VMXNET3_EVENT_DIC) {
+               sc->vmx_event_dic.ev_count++;
                device_printf(dev, "device implementation change event\n");
-       if (event & VMXNET3_EVENT_DEBUG)
+       }
+       if (event & VMXNET3_EVENT_DEBUG) {
+               sc->vmx_event_debug.ev_count++;
                device_printf(dev, "debug event\n");
+       }
 
        if (reset != 0)
                vmxnet3_init_locked(sc);
@@ -2405,10 +2541,12 @@
                vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx);
 
        VMXNET3_TXQ_LOCK(txq);
+       txq->vxtxq_intr.ev_count++;
        txmore = vmxnet3_txq_eof(txq, txlimit);
        VMXNET3_TXQ_UNLOCK(txq);
 
        VMXNET3_RXQ_LOCK(rxq);
+       rxq->vxrxq_intr.ev_count++;
        rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
        VMXNET3_RXQ_UNLOCK(rxq);
 
@@ -2444,7 +2582,10 @@
        rxlimit = sc->vmx_rx_process_limit;
 
        VMXNET3_TXQ_LOCK(txq);
+       txq->vxtxq_defer.ev_count++;
        txmore = vmxnet3_txq_eof(txq, txlimit);
+       if (txmore)
+               txq->vxtxq_deferreq.ev_count++;
        /* for ALTQ */
        if (vmxq->vxq_id == 0)
                if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
@@ -2452,7 +2593,10 @@
        VMXNET3_TXQ_UNLOCK(txq);
 
        VMXNET3_RXQ_LOCK(rxq);
+       rxq->vxrxq_defer.ev_count++;
        rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
+       if (rxmore)
+               rxq->vxrxq_deferreq.ev_count++;
        VMXNET3_RXQ_UNLOCK(rxq);
 
        if (txmore || rxmore)
@@ -2480,6 +2624,8 @@
        if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)


