Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/dev/pci unify TX and RX interrupt handler to use MSI-X v...



details:   https://anonhg.NetBSD.org/src/rev/f42cdc570c05
branches:  trunk
changeset: 815465:f42cdc570c05
user:      knakahara <knakahara%NetBSD.org@localhost>
date:      Thu May 19 08:27:57 2016 +0000

description:
unify TX and RX interrupt handlers to use MSI-X vectors efficiently

diffstat:

 sys/dev/pci/if_wm.c |  580 +++++++++++++++++++--------------------------------
 1 files changed, 221 insertions(+), 359 deletions(-)

diffs (truncated from 1120 to 300 lines):

diff -r 2a66d58a54e4 -r f42cdc570c05 sys/dev/pci/if_wm.c
--- a/sys/dev/pci/if_wm.c       Thu May 19 08:22:37 2016 +0000
+++ b/sys/dev/pci/if_wm.c       Thu May 19 08:27:57 2016 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: if_wm.c,v 1.404 2016/05/19 08:22:37 knakahara Exp $    */
+/*     $NetBSD: if_wm.c,v 1.405 2016/05/19 08:27:57 knakahara Exp $    */
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
@@ -83,7 +83,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.404 2016/05/19 08:22:37 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.405 2016/05/19 08:27:57 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -164,9 +164,8 @@
 /*
  * This device driver's max interrupt numbers.
  */
-#define WM_MAX_NTXINTR         16
-#define WM_MAX_NRXINTR         16
-#define WM_MAX_NINTR           (WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
+#define WM_MAX_NQUEUEINTR      16
+#define WM_MAX_NINTR           (WM_MAX_NQUEUEINTR + 1)
 
 /*
  * Transmit descriptor list size.  Due to errata, we can only have
@@ -256,10 +255,7 @@
 struct wm_txqueue {
        kmutex_t *txq_lock;             /* lock for tx operations */
 
-       struct wm_softc *txq_sc;
-
-       int txq_id;                     /* index of transmit queues */
-       int txq_intr_idx;               /* index of MSI-X tables */
+       struct wm_softc *txq_sc;        /* shortcut (skip struct wm_queue) */
 
        /* Software state for the transmit descriptors. */
        int txq_num;                    /* must be a power of two */
@@ -310,10 +306,7 @@
 struct wm_rxqueue {
        kmutex_t *rxq_lock;             /* lock for rx operations */
 
-       struct wm_softc *rxq_sc;
-
-       int rxq_id;                     /* index of receive queues */
-       int rxq_intr_idx;               /* index of MSI-X tables */
+       struct wm_softc *rxq_sc;        /* shortcut (skip struct wm_queue) */
 
        /* Software state for the receive descriptors. */
        wiseman_rxdesc_t *rxq_descs;
@@ -338,6 +331,14 @@
        /* XXX which event counter is required? */
 };
 
+struct wm_queue {
+       int wmq_id;                     /* index of transmit and receive queues */
+       int wmq_intr_idx;               /* index of MSI-X tables */
+
+       struct wm_txqueue wmq_txq;
+       struct wm_rxqueue wmq_rxq;
+};
+
 /*
  * Software state per device.
  */
@@ -402,11 +403,8 @@
        int sc_ich8_flash_bank_size;
        int sc_nvm_k1_enabled;
 
-       int sc_ntxqueues;
-       struct wm_txqueue *sc_txq;
-
-       int sc_nrxqueues;
-       struct wm_rxqueue *sc_rxq;
+       int sc_nqueues;
+       struct wm_queue *sc_queue;
 
        int sc_affinity_offset;
 
@@ -609,18 +607,22 @@
 static int     wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void    wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void    wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
-static void    wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
+static void    wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
+    struct wm_txqueue *);
 static int     wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
 static void    wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
-static void    wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
+static void    wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
+    struct wm_rxqueue *);
 static int     wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void    wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void    wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static int     wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
 static void    wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
 static int     wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
-static void    wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
-static int     wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
+static void    wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
+    struct wm_txqueue *);
+static int     wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
+    struct wm_rxqueue *);
 static int     wm_alloc_txrx_queues(struct wm_softc *);
 static void    wm_free_txrx_queues(struct wm_softc *);
 static int     wm_init_txrx_queues(struct wm_softc *);
@@ -645,8 +647,7 @@
 static void    wm_linkintr_serdes(struct wm_softc *, uint32_t);
 static void    wm_linkintr(struct wm_softc *, uint32_t);
 static int     wm_intr_legacy(void *);
-static int     wm_txintr_msix(void *);
-static int     wm_rxintr_msix(void *);
+static int     wm_txrxintr_msix(void *);
 static int     wm_linkintr_msix(void *);
 
 /*
@@ -1675,7 +1676,7 @@
 
        /* Allocation settings */
        max_type = PCI_INTR_TYPE_MSIX;
-       counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
+       counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
        counts[PCI_INTR_TYPE_MSI] = 1;
        counts[PCI_INTR_TYPE_INTX] = 1;
 
@@ -2414,7 +2415,7 @@
        ifp->if_ioctl = wm_ioctl;
        if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
                ifp->if_start = wm_nq_start;
-               if (sc->sc_ntxqueues > 1)
+               if (sc->sc_nqueues > 1)
                        ifp->if_transmit = wm_nq_transmit;
        } else
                ifp->if_start = wm_start;
@@ -2635,8 +2636,8 @@
        if_percpuq_destroy(sc->sc_ipq);
 
        /* Unload RX dmamaps and free mbufs */
-       for (i = 0; i < sc->sc_nrxqueues; i++) {
-               struct wm_rxqueue *rxq = &sc->sc_rxq[i];
+       for (i = 0; i < sc->sc_nqueues; i++) {
+               struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
                WM_RX_LOCK(rxq);
                wm_rxdrain(rxq);
                WM_RX_UNLOCK(rxq);
@@ -2709,8 +2710,8 @@
        int qid;
        struct wm_softc *sc = ifp->if_softc;
 
-       for (qid = 0; qid < sc->sc_ntxqueues; qid++) {
-               struct wm_txqueue *txq = &sc->sc_txq[qid];
+       for (qid = 0; qid < sc->sc_nqueues; qid++) {
+               struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
 
                wm_watchdog_txq(ifp, txq);
        }
@@ -3695,8 +3696,8 @@
        case WM_T_82547_2:
                sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
                    PBA_22K : PBA_30K;
-               for (i = 0; i < sc->sc_ntxqueues; i++) {
-                       struct wm_txqueue *txq = &sc->sc_txq[i];
+               for (i = 0; i < sc->sc_nqueues; i++) {
+                       struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
                        txq->txq_fifo_head = 0;
                        txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
                        txq->txq_fifo_size =
@@ -4195,7 +4196,7 @@
        for (i = 0; i < RETA_NUM_ENTRIES; i++) {
                int qid, reta_ent;
 
-               qid  = i % sc->sc_nrxqueues;
+               qid  = i % sc->sc_nqueues;
                switch(sc->sc_type) {
                case WM_T_82574:
                        reta_ent = __SHIFTIN(qid,
@@ -4248,11 +4249,10 @@
 static void
 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
 {
-       int hw_ntxqueues, hw_nrxqueues;
-
-       if (nvectors < 3) {
-               sc->sc_ntxqueues = 1;
-               sc->sc_nrxqueues = 1;
+       int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
+
+       if (nvectors < 2) {
+               sc->sc_nqueues = 1;
                return;
        }
 
@@ -4304,33 +4304,24 @@
                break;
        }
 
+       hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
+
        /*
-        * As queues more then MSI-X vectors cannot improve scaling, we limit
+        * As queues more than MSI-X vectors cannot improve scaling, we limit
         * the number of queues used actually.
-        *
-        * XXX
-        * Currently, we separate TX queue interrupts and RX queue interrupts.
-        * Howerver, the number of MSI-X vectors of recent controllers (such as
-        * I354) expects that drivers bundle a TX queue interrupt and a RX
-        * interrupt to one interrupt. e.g. FreeBSD's igb deals interrupts in
-        * such a way.
         */
-       if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
-               sc->sc_ntxqueues = (nvectors - 1) / 2;
-               sc->sc_nrxqueues = (nvectors - 1) / 2;
+       if (nvectors < hw_nqueues + 1) {
+               sc->sc_nqueues = nvectors - 1;
        } else {
-               sc->sc_ntxqueues = hw_ntxqueues;
-               sc->sc_nrxqueues = hw_nrxqueues;
+               sc->sc_nqueues = hw_nqueues;
        }
 
        /*
         * As queues more then cpus cannot improve scaling, we limit
         * the number of queues used actually.
         */
-       if (ncpu < sc->sc_ntxqueues)
-               sc->sc_ntxqueues = ncpu;
-       if (ncpu < sc->sc_nrxqueues)
-               sc->sc_nrxqueues = ncpu;
+       if (ncpu < sc->sc_nqueues)
+               sc->sc_nqueues = ncpu;
 }
 
 /*
@@ -4374,13 +4365,13 @@
 {
        void *vih;
        kcpuset_t *affinity;
-       int qidx, error, intr_idx, tx_established, rx_established;
+       int qidx, error, intr_idx, txrx_established;
        pci_chipset_tag_t pc = sc->sc_pc;
        const char *intrstr = NULL;
        char intrbuf[PCI_INTRSTR_LEN];
        char intr_xname[INTRDEVNAMEBUF];
 
-       if (sc->sc_ntxqueues + sc->sc_nrxqueues < ncpu) {
+       if (sc->sc_nqueues < ncpu) {
                /*
                 * To avoid other devices' interrupts, the affinity of Tx/Rx
                 * interrupts start from CPU#1.
@@ -4405,11 +4396,11 @@
        intr_idx = 0;
 
        /*
-        * TX
+        * TX and RX
         */
-       tx_established = 0;
-       for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
-               struct wm_txqueue *txq = &sc->sc_txq[qidx];
+       txrx_established = 0;
+       for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
+               struct wm_queue *wmq = &sc->sc_queue[qidx];
                int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
 
                intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
@@ -4419,17 +4410,17 @@
                    PCI_INTR_MPSAFE, true);
 #endif
                memset(intr_xname, 0, sizeof(intr_xname));
-               snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
+               snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
                    device_xname(sc->sc_dev), qidx);
                vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
-                   IPL_NET, wm_txintr_msix, txq, intr_xname);
+                   IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
                if (vih == NULL) {
                        aprint_error_dev(sc->sc_dev,
-                           "unable to establish MSI-X(for TX)%s%s\n",
+                           "unable to establish MSI-X(for TX and RX)%s%s\n",
                            intrstr ? " at " : "",
                            intrstr ? intrstr : "");
 
-                       goto fail_0;
+                       goto fail;
                }
                kcpuset_zero(affinity);
                /* Round-robin affinity */
@@ -4437,64 +4428,17 @@
                error = interrupt_distribute(vih, affinity, NULL);
                if (error == 0) {
                        aprint_normal_dev(sc->sc_dev,
-                           "for TX interrupting at %s affinity to %u\n",
+                           "for TX and RX interrupting at %s affinity to %u\n",
                            intrstr, affinity_to);
                } else {



Home | Main Index | Thread Index | Old Index