Source-Changes-HG archive


[src/trunk]: src/sys/arch/xen/xen add support for scatter-gather when accepting packets on the frontend Tx path



details:   https://anonhg.NetBSD.org/src/rev/faa01dee2b44
branches:  trunk
changeset: 931764:faa01dee2b44
user:      jdolecek <jdolecek%NetBSD.org@localhost>
date:      Thu Apr 30 11:23:44 2020 +0000

description:
add support for scatter-gather when accepting packets on the frontend Tx path
(frontend -> backend)

don't enable ETHERCAP_JUMBO_MTU or feature-sg yet; support still needs to be
implemented for the frontend Rx side
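
With scatter-gather, a single packet may occupy several consecutive slots on
the Tx ring: the first netif_tx_request carries the total packet size, and a
slot with NETTXF_more_data set announces that another fragment follows. The
backend therefore has to walk the whole chain before it knows how much of the
first slot's data actually belongs to the first fragment. Here is a minimal
sketch of that walk, assuming the standard Xen ring accessors; the name
sg_first_frag_len is hypothetical and stands in for the committed helper
xennetback_tx_m0len_fragment() shown (truncated) in the diff below:

	/*
	 * Hypothetical sketch, not the committed code.  The first
	 * request's "size" field holds the total packet length and each
	 * continuation request carries its own fragment size, so
	 * subtracting the continuations leaves fragment 0's length.
	 * req_cons must point at the first continuation slot and all
	 * requests must already be pushed into the ring.
	 */
	static int
	sg_first_frag_len(netif_tx_back_ring_t *ring, int total_len,
	    RING_IDX req_cons, int *cntp)
	{
		netif_tx_request_t *txreq;

		*cntp = 1;			/* count the first slot itself */
		do {
			txreq = RING_GET_REQUEST(ring, req_cons);
			req_cons++;
			(*cntp)++;		/* one more fragment slot */
			total_len -= txreq->size;
		} while (txreq->flags & NETTXF_more_data);

		return total_len;		/* what remains is fragment 0 */
	}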

diffstat:

 sys/arch/xen/xen/xennetback_xenbus.c |  390 +++++++++++++++++++++++++---------
 1 files changed, 279 insertions(+), 111 deletions(-)

diffs (truncated from 540 to 300 lines):

diff -r 65dd58477a40 -r faa01dee2b44 sys/arch/xen/xen/xennetback_xenbus.c
--- a/sys/arch/xen/xen/xennetback_xenbus.c      Thu Apr 30 11:19:39 2020 +0000
+++ b/sys/arch/xen/xen/xennetback_xenbus.c      Thu Apr 30 11:23:44 2020 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: xennetback_xenbus.c,v 1.98 2020/04/26 13:09:52 jdolecek Exp $      */
+/*      $NetBSD: xennetback_xenbus.c,v 1.99 2020/04/30 11:23:44 jdolecek Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.98 2020/04/26 13:09:52 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.99 2020/04/30 11:23:44 jdolecek Exp $");
 
 #include "opt_xen.h"
 
@@ -78,6 +78,7 @@
  * transmit at once).
  */
 #define NB_XMIT_PAGES_BATCH 64
+CTASSERT(NB_XMIT_PAGES_BATCH >= XEN_NETIF_NR_SLOTS_MIN);
 
 /* ratecheck(9) for pool allocation failures */
 static const struct timeval xni_pool_errintvl = { 30, 0 };  /* 30s, each */
@@ -91,9 +92,10 @@
 
 struct xnetback_xstate {
        bus_dmamap_t xs_dmamap;
+       bool xs_loaded;
        struct mbuf *xs_m;
-       int xs_id;
-       int xs_flags;
+       struct netif_tx_request xs_tx;
+       uint16_t xs_tx_size;            /* Size of data in this Tx fragment */
 };
 
 /* we keep the xnetback instances in a linked list */
@@ -235,8 +237,9 @@
 
        /* Initialize DMA map, used only for loading PA */
        for (i = 0; i < __arraycount(xneti->xni_xstate); i++) {
-               if (bus_dmamap_create(xneti->xni_xbusd->xbusd_dmat, PAGE_SIZE,
-                   1, PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
+               if (bus_dmamap_create(xneti->xni_xbusd->xbusd_dmat,
+                   ETHER_MAX_LEN_JUMBO, XEN_NETIF_NR_SLOTS_MIN,
+                   PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
                    &xneti->xni_xstate[i].xs_dmamap)
                    != 0) {
                        aprint_error_ifnet(ifp,
@@ -249,7 +252,11 @@
        /* create pseudo-interface */
        aprint_verbose_ifnet(ifp, "Ethernet address %s\n",
            ether_sprintf(xneti->xni_enaddr));
-       xneti->xni_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
+       xneti->xni_ec.ec_capabilities |= ETHERCAP_VLAN_MTU
+#ifdef notyet
+               | ETHERCAP_JUMBO_MTU
+#endif
+       ;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_snd.ifq_maxlen =
            uimax(ifqmaxlen, NET_TX_RING_SIZE * 2);
@@ -309,6 +316,16 @@
                            xbusd->xbusd_path, err);
                        goto abort_xbt;
                }
+#if notyet
+               err = xenbus_printf(xbt, xbusd->xbusd_path,
+                   "feature-sg", "%d", 1);
+               if (err) {
+                       aprint_error_ifnet(ifp,
+                           "failed to write %s/feature-sg: %d\n",
+                           xbusd->xbusd_path, err);
+                       goto abort_xbt;
+               }
+#endif
        } while ((err = xenbus_transaction_end(xbt, 0)) == EAGAIN);
        if (err) {
                aprint_error_ifnet(ifp,
@@ -624,80 +641,182 @@
        }
 }
 
-static inline const char *
-xennetback_tx_check_packet(const netif_tx_request_t *txreq, int vlan)
+static const char *
+xennetback_tx_check_packet(const netif_tx_request_t *txreq)
 {
-       if (__predict_false(txreq->size < ETHER_HDR_LEN))
-               return "too small";
-
-       if (__predict_false(txreq->offset + txreq->size > PAGE_SIZE))
-               return "crossing a page boundary";
+       if (__predict_false((txreq->flags & NETTXF_more_data) == 0 &&
+           txreq->offset + txreq->size > PAGE_SIZE))
+               return "crossing page boundary";
 
-       int maxlen = ETHER_MAX_LEN - ETHER_CRC_LEN;
-       if (vlan)
-               maxlen += ETHER_VLAN_ENCAP_LEN;
-       if (__predict_false(txreq->size > maxlen))
-               return "too big";
-
-       /* Somewhat duplicit, MCLBYTES is > ETHER_MAX_LEN */
-       if (__predict_false(txreq->size > MCLBYTES))
-               return "bigger than MCLBYTES";
+       if (__predict_false(txreq->size > ETHER_MAX_LEN_JUMBO))
+               return "bigger than jumbo";
 
        return NULL;
 }
 
+static int
+xennetback_copy(struct ifnet *ifp, gnttab_copy_t *gop, int copycnt)
+{
+       /*
+        * Copy the data and ack it. Delaying it until the mbuf is
+        * freed will stall transmit.
+        */
+       if (HYPERVISOR_grant_table_op(GNTTABOP_copy, gop, copycnt) != 0) {
+               printf("%s: GNTTABOP_copy Tx failed", ifp->if_xname);
+               return EINVAL;
+       }
+
+       for (int i = 0; i < copycnt; i++) {
+               if (gop[i].status != GNTST_okay) {
+                       printf("%s GNTTABOP_copy[%d] Tx %d\n",
+                           ifp->if_xname, i, gop[i].status);
+                       return EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void
+xennetback_tx_copy_abort(struct ifnet *ifp, struct xnetback_instance *xneti,
+       int queued)
+{
+       struct xnetback_xstate *xst;
+
+       for (int i = 0; i < queued; i++) {
+               xst = &xneti->xni_xstate[i];
+
+               if (xst->xs_loaded) {
+                       KASSERT(xst->xs_m != NULL);
+                       bus_dmamap_unload(xneti->xni_xbusd->xbusd_dmat,
+                           xst->xs_dmamap);
+                       xst->xs_loaded = false;
+                       m_freem(xst->xs_m);
+               }
+
+               xennetback_tx_response(xneti, xst->xs_tx.id, NETIF_RSP_ERROR);
+               if_statinc(ifp, if_ierrors);
+       }
+}
+
 static void
 xennetback_tx_copy_process(struct ifnet *ifp, struct xnetback_instance *xneti,
        int queued)
 {
-       int i = 0;
        gnttab_copy_t *gop;
        struct xnetback_xstate *xst;
+       int copycnt = 0, seg = 0;
+       size_t goff = 0, segoff = 0, gsize, take;
+       bus_dmamap_t dm = NULL;
+       paddr_t ma;
 
-       /*
-        * Copy the data and ack it. Delaying it until the mbuf is
-        * freed will stall transmit.
-        */
-       if (HYPERVISOR_grant_table_op(GNTTABOP_copy, xneti->xni_gop_copy,
-           queued) != 0) {
-               printf("%s: GNTTABOP_copy Tx failed", ifp->if_xname);
-               goto abort;
-       }
-
-       for (; i < queued; i++) {
-               gop = &xneti->xni_gop_copy[i];
+       for (int i = 0; i < queued; i++) {
                xst = &xneti->xni_xstate[i];
 
-               if (gop->status != GNTST_okay) {
-                       printf("%s GNTTABOP_copy[%d] Tx %d\n",
-                           ifp->if_xname, i, gop->status);
-                       goto abort;
+               if (xst->xs_m != NULL) {
+                       KASSERT(xst->xs_m->m_pkthdr.len == xst->xs_tx.size);
+                       if (__predict_false(bus_dmamap_load_mbuf(
+                           xneti->xni_xbusd->xbusd_dmat,
+                           xst->xs_dmamap, xst->xs_m, BUS_DMA_NOWAIT) != 0))
+                               goto abort;
+                       xst->xs_loaded = true;
+                       dm = xst->xs_dmamap;
+                       seg = 0;
+                       goff = segoff = 0;
                }
 
-               xennetback_tx_response(xneti, xst->xs_id, NETIF_RSP_OKAY);
+               gsize = xst->xs_tx_size;
+               for (; seg < dm->dm_nsegs && gsize > 0; seg++) {
+                       bus_dma_segment_t *ds = &dm->dm_segs[seg];
+                       ma = ds->ds_addr;
+                       take = uimin(gsize, ds->ds_len);
+
+                       KASSERT(copycnt <= NB_XMIT_PAGES_BATCH);
+                       if (copycnt == NB_XMIT_PAGES_BATCH) {
+                               if (xennetback_copy(ifp, xneti->xni_gop_copy,
+                                   copycnt) != 0)
+                                       goto abort;
+                               copycnt = 0;
+                       }
+
+                       /* Queue for the copy */
+                       gop = &xneti->xni_gop_copy[copycnt++];
+                       memset(gop, 0, sizeof(*gop));
+                       gop->flags = GNTCOPY_source_gref;
+                       gop->len = take;
+
+                       gop->source.u.ref = xst->xs_tx.gref;
+                       gop->source.offset = xst->xs_tx.offset + goff;
+                       gop->source.domid = xneti->xni_domid;
+
+                       gop->dest.offset = (ma & PAGE_MASK) + segoff;
+                       KASSERT(gop->dest.offset <= PAGE_SIZE);
+                       gop->dest.domid = DOMID_SELF;
+                       gop->dest.u.gmfn = ma >> PAGE_SHIFT;
 
-               if (xst->xs_flags & NETTXF_csum_blank)
-                       xennet_checksum_fill(ifp, xst->xs_m);
-               else if (xst->xs_flags & NETTXF_data_validated)
-                       xst->xs_m->m_pkthdr.csum_flags = XN_M_CSUM_SUPPORTED;
-               m_set_rcvif(xst->xs_m, ifp);
+                       goff += take;
+                       gsize -= take;
+                       if (take + segoff < ds->ds_len) {
+                               segoff += take;
+                               /* Segment not completely consumed yet */
+                               break;
+                       }
+                       segoff = 0;
+               }
+               KASSERT(gsize == 0);
+       }
+       if (copycnt > 0) {
+               if (xennetback_copy(ifp, xneti->xni_gop_copy, copycnt) != 0)
+                       goto abort;
+               copycnt = 0;
+       }
 
-               if_percpuq_enqueue(ifp->if_percpuq, xst->xs_m);
+       /* If we got here, the whole copy was successful */
+       for (int i = 0; i < queued; i++) {
+               xst = &xneti->xni_xstate[i];
+
+               xennetback_tx_response(xneti, xst->xs_tx.id, NETIF_RSP_OKAY);
+
+               if (xst->xs_m != NULL) {
+                       KASSERT(xst->xs_loaded);
+                       bus_dmamap_unload(xneti->xni_xbusd->xbusd_dmat,
+                           xst->xs_dmamap);
 
-               bus_dmamap_unload(xneti->xni_xbusd->xbusd_dmat,
-                    xst->xs_dmamap);
+                       if (xst->xs_tx.flags & NETTXF_csum_blank)
+                               xennet_checksum_fill(ifp, xst->xs_m);
+                       else if (xst->xs_tx.flags & NETTXF_data_validated) {
+                               xst->xs_m->m_pkthdr.csum_flags =
+                                   XN_M_CSUM_SUPPORTED;
+                       }
+                       m_set_rcvif(xst->xs_m, ifp);
+
+                       if_percpuq_enqueue(ifp->if_percpuq, xst->xs_m);
+               }
        }
 
        return;
 
 abort:
-       for (; i < queued; i++) {
-               xst = &xneti->xni_xstate[i];
+       xennetback_tx_copy_abort(ifp, xneti, queued);
+}
+
+static int
+xennetback_tx_m0len_fragment(struct xnetback_instance *xneti,
+    int m0_len, int req_cons, int *cntp)
+{
+       netif_tx_request_t *txreq;
 
-               m_freem(xst->xs_m);
-               xennetback_tx_response(xneti, xst->xs_id, NETIF_RSP_ERROR);
-               if_statinc(ifp, if_ierrors);
-       }
+       /* This assumes all the requests are already pushed into the ring */ 
+       *cntp = 1;


