Source-Changes-HG archive


[src/netbsd-9]: src/sys/arch/xen/xen Pull up following revision(s) (requested...



details:   https://anonhg.NetBSD.org/src/rev/2f8f8c4ba664
branches:  netbsd-9
changeset: 1002173:2f8f8c4ba664
user:      martin <martin%NetBSD.org@localhost>
date:      Tue Feb 23 18:50:21 2021 +0000

description:
Pull up following revision(s) (requested by jdolecek in ticket #1211):

        sys/arch/xen/xen/xennetback_xenbus.c: revision 1.92 (via patch)

Remove support for the legacy rx-flip mode for xennet(4)/xvif(4), making
rx-copy (first shipped in NetBSD 6.0 in 2012) the only supported
mode.

This is mostly to simplify maintenance and future development.
rx-flip is not supported by Linux Dom0/DomU, and NetBSD Dom0/DomU has
defaulted to rx-copy for over 8 years now too, so there is little
need to keep the support for compatibility.

Besides compatibility there is no other reason to keep rx-flip:
page transfer is generally slower than copy due to the necessary
MMU/TLB manipulation, especially on MP systems.
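
For context, the primitive the surviving path is built on is the
GNTTABOP_copy hypercall from the public Xen grant-table interface: the
hypervisor copies the packet bytes into a buffer the frontend has
granted, so no page changes ownership. A minimal sketch, assuming the
usual NetBSD/Xen hypercall wrappers and the types from the public
grant_table.h; rx_copy_one() and its parameters are illustrative, not
code from this driver:

	/*
	 * Illustrative only: copy one packet into a frontend-granted RX
	 * buffer.  Kernel context assumed (memset() via <sys/systm.h>,
	 * errno values via <sys/errno.h>); struct fields follow the
	 * public Xen grant_table.h.
	 */
	static int
	rx_copy_one(domid_t otherend_id, grant_ref_t gref,
	    xen_pfn_t pkt_mfn, uint16_t offset, uint16_t len)
	{
		gnttab_copy_t gop;

		memset(&gop, 0, sizeof(gop));
		gop.flags = GNTCOPY_dest_gref;	/* destination is a grant ref */
		gop.source.domid = DOMID_SELF;
		gop.source.u.gmfn = pkt_mfn;	/* frame holding the packet */
		gop.source.offset = offset;
		gop.dest.domid = otherend_id;	/* the frontend domain */
		gop.dest.u.ref = gref;		/* gref from the RX request */
		gop.dest.offset = 0;
		gop.len = len;

		/* The hypervisor copies the bytes; no MMU/TLB work needed. */
		if (HYPERVISOR_grant_table_op(GNTTABOP_copy, &gop, 1) != 0)
			return EINVAL;
		return (gop.status == GNTST_okay) ? 0 : EIO;
	}

The real driver batches up to NB_XMIT_PAGES_BATCH such operations per
hypercall, as the xstart_gop_copy array in the diff below shows.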

diffstat:

 sys/arch/xen/xen/xennetback_xenbus.c |  367 +----------------------------------
 1 files changed, 10 insertions(+), 357 deletions(-)

diffs (truncated from 444 to 300 lines):

diff -r 27dff531132d -r 2f8f8c4ba664 sys/arch/xen/xen/xennetback_xenbus.c
--- a/sys/arch/xen/xen/xennetback_xenbus.c      Tue Feb 23 11:02:12 2021 +0000
+++ b/sys/arch/xen/xen/xennetback_xenbus.c      Tue Feb 23 18:50:21 2021 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: xennetback_xenbus.c,v 1.75 2019/03/09 08:42:25 maxv Exp $      */
+/*      $NetBSD: xennetback_xenbus.c,v 1.75.4.1 2021/02/23 18:50:21 martin Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.75 2019/03/09 08:42:25 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.75.4.1 2021/02/23 18:50:21 martin Exp $");
 
 #include "opt_xen.h"
 
@@ -129,7 +129,6 @@
        void xvifattach(int);
 static int  xennetback_ifioctl(struct ifnet *, u_long, void *);
 static void xennetback_ifstart(struct ifnet *);
-static void xennetback_ifsoftstart_transfer(void *);
 static void xennetback_ifsoftstart_copy(void *);
 static void xennetback_ifwatchdog(struct ifnet *);
 static int  xennetback_ifinit(struct ifnet *);
@@ -160,35 +159,10 @@
  */
 #define NB_XMIT_PAGES_BATCH 64
 
-/*
- * We will transfer a mapped page to the remote domain, and remap another
- * page in place immediately. For this we keep a list of pages available.
- * When the list is empty, we ask the hypervisor to give us
- * NB_XMIT_PAGES_BATCH pages back.
- */
-static unsigned long mcl_pages[NB_XMIT_PAGES_BATCH]; /* our physical pages */
-int mcl_pages_alloc; /* current index in mcl_pages */
-static int  xennetback_get_mcl_page(paddr_t *);
-static void xennetback_get_new_mcl_pages(void);
-
-/*
- * If we can't transfer the mbuf directly, we have to copy it to a page which
- * will be transferred to the remote domain. We use a pool_cache for this.
- */
-pool_cache_t xmit_pages_cache;
-
 /* arrays used in xennetback_ifstart(), too large to allocate on stack */
 /* XXXSMP */
-static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH];
-static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1];
-static gnttab_transfer_t xstart_gop_transfer[NB_XMIT_PAGES_BATCH];
 static gnttab_copy_t     xstart_gop_copy[NB_XMIT_PAGES_BATCH];
 static struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH];
-static struct _pages_pool_free {
-       vaddr_t va;
-       paddr_t pa;
-} pages_pool_free[NB_XMIT_PAGES_BATCH];
-
 
 static inline void
 xni_pkt_unmap(struct xni_pkt *pkt, vaddr_t pkt_va)
@@ -200,31 +174,11 @@
 void
 xvifattach(int n)
 {
-       int i;
-       struct pglist mlist;
-       struct vm_page *pg;
-
        XENPRINTF(("xennetback_init\n"));
 
-       /*
-        * steal some non-managed pages to the VM system, to replace
-        * mbuf cluster or xmit_pages_pool pages given to foreign domains.
-        */
-       if (uvm_pglistalloc(PAGE_SIZE * NB_XMIT_PAGES_BATCH, 0, 0xffffffff,
-           0, 0, &mlist, NB_XMIT_PAGES_BATCH, 0) != 0)
-               panic("xennetback_init: uvm_pglistalloc");
-       for (i = 0, pg = mlist.tqh_first; pg != NULL;
-           pg = pg->pageq.queue.tqe_next, i++)
-               mcl_pages[i] = xpmap_ptom(VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT;
-       if (i != NB_XMIT_PAGES_BATCH)
-               panic("xennetback_init: %d mcl pages", i);
-       mcl_pages_alloc = NB_XMIT_PAGES_BATCH - 1;
-
        /* initialise pools */
        pool_init(&xni_pkt_pool, sizeof(struct xni_pkt), 0, 0, 0,
            "xnbpkt", NULL, IPL_VM);
-       xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL,
-           IPL_VM, NULL, NULL, NULL);
 
        SLIST_INIT(&xnetback_instances);
        mutex_init(&xnetback_lock, MUTEX_DEFAULT, IPL_NONE);
@@ -342,14 +296,6 @@
                            xbusd->xbusd_path, err);
                        goto abort_xbt;
                }
-               err = xenbus_printf(xbt, xbusd->xbusd_path,
-                   "feature-rx-flip", "%d", 1);
-               if (err) {
-                       aprint_error_ifnet(ifp,
-                           "failed to write %s/feature-rx-flip: %d\n",
-                           xbusd->xbusd_path, err);
-                       goto abort_xbt;
-               }
        } while ((err = xenbus_transaction_end(xbt, 0)) == EAGAIN);
        if (err) {
                aprint_error_ifnet(ifp,
@@ -472,21 +418,19 @@
        }
        err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
            "request-rx-copy", &rx_copy, 10);
-       if (err == ENOENT)
-               rx_copy = 0;
-       else if (err) {
+       if (err == ENOENT || !rx_copy) {
+               xenbus_dev_fatal(xbusd, err,
+                   "%s/request-rx-copy not supported by frontend",
+                   xbusd->xbusd_otherend);
+               return -1;
+       } else if (err) {
                xenbus_dev_fatal(xbusd, err, "reading %s/request-rx-copy",
                    xbusd->xbusd_otherend);
                return -1;
        }
 
-       if (rx_copy)
-               xneti->xni_softintr = softint_establish(SOFTINT_NET,
-                   xennetback_ifsoftstart_copy, xneti);
-       else
-               xneti->xni_softintr = softint_establish(SOFTINT_NET,
-                   xennetback_ifsoftstart_transfer, xneti);
-
+       xneti->xni_softintr = softint_establish(SOFTINT_NET,
+           xennetback_ifsoftstart_copy, xneti);
        if (xneti->xni_softintr == NULL) {
                err = ENOMEM;
                xenbus_dev_fatal(xbusd, ENOMEM,
@@ -662,48 +606,6 @@
        return found;
 }
 
-/* get a page to replace a mbuf cluster page given to a domain */
-static int
-xennetback_get_mcl_page(paddr_t *map)
-{
-       if (mcl_pages_alloc < 0) {
-               /*
-                * we exhausted our allocation. We can't allocate new ones yet
-                * because the current pages may not have been loaned to
-                * the remote domain yet. We have to let the caller do this.
-                */
-               return -1;
-       }
-
-       *map = ((paddr_t)mcl_pages[mcl_pages_alloc]) << PAGE_SHIFT;
-       mcl_pages_alloc--;
-       return 0;
-}
-
-static void
-xennetback_get_new_mcl_pages(void)
-{
-       int nb_pages;
-       struct xen_memory_reservation res;
-
-       /* get some new pages. */
-       set_xen_guest_handle(res.extent_start, mcl_pages);
-       res.nr_extents = NB_XMIT_PAGES_BATCH;
-       res.extent_order = 0;
-       res.address_bits = 0;
-       res.domid = DOMID_SELF;
-
-       nb_pages = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
-       if (nb_pages <= 0) {
-               printf("xennetback: can't get new mcl pages (%d)\n", nb_pages);
-               return;
-       }
-       if (nb_pages != NB_XMIT_PAGES_BATCH)
-               printf("xennetback: got only %d new mcl pages\n", nb_pages);
-
-       mcl_pages_alloc = nb_pages - 1;
-}
-
 static inline void
 xennetback_tx_response(struct xnetback_instance *xneti, int id, int status)
 {
@@ -965,255 +867,6 @@
        softint_schedule(xneti->xni_softintr);
 }
 
-static void
-xennetback_ifsoftstart_transfer(void *arg)
-{
-       struct xnetback_instance *xneti = arg;
-       struct ifnet *ifp = &xneti->xni_if;
-       struct mbuf *m;
-       vaddr_t xmit_va;
-       paddr_t xmit_pa;
-       paddr_t xmit_ma;
-       paddr_t newp_ma = 0; /* XXX gcc */
-       int i, j, nppitems;
-       mmu_update_t *mmup;
-       multicall_entry_t *mclp;
-       netif_rx_response_t *rxresp;
-       netif_rx_request_t rxreq;
-       RING_IDX req_prod, resp_prod;
-       int do_event = 0;
-       gnttab_transfer_t *gop;
-       int id, offset;
-
-       XENPRINTF(("xennetback_ifsoftstart_transfer "));
-       int s = splnet();
-       if (__predict_false(
-           (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
-               splx(s);
-               return;
-       }
-
-       while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
-               XENPRINTF(("pkt\n"));
-               req_prod = xneti->xni_rxring.sring->req_prod;
-               resp_prod = xneti->xni_rxring.rsp_prod_pvt;
-               xen_rmb();
-
-               mmup = xstart_mmu;
-               mclp = xstart_mcl;
-               gop = xstart_gop_transfer;
-               for (nppitems = 0, i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) {
-                       XENPRINTF(("have a packet\n"));
-                       IFQ_POLL(&ifp->if_snd, m);
-                       if (__predict_false(m == NULL))
-                               panic("xennetback_ifstart: IFQ_POLL");
-                       if (__predict_false(
-                           req_prod == xneti->xni_rxring.req_cons ||
-                           xneti->xni_rxring.req_cons - resp_prod ==
-                           NET_RX_RING_SIZE)) {
-                               /* out of ring space */
-                               XENPRINTF(("xennetback_ifstart: ring full "
-                                   "req_prod 0x%x req_cons 0x%x resp_prod "
-                                   "0x%x\n",
-                                   req_prod, xneti->xni_rxring.req_cons,
-                                   resp_prod));
-                               ifp->if_timer = 1;
-                               break;
-                       }
-                       if (__predict_false(i == NB_XMIT_PAGES_BATCH))
-                               break; /* we filled the array */
-                       if (__predict_false(
-                           xennetback_get_mcl_page(&newp_ma) != 0))
-                               break; /* out of memory */
-                       if ((m->m_flags & M_EXT_CLUSTER) != 0 &&
-                           !M_READONLY(m) && MCLBYTES == PAGE_SIZE) {
-                               /* we can give this page away */
-                               xmit_pa = m->m_ext.ext_paddr;
-                               xmit_ma = xpmap_ptom(xmit_pa);
-                               xmit_va = (vaddr_t)m->m_ext.ext_buf;
-                               KASSERT(xmit_pa != M_PADDR_INVALID);
-                               KASSERT((xmit_va & PAGE_MASK) == 0);
-                               offset = m->m_data - m->m_ext.ext_buf;
-                       } else {
-                               /* we have to copy the packet */
-                               xmit_va = (vaddr_t)pool_cache_get_paddr(
-                                   xmit_pages_cache, PR_NOWAIT, &xmit_pa);
-                               if (__predict_false(xmit_va == 0))
-                                       break; /* out of memory */
-
-                               KASSERT(xmit_pa != POOL_PADDR_INVALID);
-                               xmit_ma = xpmap_ptom(xmit_pa);
-                               XENPRINTF(("xennetback_get_xmit_page: got va "
-                                   "0x%x ma 0x%x\n", (u_int)xmit_va,
-                                   (u_int)xmit_ma));
-                               m_copydata(m, 0, m->m_pkthdr.len,
-                                   (char *)xmit_va + LINUX_REQUESTED_OFFSET);
-                               offset = LINUX_REQUESTED_OFFSET;
-                               pages_pool_free[nppitems].va = xmit_va;
-                               pages_pool_free[nppitems].pa = xmit_pa;
-                               nppitems++;
-                       }
-                       /* start filling ring */
-                       RING_COPY_REQUEST(&xneti->xni_rxring,
-                           xneti->xni_rxring.req_cons, &rxreq);
-                       gop->ref = rxreq.gref;
-                       id = rxreq.id;
-                       xen_rmb();
-                       xneti->xni_rxring.req_cons++;
-                       rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
-                           resp_prod);
-                       rxresp->id = id;
-                       rxresp->offset = offset;
-                       rxresp->status = m->m_pkthdr.len;
-                       if ((m->m_pkthdr.csum_flags &
-                           (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
-                               rxresp->flags = NETRXF_csum_blank;
-                       } else {
-                               rxresp->flags = 0;
-                       }
-                       /*
-                        * transfers the page containing the packet to the
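
For contrast, the removed path was built on GNTTABOP_transfer: the
backend donated the machine frame holding the packet to the frontend,
then had to replenish its own page pool through
XENMEM_increase_reservation (the deleted xennetback_get_new_mcl_pages()
above), paying for MMU/TLB updates on every packet. A minimal sketch of
that donation step, with rx_flip_one() and its parameters again
illustrative rather than taken from the driver:

	/*
	 * Illustrative only: donate one machine frame to the frontend.
	 * This is the page-flip step the deleted code performed; after
	 * a successful transfer the backend no longer owns "mfn" and
	 * must obtain a replacement page from the hypervisor.  Struct
	 * fields follow the public Xen grant_table.h.
	 */
	static int
	rx_flip_one(domid_t otherend_id, grant_ref_t gref, xen_pfn_t mfn)
	{
		gnttab_transfer_t gop;

		gop.mfn = mfn;			/* frame being given away */
		gop.domid = otherend_id;	/* receiving (frontend) domain */
		gop.ref = gref;			/* gref from the RX request */

		if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, &gop, 1) != 0)
			return EINVAL;
		return (gop.status == GNTST_okay) ? 0 : EIO;
	}

This per-packet page accounting and pool refilling is exactly the
bookkeeping the change deletes.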


