Source-Changes-HG archive


[src/trunk]: src/sys/arch/xen/xen convert to bus_dma(9), remove now not neces...



details:   https://anonhg.NetBSD.org/src/rev/d5855aa56ba9
branches:  trunk
changeset: 1009035:d5855aa56ba9
user:      jdolecek <jdolecek%NetBSD.org@localhost>
date:      Fri Apr 10 18:03:06 2020 +0000

description:
convert to bus_dma(9), remove the now-unnecessary XENPVHVM redefines
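
For readers unfamiliar with the interface, the conversion follows the
standard bus_dma(9) lifecycle: create a DMA map for each request slot,
load an mbuf into it to obtain the device-visible (machine) address, and
unload the map once the transfer completes.  The sketch below shows that
pattern in isolation; the function name and error handling are
illustrative only, not the driver's exact code.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>

/*
 * Illustrative bus_dma(9) lifecycle for a single-segment, page-sized
 * mapping, as used per tx/rx request in this driver.  "tag" corresponds
 * to the sc_dmat handed down in the attach arguments.
 */
static int
example_dma_cycle(bus_dma_tag_t tag, struct mbuf *m)
{
	bus_dmamap_t map;
	bus_addr_t ma;
	int error;

	/* One segment, at most one page, page-aligned. */
	error = bus_dmamap_create(tag, PAGE_SIZE, 1, PAGE_SIZE,
	    PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map);
	if (error)
		return error;

	error = bus_dmamap_load_mbuf(tag, map, m, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(tag, map);
		return error;
	}

	/* Device-visible address of the buffer, e.g. for a Xen grant. */
	ma = map->dm_segs[0].ds_addr;
	(void)ma;

	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	return 0;
}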

diffstat:

 sys/arch/xen/xen/if_xennet_xenbus.c |  245 ++++++++++++++++-------------------
 1 file changed, 115 insertions(+), 130 deletions(-)

diffs (truncated from 443 to 300 lines):

diff -r a2e5ff16a104 -r d5855aa56ba9 sys/arch/xen/xen/if_xennet_xenbus.c
--- a/sys/arch/xen/xen/if_xennet_xenbus.c       Fri Apr 10 17:26:46 2020 +0000
+++ b/sys/arch/xen/xen/if_xennet_xenbus.c       Fri Apr 10 18:03:06 2020 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: if_xennet_xenbus.c,v 1.109 2020/04/07 11:47:06 jdolecek Exp $      */
+/*      $NetBSD: if_xennet_xenbus.c,v 1.110 2020/04/10 18:03:06 jdolecek Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -81,7 +81,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.109 2020/04/07 11:47:06 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.110 2020/04/10 18:03:06 jdolecek Exp $");
 
 #include "opt_xen.h"
 #include "opt_nfs_boot.h"
@@ -143,21 +143,6 @@
 #define DPRINTFN(n,x)
 #endif
 
-#ifdef XENPVHVM
-/* Glue for p2m table stuff. Should be removed eventually */
-#define xpmap_mtop_masked(mpa) (mpa & ~PAGE_MASK)
-#define xpmap_mtop(mpa) (mpa & ~PTE_4KFRAME)
-#define xpmap_ptom_masked(mpa) (mpa & ~PAGE_MASK)
-#define xpmap_ptom(mpa) (mpa & ~PTE_4KFRAME)
-#define xpmap_ptom_map(ppa, mpa)
-#define xpmap_ptom_unmap(ppa)
-#define xpmap_ptom_isvalid 1 /* XXX: valid PA check */
-#define xpmap_pg_nx pmap_pg_nx /* We use the native setting */
-#define xpq_flush_queue() tlbflush()
-#endif /* XENPVHVM */
-
-extern pt_entry_t xpmap_pg_nx;
-
 #define GRANT_INVALID_REF -1 /* entry is free */
 
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
@@ -168,15 +153,15 @@
        uint16_t txreq_id; /* ID passed to backend */
        grant_ref_t txreq_gntref; /* grant ref of this request */
        struct mbuf *txreq_m; /* mbuf being transmitted */
+       bus_dmamap_t txreq_dmamap;
 };
 
 struct xennet_rxreq {
        SLIST_ENTRY(xennet_rxreq) rxreq_next;
        uint16_t rxreq_id; /* ID passed to backend */
        grant_ref_t rxreq_gntref; /* grant ref of this request */
-/* va/pa for this receive buf. ma will be provided by backend */
-       paddr_t rxreq_pa;
-       vaddr_t rxreq_va;
+       struct mbuf *rxreq_m;
+       bus_dmamap_t rxreq_dmamap;
 };
 
 struct xennet_xenbus_softc {
@@ -184,6 +169,7 @@
        struct ethercom sc_ethercom;
        uint8_t sc_enaddr[6];
        struct xenbus_device *sc_xbusd;
+       bus_dma_tag_t sc_dmat;
 
        netif_tx_front_ring_t sc_tx_ring;
        netif_rx_front_ring_t sc_rx_ring;
@@ -272,17 +258,13 @@
        unsigned long uval;
        extern int ifqmaxlen; /* XXX */
        char mac[32];
-#ifdef XENNET_DEBUG
-       char **dir;
-       int dir_n = 0;
-       char id_str[20];
-#endif
 
        aprint_normal(": Xen Virtual Network Interface\n");
        sc->sc_dev = self;
 
        sc->sc_xbusd = xa->xa_xbusd;
        sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
+       sc->sc_dmat = xa->xa_dmat;
 
        /* xenbus ensure 2 devices can't be probed at the same time */
        if (if_xennetrxbuf_cache_inited == 0) {
@@ -295,7 +277,14 @@
        mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
        SLIST_INIT(&sc->sc_txreq_head);
        for (i = 0; i < NET_TX_RING_SIZE; i++) {
-               sc->sc_txreqs[i].txreq_id = i;
+               struct xennet_txreq *txreq = &sc->sc_txreqs[i];
+       
+               txreq->txreq_id = i;
+               if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
+                   PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
+                   &txreq->txreq_dmamap) != 0)
+                       break;
+
                SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
                    txreq_next);
        }
@@ -305,9 +294,9 @@
        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
                rxreq->rxreq_id = i;
-               rxreq->rxreq_va = (vaddr_t)pool_cache_get_paddr(
-                   if_xennetrxbuf_cache, PR_WAITOK, &rxreq->rxreq_pa);
-               if (rxreq->rxreq_va == 0)
+               if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
+                   PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
+                   &rxreq->rxreq_dmamap) != 0)
                        break;
                rxreq->rxreq_gntref = GRANT_INVALID_REF;
                SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
@@ -443,10 +432,9 @@
        xennet_free_rx_buffer(sc);
        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
-               if (rxreq->rxreq_va != 0) {
-                       pool_cache_put_paddr(if_xennetrxbuf_cache,
-                           (void *)rxreq->rxreq_va, rxreq->rxreq_pa);
-                       rxreq->rxreq_va = 0;
+               if (rxreq->rxreq_m != NULL) {
+                       m_freem(rxreq->rxreq_m);
+                       rxreq->rxreq_m = NULL;
                }
        }
        mutex_exit(&sc->sc_rx_lock);
@@ -692,7 +680,6 @@
  * in the ring. This allows the backend to use them to communicate with
  * frontend when some data is destined to frontend
  */
-
 static void
 xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
 {
@@ -700,6 +687,10 @@
        RING_IDX i;
        struct xennet_rxreq *req;
        int otherend_id, notify;
+       struct mbuf *m;
+       vaddr_t va;
+       paddr_t pa, ma;
+       struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 
        KASSERT(mutex_owned(&sc->sc_rx_lock));
 
@@ -709,15 +700,54 @@
                req  = SLIST_FIRST(&sc->sc_rxreq_head);
                KASSERT(req != NULL);
                KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
+               KASSERT(req->rxreq_m == NULL);
+               KASSERT(req->rxreq_gntref == GRANT_INVALID_REF);
+
+               MGETHDR(m, M_DONTWAIT, MT_DATA);
+               if (__predict_false(m == NULL)) {
+                       printf("%s: rx no mbuf\n", ifp->if_xname);
+                       break;
+               }
+ 
+               va = (vaddr_t)pool_cache_get_paddr(
+                   if_xennetrxbuf_cache, PR_NOWAIT, &pa);
+               if (__predict_false(va == 0)) {
+                       printf("%s: rx no cluster\n", ifp->if_xname);
+                       m_freem(m);
+                       break;
+               }
+
+               MEXTADD(m, va, PAGE_SIZE,
+                   M_DEVBUF, xennet_rx_mbuf_free, NULL);
+               m->m_len = m->m_pkthdr.len = PAGE_SIZE;
+               m->m_ext.ext_paddr = pa;
+               m->m_flags |= M_EXT_RW; /* we own the buffer */
+
+               /* Set M_EXT_CLUSTER so that load_mbuf uses m_ext.ext_paddr */
+               m->m_flags |= M_EXT_CLUSTER;
+               if (__predict_false(bus_dmamap_load_mbuf(sc->sc_dmat,
+                   req->rxreq_dmamap, m, BUS_DMA_NOWAIT) != 0)) {
+                       printf("%s: rx mbuf load failed\n", ifp->if_xname);
+                       m->m_flags &= ~M_EXT_CLUSTER;
+                       m_freem(m);
+                       break;
+               }
+               m->m_flags &= ~M_EXT_CLUSTER;
+
+               KASSERT(req->rxreq_dmamap->dm_nsegs == 1);
+               ma = req->rxreq_dmamap->dm_segs[0].ds_addr;
+
+               if (xengnt_grant_access(otherend_id, trunc_page(ma),
+                   0, &req->rxreq_gntref) != 0) {
+                       m_freem(m);
+                       break;
+               }
+
+               req->rxreq_m = m;
+
                RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
                    req->rxreq_id;
 
-               if (xengnt_grant_access(otherend_id,
-                   xpmap_ptom_masked(req->rxreq_pa),
-                   0, &req->rxreq_gntref) != 0) {
-                       goto out_loop;
-               }
-
                RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
                    req->rxreq_gntref;
 
@@ -725,16 +755,13 @@
                sc->sc_free_rxreql--;
        }
 
-out_loop:
-       if (i == 0) {
-               return;
+       /* Notify backend if more Rx is possible */
+       if (i > 0) {
+               sc->sc_rx_ring.req_prod_pvt = req_prod + i;
+               RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_rx_ring, notify);
+               if (notify)
+                       hypervisor_notify_via_evtchn(sc->sc_evtchn);
        }
-
-       sc->sc_rx_ring.req_prod_pvt = req_prod + i;
-       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_rx_ring, notify);
-       if (notify)
-               hypervisor_notify_via_evtchn(sc->sc_evtchn);
-       return;
 }
 
 /*
@@ -765,6 +792,10 @@
                        rxreq->rxreq_gntref = GRANT_INVALID_REF;
                }
 
+               if (rxreq->rxreq_m != NULL) {
+                       m_freem(rxreq->rxreq_m);
+                       rxreq->rxreq_m = NULL;
+               }
        }
        DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev)));
 }
@@ -827,6 +858,8 @@
                KASSERT(req->txreq_id ==
                    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
                KASSERT(xengnt_status(req->txreq_gntref) == 0);
+               KASSERT(req->txreq_m != NULL);
+
                if (__predict_false(
                    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
                    NETIF_RSP_OKAY))
@@ -834,7 +867,9 @@
                else
                        if_statinc(ifp, if_opackets);
                xengnt_revoke_access(req->txreq_gntref);
+               bus_dmamap_unload(sc->sc_dmat, req->txreq_dmamap);
                m_freem(req->txreq_m);
+               req->txreq_m = NULL;
                SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
        }
 
@@ -860,10 +895,7 @@
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        RING_IDX resp_prod, i;
        struct xennet_rxreq *req;
-       paddr_t pa;
-       vaddr_t va;
        struct mbuf *m;
-       void *pktp;
        int more_to_do;
 
        if (sc->sc_backend_status != BEST_CONNECTED)
@@ -891,47 +923,16 @@
                xengnt_revoke_access(req->rxreq_gntref);
                req->rxreq_gntref = GRANT_INVALID_REF;
 
-               pa = req->rxreq_pa;
-               va = req->rxreq_va;
+               m = req->rxreq_m;
+               req->rxreq_m = NULL;
 
-               pktp = (void *)(va + rx->offset);
-#ifdef XENNET_DEBUG_DUMP
-               xennet_hex_dump(pktp, rx->status, "r", rx->id);
-#endif
-               MGETHDR(m, M_DONTWAIT, MT_DATA);
-               if (__predict_false(m == NULL)) {
-                       printf("%s: rx no mbuf\n", ifp->if_xname);
-                       if_statinc(ifp, if_ierrors);
-                       xennet_rx_free_req(sc, req);
-                       continue;
-               }
+               m->m_len = m->m_pkthdr.len = rx->status;
+               bus_dmamap_sync(sc->sc_dmat, req->rxreq_dmamap, 0,
+                    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
+
                MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
-
                m_set_rcvif(m, ifp);


