Source-Changes-HG archive

[src/trunk]: src/sys/arch/xen/xen Tighten up locking in the network driver.



details:   https://anonhg.NetBSD.org/src/rev/a776f4af6f6a
branches:  trunk
changeset: 772438:a776f4af6f6a
user:      cherry <cherry%NetBSD.org@localhost>
date:      Wed Jan 04 10:48:24 2012 +0000

description:
Tighten up locking in the network driver.
This probably needs more attention, since xennet_rx_mbuf_free() hooks
back into the network layer.
The locking could also be made finer-grained; performance testing could
add some insight.
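
A sketch of the constraint that last point describes, using the field and
function names from the diff below (the helper itself is illustrative, not
part of the commit): xennet_rx_mbuf_free() takes sc_rx_lock itself, and
kmutex(9) locks are not recursive, so an RX mbuf must never be freed while
sc_rx_lock is held.

    /*
     * Sketch only: hold sc_rx_lock for the free-list manipulation and
     * nothing else; drop it before anything that can call m_freem() on
     * an RX mbuf, or the free callback deadlocks re-taking the lock.
     */
    static void
    rxreq_put(struct xennet_xenbus_softc *sc, struct xennet_rxreq *req)
    {
            mutex_enter(&sc->sc_rx_lock);
            SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
            sc->sc_free_rxreql++;
            mutex_exit(&sc->sc_rx_lock);
    }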

diffstat:

 sys/arch/xen/xen/if_xennet_xenbus.c |  63 ++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 42 insertions(+), 21 deletions(-)

diffs (226 lines):

diff -r 535f0f26da4f -r a776f4af6f6a sys/arch/xen/xen/if_xennet_xenbus.c
--- a/sys/arch/xen/xen/if_xennet_xenbus.c       Wed Jan 04 10:30:23 2012 +0000
+++ b/sys/arch/xen/xen/if_xennet_xenbus.c       Wed Jan 04 10:48:24 2012 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: if_xennet_xenbus.c,v 1.56 2011/12/07 15:47:43 cegger Exp $      */
+/*      $NetBSD: if_xennet_xenbus.c,v 1.57 2012/01/04 10:48:24 cherry Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -85,7 +85,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.56 2011/12/07 15:47:43 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.57 2012/01/04 10:48:24 cherry Exp $");
 
 #include "opt_xen.h"
 #include "opt_nfs_boot.h"
@@ -790,6 +790,7 @@
        multicall_entry_t mcl[2];
 
        int s = splbio();
+       mutex_enter(&sc->sc_rx_lock);
        
        DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
        /* get back memory from RX ring */
@@ -802,18 +803,19 @@
                 */
                while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
-                   GRANT_STACK_REF)
+                   GRANT_STACK_REF) {
+                       mutex_exit(&sc->sc_rx_lock);
                        tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
+                       mutex_enter(&sc->sc_rx_lock);
+               }
 
                if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
                        /*
                         * this req is still granted. Get back the page or
                         * allocate a new one, and remap it.
                         */
-                       mutex_enter(&sc->sc_rx_lock);
                        SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
                            rxreq_next);
                        sc->sc_free_rxreql++;
-                       mutex_exit(&sc->sc_rx_lock);
 
                        switch (sc->sc_rx_feature) {
                        case FEATURE_RX_COPY:
@@ -868,6 +869,7 @@
                }
 
        }
+       mutex_exit(&sc->sc_rx_lock);
        splx(s);
        DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev)));
 }
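
An aside on the wait loop above: tsleep(9) does not release a kmutex for
the caller, hence the explicit unlock/relock around the sleep. A condition
variable would release and re-take the mutex atomically; sc_rx_cv below is
hypothetical, not a field of this driver:

    /* Hypothetical condvar form of the same wait (sc_rx_cv does not exist). */
    mutex_enter(&sc->sc_rx_lock);
    while (rxreq->rxreq_gntref == GRANT_STACK_REF)
            cv_timedwait(&sc->sc_rx_cv, &sc->sc_rx_lock, hz / 2);
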
@@ -880,27 +882,31 @@
 {
        struct xennet_rxreq *req = arg;
        struct xennet_xenbus_softc *sc = req->rxreq_sc;
-
+       
        mutex_enter(&sc->sc_rx_lock);
 
        /* puts back the RX request in the list of free RX requests */
        SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
        sc->sc_free_rxreql++;
 
-       mutex_exit(&sc->sc_rx_lock);
-
        /*
         * ring needs more requests to be pushed in, allocate some
         * RX buffers to catch-up with backend's consumption
         */
        req->rxreq_gntref = GRANT_INVALID_REF;
+       
        if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
            __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
+               mutex_exit(&sc->sc_rx_lock);
                xennet_alloc_rx_buffer(sc);
        }
-
+       else {
+               mutex_exit(&sc->sc_rx_lock);
+       }
+       
        if (m)
                pool_cache_put(mb_cache, m);
+
 }
 
 /*
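
The reordering in this hunk is the heart of the change: the free-list
update and the refill decision now happen under a single acquisition of
sc_rx_lock, and the lock is dropped before xennet_alloc_rx_buffer() runs.
Condensed, it is a snapshot-then-act pattern:

    /*
     * Decide under the lock, act after dropping it.  The snapshot may be
     * stale by the time the allocator runs; the driver tolerates that.
     */
    bool refill;

    mutex_enter(&sc->sc_rx_lock);
    refill = (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
        sc->sc_backend_status == BEST_CONNECTED);
    mutex_exit(&sc->sc_rx_lock);
    if (refill)
            xennet_alloc_rx_buffer(sc);
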
@@ -921,6 +927,7 @@
 again:
        resp_prod = sc->sc_tx_ring.sring->rsp_prod;
        xen_rmb();
+       mutex_enter(&sc->sc_tx_lock);
        for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
                req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
                KASSERT(req->txreq_id ==
@@ -938,12 +945,11 @@
                else
                        ifp->if_opackets++;
                xengnt_revoke_access(req->txreq_gntref);
+               m_freem(req->txreq_m);
+               SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
+       }
+       mutex_exit(&sc->sc_tx_lock);
 
-               m_freem(req->txreq_m);
-               mutex_enter(&sc->sc_tx_lock);
-               SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
-               mutex_exit(&sc->sc_tx_lock);
-       }
        sc->sc_tx_ring.rsp_cons = resp_prod;
        /* set new event and check for race with rsp_cons update */
        sc->sc_tx_ring.sring->rsp_event = 
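
The TX side moves the other way: instead of a lock round trip per
completed request, sc_tx_lock is now held across the whole completion
loop, m_freem() included. That is safe as long as freeing a transmit mbuf
never re-enters this driver's locks (only the RX mbufs carry the
xennet_rx_mbuf_free() callback), and it trims a lock round trip per
packet. Schematically:

    mutex_enter(&sc->sc_tx_lock);           /* one acquisition per batch */
    for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
            req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
            xengnt_revoke_access(req->txreq_gntref);
            m_freem(req->txreq_m);          /* no re-entry into sc_tx_lock */
            SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
    }
    mutex_exit(&sc->sc_tx_lock);
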
@@ -995,6 +1001,14 @@
 
        resp_prod = sc->sc_rx_ring.sring->rsp_prod;
        xen_rmb(); /* ensure we see replies up to resp_prod */
+
+       /*
+        * The locking in this loop needs to be done carefully, as
+        * m_free() will take sc_rx_lock via
+        * xennet_rx_mbuf_free().
+        */
+       mutex_enter(&sc->sc_rx_lock);
+       
        for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
                netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
                req = &sc->sc_rxreqs[rx->id];
@@ -1035,7 +1049,7 @@
 
                pa = req->rxreq_pa;
                va = req->rxreq_va;
-
+               
                if (sc->sc_rx_feature == FEATURE_RX_FLIP) {
                        /* remap the page */
                        mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -1064,8 +1078,10 @@
                                DPRINTFN(XEDB_EVENT,
                                    ("xennet_handler bad dest\n"));
                                /* packet not for us */
+                               mutex_exit(&sc->sc_rx_lock);
                                xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
                                    req);
+                               mutex_enter(&sc->sc_rx_lock);
                                continue;
                        }
                }
@@ -1073,7 +1089,9 @@
                if (__predict_false(m == NULL)) {
                        printf("%s: rx no mbuf\n", ifp->if_xname);
                        ifp->if_ierrors++;
+                       mutex_exit(&sc->sc_rx_lock);
                        xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
+                       mutex_enter(&sc->sc_rx_lock);
                        continue;
                }
                MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
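
Both error paths above, like the bad-destination path in the previous
hunk, repeat the same unlock/call/relock dance around
xennet_rx_mbuf_free(). A hypothetical wrapper (not in the commit) would
keep the pairing in one place:

    /* Hypothetical helper; asserts that the caller really holds the lock. */
    static void
    xennet_rx_free_unlocked(struct xennet_xenbus_softc *sc, vaddr_t va,
        struct xennet_rxreq *req)
    {
            KASSERT(mutex_owned(&sc->sc_rx_lock));
            mutex_exit(&sc->sc_rx_lock);
            xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
            mutex_enter(&sc->sc_rx_lock);
    }
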
@@ -1086,6 +1104,7 @@
                            M_DEVBUF, xennet_rx_mbuf_free, req);
                        m->m_flags |= M_EXT_RW; /* we own the buffer */
                        req->rxreq_gntref = GRANT_STACK_REF;
+                       mutex_exit(&sc->sc_rx_lock);
                } else {
                        /*
                         * This was our last receive buffer, allocate
@@ -1094,19 +1113,23 @@
                         */
                        m->m_len = min(MHLEN, rx->status);
                        m->m_pkthdr.len = 0;
+                       mutex_exit(&sc->sc_rx_lock);
                        m_copyback(m, 0, rx->status, pktp);
                        xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
                        if (m->m_pkthdr.len < rx->status) {
                                /* out of memory, just drop packets */
                                ifp->if_ierrors++;
                                m_freem(m);
+                               mutex_enter(&sc->sc_rx_lock);
                                continue;
                        }
                }
+               
                if ((rx->flags & NETRXF_csum_blank) != 0) {
                        xennet_checksum_fill(&m);
                        if (m == NULL) {
                                ifp->if_ierrors++;
+                               mutex_enter(&sc->sc_rx_lock);
                                continue;
                        }
                }
@@ -1119,10 +1142,13 @@
 
                /* Pass the packet up. */
                (*ifp->if_input)(ifp, m);
+               mutex_enter(&sc->sc_rx_lock);
        }
        xen_rmb();
        sc->sc_rx_ring.rsp_cons = i;
        RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
+       mutex_exit(&sc->sc_rx_lock);
+       
        if (more_to_do)
                goto again;
 
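
Taken together, the RX loop now maintains one invariant: sc_rx_lock is
held at the top of every iteration and at loop exit, and is dropped only
around calls that can free an mbuf or hand a packet to the stack. Every
continue path re-takes the lock first. Reduced to its shape:

    mutex_enter(&sc->sc_rx_lock);
    for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
            /* ring and rxreq bookkeeping, done under the lock */
            mutex_exit(&sc->sc_rx_lock);
            (*ifp->if_input)(ifp, m);       /* stack takes its own locks */
            mutex_enter(&sc->sc_rx_lock);   /* restore the invariant */
    }
    mutex_exit(&sc->sc_rx_lock);
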
@@ -1398,16 +1424,10 @@
 xennet_stop(struct ifnet *ifp, int disable)
 {
        struct xennet_xenbus_softc *sc = ifp->if_softc;
-       mutex_enter(&sc->sc_tx_lock);
-       mutex_enter(&sc->sc_rx_lock);
 
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        hypervisor_mask_event(sc->sc_evtchn);
        xennet_reset(sc);
-
-       mutex_exit(&sc->sc_rx_lock);
-       mutex_exit(&sc->sc_tx_lock);
-
 }
 
 void


