Re: Issue awge(4) on NetBSD/CubieBoard2
hi
On 12/21/2014 05:50 PM, MAEKAWA Masahide wrote:
I found an issue with awge(4) on NetBSD/CubieBoard2.
When the TX ring is almost full, ``map'' (line 858) still holds a valid DMA map,
which is then overwritten (line 869) and destroyed (line 870) by bus_dma(9).
After this, tx_intr does bus_dmamap_sync(9) and panics.
I have attached a temporary fix that just avoids the kernel panic.
Before: https://twitter.com/cvsync/status/545781495898451968
After: https://twitter.com/cvsync/status/546531923342798848 (works fine)
Some bugs seem to remain (especially in heavy-load cases).
Taken from sys/dev/ic/dwc_gmac.c (rev. 1.29):
857:    first = sc->sc_txq.t_cur;
858:    map = sc->sc_txq.t_data[first].td_map;
859:    flags = 0;
860:
861:    error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
862:        BUS_DMA_WRITE|BUS_DMA_NOWAIT);
863:    if (error != 0) {
864:        aprint_error_dev(sc->sc_dev, "could not map mbuf "
865:            "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
866:        return error;
867:    }
868:
869:    if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
870:        bus_dmamap_unload(sc->sc_dmat, map);
871:        return ENOBUFS;
872:    }
In the dm_nsegs > 1 case, this state should be possible:
first segment: not owned by device
last segment:  owned by device
In dwc_gmac_tx_intr(), t_queued is decremented starting at the first segment:
http://nxr.netbsd.org/xref/src/sys/dev/ic/dwc_gmac.c#978
but td_map is not unloaded until the last segment has been freed by the device:
http://nxr.netbsd.org/xref/src/sys/dev/ic/dwc_gmac.c#1000
So sc->sc_txq.t_data[first].td_map in dwc_gmac_queue() can still hold a
valid (still in use) DMA map, which then gets overwritten and destroyed.
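
To make the sequence concrete, here is a minimal stand-alone model of the
accounting problem (just an illustration, not the driver code; RING_COUNT,
map_loaded[] and the helper names are made up):

#include <stdbool.h>
#include <stdio.h>

#define RING_COUNT 4                    /* toy ring, not AWGE_TX_RING_COUNT */

static bool map_loaded[RING_COUNT];     /* stands in for "td_map is in use" */
static int t_queued, t_cur, t_next;

static int
queue_packet(int nsegs)
{
        int first = t_cur;

        /* like dwc_gmac_queue(): the load runs before any capacity
         * check, clobbering whatever td_map[first] still holds */
        if (map_loaded[first])
                printf("BUG: overwrote in-use map at slot %d\n", first);
        map_loaded[first] = true;       /* bus_dmamap_load_mbuf() */

        if (t_queued + nsegs > RING_COUNT) {
                map_loaded[first] = false;      /* bus_dmamap_unload() */
                return -1;                      /* ENOBUFS */
        }
        t_queued += nsegs;
        t_cur = (t_cur + nsegs) % RING_COUNT;
        return 0;
}

static void
reap_descriptors(int ndescs)
{
        /* like the current tx_intr: capacity is returned per scanned
         * descriptor, but the map is only unloaded at the last segment */
        while (ndescs-- > 0 && t_queued > 0) {
                t_queued--;
                t_next = (t_next + 1) % RING_COUNT;
        }
}

int
main(void)
{
        queue_packet(2);        /* packet A fills slots 0-1 */
        queue_packet(2);        /* packet B fills slots 2-3; ring full */
        reap_descriptors(1);    /* device released only A's first segment */
        queue_packet(2);        /* t_cur is back at 0: A's map is clobbered,
                                 * then unloaded on the ENOBUFS path */
        return 0;
}

As soon as the device releases packet A's first segment, t_queued drops while
t_cur has already wrapped around to slot 0, so the next queue attempt loads
over A's still-active map and, on the ENOBUFS path, unloads it as well.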
Please try the attached patch.
--
FUKAUMI Naoki
diff --git a/sys/arch/arm/rockchip/rockchip_emac.c b/sys/arch/arm/rockchip/rockchip_emac.c
index 916bbd7..17a92f0 100644
--- a/sys/arch/arm/rockchip/rockchip_emac.c
+++ b/sys/arch/arm/rockchip/rockchip_emac.c
@@ -84,6 +84,7 @@ struct rkemac_txdata {
bus_dmamap_t td_map;
bus_dmamap_t td_active;
struct mbuf *td_m;
+ int td_nbufs;
};
struct rkemac_txring {
@@ -574,6 +575,10 @@ rkemac_start(struct ifnet *ifp)
}
IFQ_DEQUEUE(&ifp->if_snd, m0);
bpf_mtap(ifp, m0);
+ if (sc->sc_txq.t_queued == RKEMAC_TX_RING_COUNT) {
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
}
if (sc->sc_txq.t_queued != old) {
@@ -679,15 +684,14 @@ rkemac_tick(void *priv)
static int
rkemac_queue(struct rkemac_softc *sc, struct mbuf *m0)
{
- struct rkemac_txdesc *tx;
struct rkemac_txdata *td = NULL;
+ struct rkemac_txdesc *tx = NULL;
bus_dmamap_t map;
- uint32_t info, len;
+ uint32_t info, len, padlen;
int error, first;
first = sc->sc_txq.t_cur;
map = sc->sc_txq.t_data[first].td_map;
- info = 0;
error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT);
@@ -698,8 +702,14 @@ rkemac_queue(struct rkemac_softc *sc, struct mbuf *m0)
KASSERT(map->dm_nsegs > 0);
- const u_int nbufs = map->dm_nsegs +
- ((m0->m_pkthdr.len < ETHER_MIN_LEN) ? 1 : 0);
+ int nbufs = map->dm_nsegs;
+
+ if (m0->m_pkthdr.len < ETHER_MIN_LEN) {
+ padlen = ETHER_MIN_LEN - m0->m_pkthdr.len;
+ nbufs++;
+ } else {
+ padlen = 0;
+ }
if (sc->sc_txq.t_queued + nbufs > RKEMAC_TX_RING_COUNT) {
bus_dmamap_unload(sc->sc_dmat, map);
@@ -716,34 +726,34 @@ rkemac_queue(struct rkemac_softc *sc, struct mbuf *m0)
tx->tx_info = htole32(info | len);
info &= ~EMAC_TXDESC_FIRST;
info |= EMAC_TXDESC_OWN;
- if (i == map->dm_nsegs - 1) {
- if (m0->m_pkthdr.len < ETHER_MIN_LEN) {
- sc->sc_txq.t_queued++;
- sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
- % RKEMAC_TX_RING_COUNT;
- td = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
- tx = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
- tx->tx_ptr = htole32(sc->sc_pad_physaddr);
- len = __SHIFTIN(ETHER_MIN_LEN -
- m0->m_pkthdr.len , EMAC_TXDESC_TXLEN);
- tx->tx_info = htole32(info | len);
- }
- tx->tx_info |= htole32(EMAC_TXDESC_LAST);
- }
sc->sc_txq.t_queued++;
- sc->sc_txq.t_cur =
- (sc->sc_txq.t_cur + 1) % RKEMAC_TX_RING_COUNT;
+ sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
}
- sc->sc_txq.t_desc[first].tx_info |= htole32(EMAC_TXDESC_OWN);
+ if (padlen) {
+ td = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
+ tx = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
+
+ tx->tx_ptr = htole32(sc->sc_pad_physaddr);
+ len = __SHIFTIN(padlen, EMAC_TXDESC_TXLEN);
+ tx->tx_info = htole32(info | len);
+
+ sc->sc_txq.t_queued++;
+ sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
+ }
+
+ tx->tx_info |= htole32(EMAC_TXDESC_LAST);
td->td_m = m0;
td->td_active = map;
+ td->td_nbufs = nbufs;
bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ sc->sc_txq.t_desc[first].tx_info |= htole32(EMAC_TXDESC_OWN);
+
return 0;
}
@@ -770,8 +780,7 @@ rkemac_txintr(struct rkemac_softc *sc)
struct ifnet *ifp = &sc->sc_ec.ec_if;
int i;
- for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
- i = TX_NEXT(i), sc->sc_txq.t_queued--) {
+ for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
struct rkemac_txdata *td = &sc->sc_txq.t_data[i];
struct rkemac_txdesc *tx = &sc->sc_txq.t_desc[i];
@@ -792,6 +801,8 @@ rkemac_txintr(struct rkemac_softc *sc)
m_freem(td->td_m);
td->td_m = NULL;
+
+ sc->sc_txq.t_queued -= td->td_nbufs;
}
sc->sc_txq.t_next = i;
diff --git a/sys/dev/ic/dwc_gmac.c b/sys/dev/ic/dwc_gmac.c
index 75fbe99..580860a 100644
--- a/sys/dev/ic/dwc_gmac.c
+++ b/sys/dev/ic/dwc_gmac.c
@@ -803,6 +803,10 @@ dwc_gmac_start(struct ifnet *ifp)
}
IFQ_DEQUEUE(&ifp->if_snd, m0);
bpf_mtap(ifp, m0);
+ if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
}
if (sc->sc_txq.t_queued != old) {
@@ -847,7 +851,7 @@ dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
struct dwc_gmac_dev_dmadesc *desc = NULL;
struct dwc_gmac_tx_data *data = NULL;
bus_dmamap_t map;
- uint32_t flags, len;
+ uint32_t flags, len, status;
int error, i, first;
#ifdef DWC_GMAC_DEBUG
@@ -857,7 +861,6 @@ dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
first = sc->sc_txq.t_cur;
map = sc->sc_txq.t_data[first].td_map;
- flags = 0;
error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
BUS_DMA_WRITE|BUS_DMA_NOWAIT);
@@ -867,21 +870,19 @@ dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
return error;
}
- if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT) {
+ if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
bus_dmamap_unload(sc->sc_dmat, map);
return ENOBUFS;
}
- data = NULL;
flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
+ status = 0;
for (i = 0; i < map->dm_nsegs; i++) {
data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
- len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
- if (i == map->dm_nsegs-1)
- flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;
+ len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
#ifdef DWC_GMAC_DEBUG
aprint_normal_dev(sc->sc_dev, "enqueing desc #%d data %08lx "
@@ -898,16 +899,14 @@ dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
* Defer passing ownership of the first descriptor
* until we are done.
*/
- if (i)
- desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
+ desc->ddesc_status = htole32(status);
+ status |= DDESC_STATUS_OWNEDBYDEV;
sc->sc_txq.t_queued++;
sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
}
- /* Pass first to device */
- sc->sc_txq.t_desc[first].ddesc_status
- = htole32(DDESC_STATUS_OWNEDBYDEV);
+ flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;
data->td_m = m0;
data->td_active = map;
@@ -915,6 +914,10 @@ dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ /* Pass first to device */
+ sc->sc_txq.t_desc[first].ddesc_status =
+ htole32(DDESC_STATUS_OWNEDBYDEV);
+
return 0;
}
@@ -969,21 +972,19 @@ dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
+ struct ifnet *ifp = &sc->sc_ec.ec_if;
struct dwc_gmac_tx_data *data;
struct dwc_gmac_dev_dmadesc *desc;
- uint32_t flags;
- int i;
-
- for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
- i = TX_NEXT(i), sc->sc_txq.t_queued--) {
+ uint32_t status;
+ int i, nsegs;
+ for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
aprint_normal_dev(sc->sc_dev,
"dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
i, sc->sc_txq.t_queued);
#endif
- desc = &sc->sc_txq.t_desc[i];
/*
* i+1 does not need to be a valid descriptor,
* this is just a special notion to just sync
@@ -991,15 +992,18 @@ dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
*/
dwc_gmac_txdesc_sync(sc, i, i+1,
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- flags = le32toh(desc->ddesc_status);
- if (flags & DDESC_STATUS_OWNEDBYDEV)
+ desc = &sc->sc_txq.t_desc[i];
+ status = le32toh(desc->ddesc_status);
+ if (status & DDESC_STATUS_OWNEDBYDEV)
break;
data = &sc->sc_txq.t_data[i];
if (data->td_m == NULL)
continue;
- sc->sc_ec.ec_if.if_opackets++;
+
+ ifp->if_opackets++;
+ nsegs = data->td_active->dm_nsegs;
bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, data->td_active);
@@ -1012,12 +1016,14 @@ dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
m_freem(data->td_m);
data->td_m = NULL;
+
+ sc->sc_txq.t_queued -= nsegs;
}
sc->sc_txq.t_next = i;
if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
- sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
+ ifp->if_flags &= ~IFF_OACTIVE;
}
}
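
For what it's worth, the essential change in the same toy model as the sketch
above: remember at queue time how many descriptors a packet used, and give the
capacity back only when the whole packet has been reaped (hypothetical names
again, not driver code):

/* Extends the toy model above; queue_packet() would additionally
 * record nbufs_at[first] = nsegs at queue time. */
static int nbufs_at[RING_COUNT];        /* stands in for td_nbufs */

static void
reap_packet_fixed(void)
{
        int first = t_next;
        int nsegs = nbufs_at[first];

        /* walk past all of the packet's descriptors ... */
        t_next = (t_next + nsegs) % RING_COUNT;
        /* ... unload the DMA map kept at the packet's first slot ... */
        map_loaded[first] = false;      /* bus_dmamap_unload() */
        /* ... and only then return the ring capacity in one step, so
         * a later queue_packet() can never reuse the slot too early. */
        t_queued -= nsegs;
}

In the actual patch, dwc_gmac.c takes nsegs from data->td_active->dm_nsegs,
while rockchip_emac.c needs the new td_nbufs field because a short packet may
consume an extra pad descriptor beyond dm_nsegs. The other half of the fix,
moving the OWN bit store on the first descriptor to after bus_dmamap_sync(),
keeps the device from seeing the chain before it is fully built.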