Here's a diff, which is a little icky: it's from a version of netbsd-6
in May to a version of netbsd-6 with the bnx fixes from March, but I
think it's pretty much what you want.
This diff is:
Approved for Public Release, Distribution Unlimited
This material is based upon work supported by the Defense Advanced Research
Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
Contract No. N66001-09-C-2073.
and is the work of Bev Schwartz of BBN.
A commit message is some blend of:
fix potential livelock problem
If the bnx driver receives a packet while no mbuf clusters
can be obtained, the packet is thrown away and its mbuf
cluster is recycled. This change allows the
packet to be processed and lets the receive ring slot
go empty, to be refilled later. The slot can be filled
either by bnx_rx_intr processing a future packet, or
by bnx_tick, which periodically tries to fill any
empty slots in the receive ring.
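(Not the actual driver code, just a minimal sketch of the old vs. new
behavior when cluster allocation fails; the struct and helper names
below are invented, and in if_bnx.c the corresponding roles are played
by the rx_bd chain, bnx_add_buf, and bnx_get_buf.)

struct rx_ring {
	void	*slot[256];	/* stand-in for rx_mbuf_ptr[] */
	int	 empty;		/* slots waiting to be refilled */
};

static void	*alloc_cluster(void);	/* may return NULL under memory pressure */
static void	 deliver_packet(void *);

static void
rx_consume_one(struct rx_ring *r, int cons)
{
	void *pkt = r->slot[cons];
	void *fresh = alloc_cluster();

	if (fresh == NULL) {
		/*
		 * Old behavior: put pkt back in the slot and drop the
		 * frame.  New behavior: pass the frame up anyway and
		 * leave the slot empty; bnx_tick (or a later receive
		 * interrupt) refills empty slots.
		 */
		r->slot[cons] = NULL;
		r->empty++;
	} else {
		r->slot[cons] = fresh;
	}
	deliver_packet(pkt);	/* the received packet is never dropped */
}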
Fix memory overwrite in bnx receive ring
In bnx_rx_intr, there are two places where the producer can get pushed
ahead of the consumer. In the while (sw_cons != hw_cons) loop,
there is a check for a packet whose layer 2 data has been corrupted,
and a check for being able to allocate a new mbuf cluster to be
placed in the receive queue. In both cases, if the test fails,
the mbuf cluster that has just been removed from the receive ring
buffer is put back, and the producer index is pushed forward. However,
the consumer index is not pushed forward, so the next time
around the loop, the mbuf cluster is removed from where the
consumer points, but a new (or recycled) mbuf cluster is put
where the producer points.
This overwrites the pointer to another mbuf cluster. The mbuf cluster is
lost forever, and the receive buffer ends up filled with NULLs because
the consumer and producer indices stay out of sync. Eventually all
mbuf clusters are lost, and no more communication can happen over any
interface.
Instead of calling "continue" in each of these cases, a "goto" is
used to leave the if conditional but continue with the remaining
processing in the loop. That remaining processing is what pushes
the consumer index forward.
A KASSERT has been added to bnx_add_buf to check that any mbuf
cluster put into the receive ring is actually going into an
available slot. This assertion was essential to finding
this problem.
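(A generic illustration of the invariant, not the bnx code itself:
ring[], cons, prod, and the helpers below are made up. In if_bnx.c the
same roles are played by sw_cons, sw_prod, bnx_add_buf, and the new
bnx_rx_receive_error label, and the assert here mirrors the new
KASSERT.)

#include <assert.h>
#include <stddef.h>

#define RING_SIZE	16

static void	*ring[RING_SIZE];
static unsigned	 cons, prod;

static void
put_buffer(void *buf)
{
	/*
	 * Equivalent of the KASSERT added to bnx_add_buf: the slot
	 * being filled must currently be empty, i.e. the producer has
	 * not run over a slot the consumer never released.
	 */
	assert(ring[prod % RING_SIZE] == NULL);
	ring[prod % RING_SIZE] = buf;
	prod++;
}

static void *
consume_one(int error)
{
	void *buf = ring[cons % RING_SIZE];

	ring[cons % RING_SIZE] = NULL;
	if (error) {
		/*
		 * Recycle the buffer, but fall through to the index
		 * update below (the "goto" in the real code) instead of
		 * returning early (the old "continue"), so the consumer
		 * index always keeps pace with the producer index.
		 */
		put_buffer(buf);
		buf = NULL;
	}
	cons++;
	return buf;
}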
bnx_tick now informs the chip when the producer pointer has moved
bnx_tick calls bnx_get_buf in case any slots in the receive
ring need to be filled with an mbuf cluster. Before this
fix, bnx_tick never communicated the change to the chip
itself. This can cause an interface freeze when the chip
and the CPU get out of sync on where they each think the
producer is pointing. (The chip will think there are no
buffers for receiving packets, and the CPU doesn't move
along because it's waiting for the chip to signal that
there are new packets.)
Added a counter to track how many times bnx_tick successfully
acquires mbuf clusters.
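(Condensed view of the bnx_tick change described above; the real hunk
is in the diff below, the comments are mine, and the initial
prod = sc->rx_prod assignment is assumed from the surrounding context.)

	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	if (prod != sc->rx_prod || prod_bseq != sc->rx_prod_bseq) {
		/*
		 * At least one empty slot was refilled: record the new
		 * producer index and, crucially, write it to the chip's
		 * mailbox so the chip knows buffers are available again.
		 */
		sc->bnx_tick_allocation_success++;
		sc->rx_prod = prod;
		sc->rx_prod_bseq = prod_bseq;
		REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, prod);
		REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, prod_bseq);
	}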
Update hw consumer index
In bnx_rx_intr, there is a check to see whether the chip has
detected new incoming packets, and if so, the software
counters are updated and processing continues. However, this
code never actually pulled in the value from the chip that
lets the CPU know whether there is new work. This fix adds
a single statement that gets the value from the chip.
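(The statement in question, pulled out of the hunk below with an
explanatory comment added: without the POSTREAD sync, the CPU can keep
seeing a stale copy of the status block that the chip has already
updated by DMA.)

	if (sw_cons == hw_cons) {
		/*
		 * Make the chip's DMA writes to the status block
		 * visible to the CPU before re-reading its consumer
		 * index.
		 */
		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
		    BNX_STATUS_BLK_SZ, BUS_DMASYNC_POSTREAD);
		hw_cons = sc->hw_rx_cons =
		    sblk->status_rx_quick_consumer_index0;
	}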
diff --git a/src/sys/dev/pci/if_bnx.c b/src/sys/dev/pci/if_bnx.c
index 711ec34..3d029ec 100644
--- a/src/sys/dev/pci/if_bnx.c
+++ b/src/sys/dev/pci/if_bnx.c
@@ -3730,6 +3730,7 @@ bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
* last rx_bd entry to that rx_mbuf_ptr and rx_mbuf_map matches)
* and update counter.
*/
+ KASSERT(sc->rx_mbuf_ptr[*chain_prod] == NULL);
sc->rx_mbuf_ptr[*chain_prod] = m_new;
sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
sc->rx_mbuf_map[*chain_prod] = map;
@@ -4436,7 +4437,7 @@ bnx_rx_intr(struct bnx_softc *sc)
len >
(BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
ifp->if_ierrors++;
- DBRUNIF(1, sc->l2fhdr_status_errors++);
+ sc->l2fhdr_status_errors++;
/* Reuse the mbuf for a new frame. */
if (bnx_add_buf(sc, m, &sw_prod,
@@ -4445,7 +4446,7 @@ bnx_rx_intr(struct bnx_softc *sc)
panic("%s: Can't reuse RX mbuf!\n",
device_xname(sc->bnx_dev));
}
- continue;
+ goto bnx_rx_receive_error;
}
/*
@@ -4460,17 +4461,7 @@ bnx_rx_intr(struct bnx_softc *sc)
"Failed to allocate "
"new mbuf, incoming frame dropped!\n"));
- ifp->if_ierrors++;
-
- /* Try and reuse the exisitng mbuf. */
- if (bnx_add_buf(sc, m, &sw_prod,
- &sw_chain_prod, &sw_prod_bseq)) {
- DBRUNIF(1, bnx_breakpoint(sc));
- panic("%s: Double mbuf allocation "
- "failure!",
- device_xname(sc->bnx_dev));
- }
- continue;
+ sc->bnx_rx_intr_allocation_failure++;
}
/* Skip over the l2_fhdr when passing the data up
@@ -4557,10 +4548,13 @@ bnx_rx_intr(struct bnx_softc *sc)
}
+bnx_rx_receive_error:
sw_cons = NEXT_RX_BD(sw_cons);
/* Refresh hw_cons to see if there's new work */
if (sw_cons == hw_cons) {
+ bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
+ BNX_STATUS_BLK_SZ, BUS_DMASYNC_POSTREAD);
hw_cons = sc->hw_rx_cons =
sblk->status_rx_quick_consumer_index0;
if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
@@ -5615,8 +5609,13 @@ bnx_tick(void *xsc)
prod_bseq = sc->rx_prod_bseq;
chain_prod = RX_CHAIN_IDX(prod);
bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
- sc->rx_prod = prod;
- sc->rx_prod_bseq = prod_bseq;
+ if (prod != sc->rx_prod || prod_bseq != sc->rx_prod_bseq) {
+ sc->bnx_tick_allocation_success++;
+ sc->rx_prod = prod;
+ sc->rx_prod_bseq = prod_bseq;
+ REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, prod);
+ REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, prod_bseq);
+ }
splx(s);
return;
}
diff --git a/src/sys/dev/pci/if_bnxvar.h b/src/sys/dev/pci/if_bnxvar.h
index 6545c76..fef5230 100644
--- a/src/sys/dev/pci/if_bnxvar.h
+++ b/src/sys/dev/pci/if_bnxvar.h
@@ -220,8 +220,8 @@ struct bnx_softc
bus_addr_t tx_bd_chain_paddr[TX_PAGES];
/* H/W maintained RX buffer descriptor chain structure. */
- bus_dma_segment_t rx_bd_chain_seg[RX_PAGES];
- int rx_bd_chain_rseg[RX_PAGES];
+ bus_dma_segment_t rx_bd_chain_seg[TX_PAGES];
+ int rx_bd_chain_rseg[TX_PAGES];
bus_dmamap_t rx_bd_chain_map[RX_PAGES];
struct rx_bd *rx_bd_chain[RX_PAGES];
bus_addr_t rx_bd_chain_paddr[RX_PAGES];
@@ -337,6 +337,15 @@ struct bnx_softc
/* TX DMA mapping failure counter. */
u_int32_t tx_dma_map_failures;
+ /* bnx_tick rx buffer allocation counter */
+ u_int32_t bnx_tick_allocation_success;
+
+ /* l2 fhdr status error counter */
+ u_int32_t l2fhdr_status_errors;
+
+ /* number of times bnx_rx_intr fails to allocate an mbuf cluster */
+ u_int32_t bnx_rx_intr_allocation_failure;
+
#ifdef BNX_DEBUG
/* Track the number of enqueued mbufs. */
int tx_mbuf_alloc;
@@ -356,7 +365,6 @@ struct bnx_softc
u_int32_t tx_hi_watermark; /* Greatest number of tx_bd's used. */
u_int32_t tx_full_count; /* Number of times the TX chain was
full. */
u_int32_t mbuf_sim_alloc_failed;/* Mbuf simulated allocation failure
counter. */
- u_int32_t l2fhdr_status_errors;
u_int32_t unexpected_attentions;
u_int32_t lost_status_block_updates;
#endif