Page MenuHomeFreeBSD

D19067.id53546.diff
No OneTemporary

D19067.id53546.diff

Index: sys/net/iflib.c
===================================================================
--- sys/net/iflib.c
+++ sys/net/iflib.c
@@ -353,8 +353,8 @@
uint8_t ift_closed;
uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
- bus_dma_tag_t ift_desc_tag;
- bus_dma_tag_t ift_tso_desc_tag;
+ bus_dma_tag_t ift_buf_tag;
+ bus_dma_tag_t ift_tso_buf_tag;
iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
char ift_mtx_name[MTX_NAME_LEN];
@@ -389,7 +389,7 @@
iflib_rxsd_array_t ifl_sds;
iflib_rxq_t ifl_rxq;
uint8_t ifl_id;
- bus_dma_tag_t ifl_desc_tag;
+ bus_dma_tag_t ifl_buf_tag;
iflib_dma_info_t ifl_ifdi;
uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
@@ -922,10 +922,9 @@
if_ctx_t ctx = ifp->if_softc;
iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
- bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+ bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
/*
* First part: process new packets to send.
* nm_i is the current index in the netmap kring,
@@ -992,7 +991,8 @@
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
- netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
+ netmap_reload_map(na, txq->ift_buf_tag,
+ txq->ift_sds.ifsd_map[nic_i], addr);
}
/* make sure changes to the buffer are synced */
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
@@ -1005,7 +1005,7 @@
kring->nr_hwcur = nm_i;
/* synchronize the NIC ring */
- bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+ bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
@@ -1072,8 +1072,9 @@
for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
if (fl->ifl_sds.ifsd_map == NULL)
continue;
- bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(rxq->ifr_fl[i].ifl_buf_tag,
+ fl->ifl_ifdi->idi_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
/*
* First part: import newly received packets.
@@ -1199,7 +1200,8 @@
* netmap slot index, si
*/
int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
- netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
+ netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
+ NMB(na, slot + si));
}
}
@@ -1576,12 +1578,13 @@
/*********************************************************************
*
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
+ * Allocate DMA resources for TX buffers as well as memory for the TX
+ * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
+ * iflib_sw_tx_desc_array structure, storing all the information that
+ * is needed to transmit a packet on the wire. This is called only
+ * once at attach, setup is done every reset.
*
**********************************************************************/
-
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
@@ -1607,7 +1610,7 @@
}
/*
- * Setup DMA descriptor areas.
+ * Set up DMA tags for TX buffers.
*/
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
@@ -1620,7 +1623,7 @@
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
- &txq->ift_desc_tag))) {
+ &txq->ift_buf_tag))) {
device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
(uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
@@ -1638,38 +1641,42 @@
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
- &txq->ift_tso_desc_tag))) {
- device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
+ &txq->ift_tso_buf_tag))) {
+ device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
+ err);
goto fail;
}
+
+ /* Allocate memory for the TX mbuf map. */
if (!(txq->ift_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ device_printf(dev, "Unable to allocate TX mbuf map memory\n");
err = ENOMEM;
goto fail;
}
- /* Create the descriptor buffer dma maps */
+ /*
+ * Create the DMA maps for TX buffers.
+ */
if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
- device_printf(dev, "Unable to allocate tx_buffer map memory\n");
+ device_printf(dev,
+ "Unable to allocate TX buffer DMA map memory\n");
err = ENOMEM;
goto fail;
}
-
if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
- device_printf(dev, "Unable to allocate TSO tx_buffer "
- "map memory\n");
+ device_printf(dev,
+ "Unable to allocate TSO TX buffer map memory\n");
err = ENOMEM;
goto fail;
}
-
for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
- err = bus_dmamap_create(txq->ift_desc_tag, 0,
+ err = bus_dmamap_create(txq->ift_buf_tag, 0,
&txq->ift_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
@@ -1677,7 +1684,7 @@
}
if (!tso)
continue;
- err = bus_dmamap_create(txq->ift_tso_desc_tag, 0,
+ err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
&txq->ift_sds.ifsd_tso_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TSO TX DMA map\n");
@@ -1700,9 +1707,9 @@
if (txq->ift_sds.ifsd_map != NULL)
map = txq->ift_sds.ifsd_map[i];
if (map != NULL) {
- bus_dmamap_sync(txq->ift_desc_tag, map, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_desc_tag, map);
- bus_dmamap_destroy(txq->ift_desc_tag, map);
+ bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txq->ift_buf_tag, map);
+ bus_dmamap_destroy(txq->ift_buf_tag, map);
txq->ift_sds.ifsd_map[i] = NULL;
}
@@ -1710,10 +1717,10 @@
if (txq->ift_sds.ifsd_tso_map != NULL)
map = txq->ift_sds.ifsd_tso_map[i];
if (map != NULL) {
- bus_dmamap_sync(txq->ift_tso_desc_tag, map,
+ bus_dmamap_sync(txq->ift_tso_buf_tag, map,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_tso_desc_tag, map);
- bus_dmamap_destroy(txq->ift_tso_desc_tag, map);
+ bus_dmamap_unload(txq->ift_tso_buf_tag, map);
+ bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
txq->ift_sds.ifsd_tso_map[i] = NULL;
}
}
@@ -1737,13 +1744,13 @@
free(txq->ift_sds.ifsd_m, M_IFLIB);
txq->ift_sds.ifsd_m = NULL;
}
- if (txq->ift_desc_tag != NULL) {
- bus_dma_tag_destroy(txq->ift_desc_tag);
- txq->ift_desc_tag = NULL;
+ if (txq->ift_buf_tag != NULL) {
+ bus_dma_tag_destroy(txq->ift_buf_tag);
+ txq->ift_buf_tag = NULL;
}
- if (txq->ift_tso_desc_tag != NULL) {
- bus_dma_tag_destroy(txq->ift_tso_desc_tag);
- txq->ift_tso_desc_tag = NULL;
+ if (txq->ift_tso_buf_tag != NULL) {
+ bus_dma_tag_destroy(txq->ift_tso_buf_tag);
+ txq->ift_tso_buf_tag = NULL;
}
}
@@ -1757,14 +1764,14 @@
return;
if (txq->ift_sds.ifsd_map != NULL) {
- bus_dmamap_sync(txq->ift_desc_tag,
+ bus_dmamap_sync(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]);
+ bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
}
if (txq->ift_sds.ifsd_tso_map != NULL) {
- bus_dmamap_sync(txq->ift_tso_desc_tag,
+ bus_dmamap_sync(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_tso_desc_tag,
+ bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[i]);
}
m_free(*mp);
@@ -1803,10 +1810,13 @@
/*********************************************************************
*
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
+ * Allocate DMA resources for RX buffers as well as memory for the RX
+ * mbuf map, direct RX cluster pointer map and RX cluster bus address
+ * map. RX DMA map, RX mbuf map, direct RX cluster pointer map and
+ * RX cluster map are kept in a iflib_sw_rx_desc_array structure.
+ * Since we use one entry in iflib_sw_rx_desc_array per received
+ * packet, the maximum number of entries we'll need is equal to the
+ * number of hardware receive descriptors that we've allocated.
*
**********************************************************************/
static int
@@ -1825,6 +1835,7 @@
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
+ /* Set up DMA tag for RX buffers. */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1836,45 +1847,56 @@
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
- &fl->ifl_desc_tag);
+ &fl->ifl_buf_tag);
if (err) {
- device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
- __func__, err);
+ device_printf(dev,
+ "Unable to allocate RX DMA tag: %d\n", err);
goto fail;
}
+
+ /* Allocate memory for the RX mbuf map. */
if (!(fl->ifl_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ device_printf(dev,
+ "Unable to allocate RX mbuf map memory\n");
err = ENOMEM;
goto fail;
}
+
+ /* Allocate memory for the direct RX cluster pointer map. */
if (!(fl->ifl_sds.ifsd_cl =
(caddr_t *) malloc(sizeof(caddr_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ device_printf(dev,
+ "Unable to allocate RX cluster map memory\n");
err = ENOMEM;
goto fail;
}
+ /* Allocate memory for the RX cluster bus address map. */
if (!(fl->ifl_sds.ifsd_ba =
(bus_addr_t *) malloc(sizeof(bus_addr_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx bus addr memory\n");
+ device_printf(dev,
+ "Unable to allocate RX bus address map memory\n");
err = ENOMEM;
goto fail;
}
- /* Create the descriptor buffer dma maps */
+ /*
+ * Create the DMA maps for RX buffers.
+ */
if (!(fl->ifl_sds.ifsd_map =
(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer map memory\n");
+ device_printf(dev,
+ "Unable to allocate RX buffer DMA map memory\n");
err = ENOMEM;
goto fail;
}
-
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
- err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
+ err = bus_dmamap_create(fl->ifl_buf_tag, 0,
+ &fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create RX buffer DMA map\n");
goto fail;
@@ -1974,7 +1996,7 @@
cb_arg.error = 0;
MPASS(sd_map != NULL);
- err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
+ err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
BUS_DMA_NOWAIT);
if (err != 0 || cb_arg.error) {
@@ -1986,7 +2008,7 @@
break;
}
- bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
+ bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
BUS_DMASYNC_PREREAD);
sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
sd_cl[frag_idx] = cl;
@@ -2087,14 +2109,14 @@
if (*sd_cl != NULL) {
sd_map = fl->ifl_sds.ifsd_map[i];
- bus_dmamap_sync(fl->ifl_desc_tag, sd_map,
+ bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
+ bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
if (*sd_cl != NULL)
uma_zfree(fl->ifl_zone, *sd_cl);
// XXX: Should this get moved out?
if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
- bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
+ bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
if (*sd_m != NULL) {
m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
uma_zfree(zone_mbuf, *sd_m);
@@ -2196,23 +2218,23 @@
if (rxq->ifr_fl != NULL) {
for (i = 0; i < rxq->ifr_nfl; i++) {
fl = &rxq->ifr_fl[i];
- if (fl->ifl_desc_tag != NULL) {
+ if (fl->ifl_buf_tag != NULL) {
if (fl->ifl_sds.ifsd_map != NULL) {
for (j = 0; j < fl->ifl_size; j++) {
if (fl->ifl_sds.ifsd_map[j] ==
NULL)
continue;
bus_dmamap_sync(
- fl->ifl_desc_tag,
+ fl->ifl_buf_tag,
fl->ifl_sds.ifsd_map[j],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(
- fl->ifl_desc_tag,
+ fl->ifl_buf_tag,
fl->ifl_sds.ifsd_map[j]);
}
}
- bus_dma_tag_destroy(fl->ifl_desc_tag);
- fl->ifl_desc_tag = NULL;
+ bus_dma_tag_destroy(fl->ifl_buf_tag);
+ fl->ifl_buf_tag = NULL;
}
free(fl->ifl_sds.ifsd_m, M_IFLIB);
free(fl->ifl_sds.ifsd_cl, M_IFLIB);
@@ -2497,9 +2519,9 @@
/* not valid assert if bxe really does SGE from non-contiguous elements */
MPASS(fl->ifl_cidx == cidx);
- bus_dmamap_sync(fl->ifl_desc_tag, map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
if (unload)
- bus_dmamap_unload(fl->ifl_desc_tag, map);
+ bus_dmamap_unload(fl->ifl_buf_tag, map);
fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
if (__predict_false(fl->ifl_cidx == 0))
fl->ifl_gen = 0;
@@ -2582,7 +2604,7 @@
m->m_data += 2;
#endif
memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
- bus_dmamap_sync(rxq->ifr_fl->ifl_desc_tag,
+ bus_dmamap_sync(rxq->ifr_fl->ifl_buf_tag,
rxq->ifr_fl->ifl_sds.ifsd_map[ri->iri_frags[0].irf_idx],
BUS_DMASYNC_PREREAD);
m->m_len = ri->iri_frags[0].irf_len;
@@ -3083,9 +3105,9 @@
ifsd_m = txq->ift_sds.ifsd_m;
m = ifsd_m[pidx];
ifsd_m[pidx] = NULL;
- bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[pidx]);
+ bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
if (txq->ift_sds.ifsd_tso_map != NULL)
- bus_dmamap_unload(txq->ift_tso_desc_tag,
+ bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[pidx]);
#if MEMORY_LOGGING
txq->ift_dequeued++;
@@ -3162,6 +3184,7 @@
if_ctx_t ctx;
if_shared_ctx_t sctx;
if_softc_ctx_t scctx;
+ bus_dma_tag_t buf_tag;
bus_dma_segment_t *segs;
struct mbuf *m_head, **ifsd_m;
void *next_txd;
@@ -3169,7 +3192,6 @@
struct if_pkt_info pi;
int remap = 0;
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
- bus_dma_tag_t desc_tag;
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
@@ -3200,13 +3222,13 @@
ifsd_m = txq->ift_sds.ifsd_m;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- desc_tag = txq->ift_tso_desc_tag;
+ buf_tag = txq->ift_tso_buf_tag;
max_segs = scctx->isc_tx_tso_segments_max;
map = txq->ift_sds.ifsd_tso_map[pidx];
- MPASS(desc_tag != NULL);
+ MPASS(buf_tag != NULL);
MPASS(max_segs > 0);
} else {
- desc_tag = txq->ift_desc_tag;
+ buf_tag = txq->ift_buf_tag;
max_segs = scctx->isc_tx_nsegments;
map = txq->ift_sds.ifsd_map[pidx];
}
@@ -3238,7 +3260,7 @@
}
retry:
- err = bus_dmamap_load_mbuf_sg(desc_tag, map, m_head, segs, &nsegs,
+ err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
BUS_DMA_NOWAIT);
defrag:
if (__predict_false(err)) {
@@ -3284,7 +3306,7 @@
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
txq->ift_no_desc_avail++;
- bus_dmamap_unload(desc_tag, map);
+ bus_dmamap_unload(buf_tag, map);
DBG_COUNTER_INC(encap_txq_avail_fail);
DBG_COUNTER_INC(encap_txd_encap_fail);
if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
@@ -3311,7 +3333,7 @@
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
- bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3387,16 +3409,16 @@
if ((m = ifsd_m[cidx]) != NULL) {
prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
- bus_dmamap_sync(txq->ift_tso_desc_tag,
+ bus_dmamap_sync(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[cidx],
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_tso_desc_tag,
+ bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[cidx]);
} else {
- bus_dmamap_sync(txq->ift_desc_tag,
+ bus_dmamap_sync(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[cidx],
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txq->ift_desc_tag,
+ bus_dmamap_unload(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[cidx]);
}
/* XXX we don't support any drivers that batch packets yet */
@@ -5203,15 +5225,18 @@
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
/* Set up some basics */
- if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
- device_printf(dev, "failed to allocate iflib_dma_info\n");
+ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
+ M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+ device_printf(dev,
+ "Unable to allocate TX DMA info memory\n");
err = ENOMEM;
goto err_tx_desc;
}
txq->ift_ifdi = ifdip;
for (j = 0; j < ntxqs; j++, ifdip++) {
- if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
- device_printf(dev, "Unable to allocate Descriptor memory\n");
+ if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
+ device_printf(dev,
+ "Unable to allocate TX descriptors\n");
err = ENOMEM;
goto err_tx_desc;
}
@@ -5255,8 +5280,10 @@
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
/* Set up some basics */
- if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
- device_printf(dev, "failed to allocate iflib_dma_info\n");
+ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
+ M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+ device_printf(dev,
+ "Unable to allocate RX DMA info memory\n");
err = ENOMEM;
goto err_tx_desc;
}
@@ -5266,8 +5293,9 @@
rxq->ifr_ntxqirq = 1;
rxq->ifr_txqid[0] = i;
for (j = 0; j < nrxqs; j++, ifdip++) {
- if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
- device_printf(dev, "Unable to allocate Descriptor memory\n");
+ if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
+ device_printf(dev,
+ "Unable to allocate RX descriptors\n");
err = ENOMEM;
goto err_tx_desc;
}
@@ -5319,7 +5347,8 @@
}
}
if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
- device_printf(ctx->ifc_dev, "device queue allocation failed\n");
+ device_printf(ctx->ifc_dev,
+ "Unable to allocate device TX queue\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
@@ -5340,7 +5369,8 @@
}
}
if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
- device_printf(ctx->ifc_dev, "device queue allocation failed\n");
+ device_printf(ctx->ifc_dev,
+ "Unable to allocate device RX queue\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);

File Metadata

Mime Type
text/plain
Expires
Tue, Mar 10, 3:06 PM (6 h, 48 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29498026
Default Alt Text
D19067.id53546.diff (20 KB)

Event Timeline