Changeset View
Standalone View
sys/dev/ice/ice_iflib_txrx.c
Show First 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi); | static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi); | ||||
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri); | static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri); | ||||
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); | static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); | ||||
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear); | static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear); | ||||
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget); | static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget); | ||||
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx); | static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx); | ||||
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru); | static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru); | ||||
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m); | |||||
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
 * advanced 32byte Rx descriptors.
 *
 * The extra parentheses around the argument guard against surprises if the
 * macro is ever invoked with a non-trivial expression.
 */
#define RX_FLEX_NIC(desc, field) \
	(((struct ice_32b_rx_flex_desc_nic *)(desc))->field)
/** | /** | ||||
* @var ice_txrx | * @var ice_txrx | ||||
* @brief Tx/Rx operations for the iflib stack | * @brief Tx/Rx operations for the iflib stack | ||||
* | * | ||||
* Structure defining the Tx and Rx related operations that iflib can request | * Structure defining the Tx and Rx related operations that iflib can request | ||||
* the driver to perform. These are the main entry points for the hot path of | * the driver to perform. These are the main entry points for the hot path of | ||||
* the transmit and receive paths in the iflib driver. | * the transmit and receive paths in the iflib driver. | ||||
*/ | */ | ||||
struct if_txrx ice_txrx = { | struct if_txrx ice_txrx = { | ||||
.ift_txd_encap = ice_ift_txd_encap, | .ift_txd_encap = ice_ift_txd_encap, | ||||
.ift_txd_flush = ice_ift_txd_flush, | .ift_txd_flush = ice_ift_txd_flush, | ||||
.ift_txd_credits_update = ice_ift_txd_credits_update, | .ift_txd_credits_update = ice_ift_txd_credits_update, | ||||
.ift_rxd_available = ice_ift_rxd_available, | .ift_rxd_available = ice_ift_rxd_available, | ||||
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get, | .ift_rxd_pkt_get = ice_ift_rxd_pkt_get, | ||||
.ift_rxd_refill = ice_ift_rxd_refill, | .ift_rxd_refill = ice_ift_rxd_refill, | ||||
.ift_rxd_flush = ice_ift_rxd_flush, | .ift_rxd_flush = ice_ift_rxd_flush, | ||||
.ift_txq_select = ice_ift_queue_select, | |||||
}; | }; | ||||
/** | /** | ||||
* ice_ift_txd_encap - prepare Tx descriptors for a packet | * ice_ift_txd_encap - prepare Tx descriptors for a packet | ||||
* @arg: the iflib softc structure pointer | * @arg: the iflib softc structure pointer | ||||
* @pi: packet info | * @pi: packet info | ||||
* | * | ||||
* Prepares and encapsulates the given packet into into Tx descriptors, in | * Prepares and encapsulates the given packet into into Tx descriptors, in | ||||
▲ Show 20 Lines • Show All 182 Lines • ▼ Show 20 Lines | |||||
/** | /** | ||||
* ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer | * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer | ||||
* @arg: device specific softc | * @arg: device specific softc | ||||
* @ri: receive packet info | * @ri: receive packet info | ||||
* | * | ||||
* This function is called by iflib, and executes in ithread context. It is | * This function is called by iflib, and executes in ithread context. It is | ||||
* called by iflib to obtain data which has been DMA'ed into host memory. | * called by iflib to obtain data which has been DMA'ed into host memory. | ||||
* Returns zero on success, and an error code on failure. | * Returns zero on success, and EBADMSG on failure. | ||||
*/ | */ | ||||
static int | static int | ||||
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri) | ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri) | ||||
{ | { | ||||
struct ice_softc *sc = (struct ice_softc *)arg; | struct ice_softc *sc = (struct ice_softc *)arg; | ||||
if_softc_ctx_t scctx = sc->scctx; | if_softc_ctx_t scctx = sc->scctx; | ||||
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx]; | struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx]; | ||||
union ice_32b_rx_flex_desc *cur; | union ice_32b_rx_flex_desc *cur; | ||||
u16 status0, plen, ptype; | u16 status0, plen, ptype; | ||||
bool eop; | bool eop; | ||||
size_t cidx; | size_t cidx; | ||||
int i; | int i; | ||||
cidx = ri->iri_cidx; | cidx = ri->iri_cidx; | ||||
i = 0; | i = 0; | ||||
do { | do { | ||||
/* 5 descriptor receive limit */ | /* 5 descriptor receive limit */ | ||||
MPASS(i < ICE_MAX_RX_SEGS); | MPASS(i < ICE_MAX_RX_SEGS); | ||||
cur = &rxq->rx_base[cidx]; | cur = &rxq->rx_base[cidx]; | ||||
status0 = le16toh(cur->wb.status_error0); | status0 = le16toh(cur->wb.status_error0); | ||||
plen = le16toh(cur->wb.pkt_len) & | plen = le16toh(cur->wb.pkt_len) & | ||||
ICE_RX_FLX_DESC_PKT_LEN_M; | ICE_RX_FLX_DESC_PKT_LEN_M; | ||||
ptype = le16toh(cur->wb.ptype_flex_flags0) & | |||||
ICE_RX_FLEX_DESC_PTYPE_M; | |||||
/* we should never be called without a valid descriptor */ | /* we should never be called without a valid descriptor */ | ||||
MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0); | MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0); | ||||
ri->iri_len += plen; | ri->iri_len += plen; | ||||
cur->wb.status_error0 = 0; | cur->wb.status_error0 = 0; | ||||
eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)); | eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)); | ||||
/* | |||||
* Make sure packets with bad L2 values are discarded. | |||||
* NOTE: Only the EOP descriptor has valid error results. | |||||
*/ | |||||
if (eop && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S))) { | |||||
rxq->stats.desc_errs++; | |||||
return (EBADMSG); | |||||
} | |||||
ri->iri_frags[i].irf_flid = 0; | ri->iri_frags[i].irf_flid = 0; | ||||
ri->iri_frags[i].irf_idx = cidx; | ri->iri_frags[i].irf_idx = cidx; | ||||
ri->iri_frags[i].irf_len = plen; | ri->iri_frags[i].irf_len = plen; | ||||
if (++cidx == rxq->desc_count) | if (++cidx == rxq->desc_count) | ||||
cidx = 0; | cidx = 0; | ||||
i++; | i++; | ||||
} while (!eop); | } while (!eop); | ||||
/* capture soft statistics for this Rx queue */ | /* End of Packet reached; cur is eop/last descriptor */ | ||||
/* Make sure packets with bad L2 values are discarded. | |||||
* This bit is only valid in the last descriptor. | |||||
*/ | |||||
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) { | |||||
rxq->stats.desc_errs++; | |||||
return (EBADMSG); | |||||
} | |||||
/* Get VLAN tag information if one is in descriptor */ | |||||
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { | |||||
ri->iri_vtag = le16toh(cur->wb.l2tag1); | |||||
ri->iri_flags |= M_VLANTAG; | |||||
} | |||||
/* Capture soft statistics for this Rx queue */ | |||||
rxq->stats.rx_packets++; | rxq->stats.rx_packets++; | ||||
rxq->stats.rx_bytes += ri->iri_len; | rxq->stats.rx_bytes += ri->iri_len; | ||||
erj: Things got shuffled around here in ice_ift_rxd_pkt_get(); some of the code in the for-loop has… | |||||
/* Get packet type and set checksum flags */ | |||||
ptype = le16toh(cur->wb.ptype_flex_flags0) & | |||||
ICE_RX_FLEX_DESC_PTYPE_M; | |||||
if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0) | if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0) | ||||
ice_rx_checksum(rxq, &ri->iri_csum_flags, | ice_rx_checksum(rxq, &ri->iri_csum_flags, | ||||
&ri->iri_csum_data, status0, ptype); | &ri->iri_csum_data, status0, ptype); | ||||
/* Set remaining iflib RX descriptor info fields */ | |||||
ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash)); | ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash)); | ||||
ri->iri_rsstype = ice_ptype_to_hash(ptype); | ri->iri_rsstype = ice_ptype_to_hash(ptype); | ||||
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { | |||||
ri->iri_vtag = le16toh(cur->wb.l2tag1); | |||||
ri->iri_flags |= M_VLANTAG; | |||||
} | |||||
ri->iri_nfrags = i; | ri->iri_nfrags = i; | ||||
return (0); | return (0); | ||||
} | } | ||||
/** | /** | ||||
Inline comment (erj): iflib doesn't handle this case well and it causes issues, but in practice this only seems to happen when adapters are configured to store bad packets, which I don't think we currently allow in this driver.
* ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware | * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware | ||||
* @arg: device specific softc structure | * @arg: device specific softc structure | ||||
* @iru: the Rx descriptor update structure | * @iru: the Rx descriptor update structure | ||||
* | * | ||||
* Update the Rx descriptor indices for a given queue, assigning new physical | * Update the Rx descriptor indices for a given queue, assigning new physical | ||||
* addresses to the descriptors, preparing them for re-use by the hardware. | * addresses to the descriptors, preparing them for re-use by the hardware. | ||||
*/ | */ | ||||
static void | static void | ||||
Show All 35 Lines | |||||
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused, | ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused, | ||||
qidx_t pidx) | qidx_t pidx) | ||||
{ | { | ||||
struct ice_softc *sc = (struct ice_softc *)arg; | struct ice_softc *sc = (struct ice_softc *)arg; | ||||
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid]; | struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid]; | ||||
struct ice_hw *hw = &sc->hw; | struct ice_hw *hw = &sc->hw; | ||||
wr32(hw, rxq->tail, pidx); | wr32(hw, rxq->tail, pidx); | ||||
} | |||||
static qidx_t | |||||
ice_ift_queue_select(void *arg, struct mbuf *m) | |||||
Done Inline ActionsThis function is now called by iflib in its TX path to determine which queue to output on erj: This function is now called by iflib in its TX path to determine which queue to output on | |||||
{ | |||||
struct ice_softc *sc = (struct ice_softc *)arg; | |||||
struct ice_vsi *vsi = &sc->pf_vsi; | |||||
u16 tc_base_queue, tc_qcount; | |||||
u8 up, tc; | |||||
#ifdef ALTQ | |||||
/* Included to match default iflib behavior */ | |||||
/* Only go out on default queue if ALTQ is enabled */ | |||||
struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx); | |||||
if (ALTQ_IS_ENABLED(&ifp->if_snd)) | |||||
return (0); | |||||
#endif | |||||
if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) { | |||||
if (M_HASHTYPE_GET(m)) { | |||||
/* Default iflib queue selection method */ | |||||
return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues); | |||||
} else | |||||
return (0); | |||||
} | |||||
/* Use default TC unless overridden */ | |||||
tc = 0; /* XXX: Get default TC for traffic if >1 TC? */ | |||||
if (m->m_flags & M_VLANTAG) { | |||||
up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag); | |||||
tc = sc->hw.port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[up]; | |||||
} | |||||
tc_base_queue = vsi->tc_info[tc].qoffset; | |||||
tc_qcount = vsi->tc_info[tc].qcount_tx; | |||||
if (M_HASHTYPE_GET(m)) | |||||
return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue); | |||||
else | |||||
return (tc_base_queue); | |||||
} | } |
Things got shuffled around here in ice_ift_rxd_pkt_get(); some of the code in the for-loop has been moved out of the for-loop because it only applies to the last descriptor in a packet.