Index: head/sys/dev/bnxt/bnxt_txrx.c =================================================================== --- head/sys/dev/bnxt/bnxt_txrx.c (revision 332408) +++ head/sys/dev/bnxt/bnxt_txrx.c (revision 332409) @@ -1,681 +1,681 @@ /*- * Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2016 Broadcom, All Rights Reserved. * The term Broadcom refers to Broadcom Limited and/or its subsidiaries * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "bnxt.h" /* * Function prototypes */ static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi); static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx); static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear); static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru); /* uint16_t rxqid, uint8_t flid, uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count, uint16_t buf_size); */ static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid, qidx_t pidx); static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget); static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri); static int bnxt_intr(void *sc); struct if_txrx bnxt_txrx = { - bnxt_isc_txd_encap, - bnxt_isc_txd_flush, - bnxt_isc_txd_credits_update, - bnxt_isc_rxd_available, - bnxt_isc_rxd_pkt_get, - bnxt_isc_rxd_refill, - bnxt_isc_rxd_flush, - bnxt_intr + .ift_txd_encap = bnxt_isc_txd_encap, + .ift_txd_flush = bnxt_isc_txd_flush, + .ift_txd_credits_update = bnxt_isc_txd_credits_update, + .ift_rxd_available = bnxt_isc_rxd_available, + .ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get, + .ift_rxd_refill = bnxt_isc_rxd_refill, + .ift_rxd_flush = bnxt_isc_rxd_flush, + .ift_legacy_intr = bnxt_intr }; /* * Device Dependent Packet Transmit and Receive Functions */ static const uint16_t bnxt_tx_lhint[] = { TX_BD_SHORT_FLAGS_LHINT_LT512, TX_BD_SHORT_FLAGS_LHINT_LT1K, TX_BD_SHORT_FLAGS_LHINT_LT2K, TX_BD_SHORT_FLAGS_LHINT_LT2K, TX_BD_SHORT_FLAGS_LHINT_GTE2K, }; static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx]; struct tx_bd_long *tbd; struct 
tx_bd_long_hi *tbdh; bool need_hi = false; uint16_t flags_type; uint16_t lflags; uint32_t cfa_meta; int seg = 0; /* If we have offloads enabled, we need to use two BDs. */ if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) || pi->ipi_mflags & M_VLANTAG) need_hi = true; /* TODO: Devices before Cu+B1 need to not mix long and short BDs */ need_hi = true; pi->ipi_new_pidx = pi->ipi_pidx; tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx]; pi->ipi_ndescs = 0; /* No need to byte-swap the opaque value */ tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx; tbd->len = htole16(pi->ipi_segs[seg].ds_len); tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr); flags_type = ((pi->ipi_nsegs + need_hi) << TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK; if (pi->ipi_len >= 2048) flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K; else flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9]; if (need_hi) { flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx]; tbdh->mss = htole16(pi->ipi_tso_segsz); tbdh->hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen) >> 1); tbdh->cfa_action = 0; lflags = 0; cfa_meta = 0; if (pi->ipi_mflags & M_VLANTAG) { /* TODO: Do we need to byte-swap the vtag here? */ cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG | pi->ipi_vtag; cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; } tbdh->cfa_meta = htole32(cfa_meta); if (pi->ipi_csum_flags & CSUM_TSO) { lflags |= TX_BD_LONG_LFLAGS_LSO | TX_BD_LONG_LFLAGS_T_IPID; } else if(pi->ipi_csum_flags & CSUM_OFFLOAD) { lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | TX_BD_LONG_LFLAGS_IP_CHKSUM; } else if(pi->ipi_csum_flags & CSUM_IP) { lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; } tbdh->lflags = htole16(lflags); } else { flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; } for (; seg < pi->ipi_nsegs; seg++) { tbd->flags_type = htole16(flags_type); pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx]; tbd->len = htole16(pi->ipi_segs[seg].ds_len); tbd->addr = htole64(pi->ipi_segs[seg].ds_addr); flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; } flags_type |= TX_BD_SHORT_FLAGS_PACKET_END; tbd->flags_type = htole16(flags_type); pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx); return 0; } static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_ring *tx_ring = &softc->tx_rings[txqid]; /* pidx is what we last set ipi_new_pidx to */ BNXT_TX_DB(tx_ring, pidx); /* TODO: Cumulus+ doesn't need the double doorbell */ BNXT_TX_DB(tx_ring, pidx); return; } static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid]; struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr; int avail = 0; uint32_t cons = cpr->cons; bool v_bit = cpr->v_bit; bool last_v_bit; uint32_t last_cons; uint16_t type; uint16_t err; for (;;) { last_cons = cons; last_v_bit = v_bit; NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmpl[cons], v_bit)) goto done; type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK; switch (type) { case TX_CMPL_TYPE_TX_L2: err = (le16toh(cmpl[cons].errors_v) & TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >> TX_CMPL_ERRORS_BUFFER_ERROR_SFT; if (err) device_printf(softc->dev, "TX completion error %u\n", err); /* No need to byte-swap the opaque 
value */ avail += cmpl[cons].opaque >> 24; /* * If we're not clearing, iflib only cares if there's * at least one buffer. Don't scan the whole ring in * this case. */ if (!clear) goto done; break; default: if (type & 1) { NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); if (!CMP_VALID(&cmpl[cons], v_bit)) goto done; } device_printf(softc->dev, "Unhandled TX completion type %u\n", type); break; } } done: if (clear && avail) { cpr->cons = last_cons; cpr->v_bit = last_v_bit; BNXT_CP_IDX_DISABLE_DB(&cpr->ring, cpr->cons); } return avail; } static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_ring *rx_ring; struct rx_prod_pkt_bd *rxbd; uint16_t type; uint16_t i; uint16_t rxqid; uint16_t count, len; uint32_t pidx; uint8_t flid; uint64_t *paddrs; caddr_t *vaddrs; qidx_t *frag_idxs; rxqid = iru->iru_qsidx; count = iru->iru_count; len = iru->iru_buf_size; pidx = iru->iru_pidx; flid = iru->iru_flidx; vaddrs = iru->iru_vaddrs; paddrs = iru->iru_paddrs; frag_idxs = iru->iru_idxs; if (flid == 0) { rx_ring = &softc->rx_rings[rxqid]; type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; } else { rx_ring = &softc->ag_rings[rxqid]; type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG; } rxbd = (void *)rx_ring->vaddr; for (i=0; i<count; i++) { rxbd[pidx].flags_type = htole16(type); rxbd[pidx].len = htole16(len); /* No need to byte-swap the opaque value */ rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16) | frag_idxs[i]); rxbd[pidx].addr = htole64(paddrs[i]); if (++pidx == rx_ring->ring_size) pidx = 0; } return; } static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid, qidx_t pidx) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_ring *rx_ring; if (flid == 0) rx_ring = &softc->rx_rings[rxqid]; else rx_ring = &softc->ag_rings[rxqid]; /* * We *must* update the completion ring before updating the RX ring * or we will overrun the completion ring and the device will wedge for * RX. */ if (softc->rx_cp_rings[rxqid].cons != UINT32_MAX) BNXT_CP_IDX_DISABLE_DB(&softc->rx_cp_rings[rxqid].ring, softc->rx_cp_rings[rxqid].cons); /* We're given the last filled RX buffer here, not the next empty one */ BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx)); /* TODO: Cumulus+ doesn't need the double doorbell */ BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx)); return; } static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid]; struct rx_pkt_cmpl *rcp; struct rx_tpa_end_cmpl *rtpae; struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr; int avail = 0; uint32_t cons = cpr->cons; bool v_bit = cpr->v_bit; uint8_t ags; int i; uint16_t type; for (;;) { NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK; switch (type) { case CMPL_BASE_TYPE_RX_L2: rcp = (void *)&cmp[cons]; ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> RX_PKT_CMPL_AGG_BUFS_SFT; NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; /* Now account for all the AG completions */ for (i=0; i<ags; i++) { NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; } avail++; break; case CMPL_BASE_TYPE_RX_TPA_END: rtpae = (void *)&cmp[cons]; ags = (rtpae->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT; NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; /* Now account for all the AG completions */ for (i=0; i<ags; i++) { NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons],
v_bit)) goto cmpl_invalid; } avail++; break; case CMPL_BASE_TYPE_RX_TPA_START: NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; break; case CMPL_BASE_TYPE_RX_AGG: break; default: device_printf(softc->dev, "Unhandled completion type %d on RXQ %d\n", type, rxqid); /* Odd completion types use two completions */ if (type & 1) { NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); CMPL_PREFETCH_NEXT(cpr, cons); if (!CMP_VALID(&cmp[cons], v_bit)) goto cmpl_invalid; } break; } if (avail > budget) break; } cmpl_invalid: return avail; } static void bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type) { uint8_t rss_profile_id; rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type); switch (rss_profile_id) { case BNXT_RSS_HASH_TYPE_TCPV4: ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4; break; case BNXT_RSS_HASH_TYPE_UDPV4: ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4; break; case BNXT_RSS_HASH_TYPE_IPV4: ri->iri_rsstype = M_HASHTYPE_RSS_IPV4; break; case BNXT_RSS_HASH_TYPE_TCPV6: ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6; break; case BNXT_RSS_HASH_TYPE_UDPV6: ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6; break; case BNXT_RSS_HASH_TYPE_IPV6: ri->iri_rsstype = M_HASHTYPE_RSS_IPV6; break; default: ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; break; } } static int bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri, struct bnxt_cp_ring *cpr, uint16_t flags_type) { struct rx_pkt_cmpl *rcp; struct rx_pkt_cmpl_hi *rcph; struct rx_abuf_cmpl *acp; uint32_t flags2; uint32_t errors; uint8_t ags; int i; rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons]; /* Extract from the first 16-byte BD */ if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) { ri->iri_flowid = le32toh(rcp->rss_hash); bnxt_set_rsstype(ri, rcp->rss_hash_type); } else { ri->iri_rsstype = M_HASHTYPE_NONE; } ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> RX_PKT_CMPL_AGG_BUFS_SFT; ri->iri_nfrags = ags + 1; /* No need to byte-swap the opaque value */ ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff; ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff; ri->iri_frags[0].irf_len = le16toh(rcp->len); ri->iri_len = le16toh(rcp->len); /* Now the second 16-byte BD */ NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons]; flags2 = le32toh(rcph->flags2); errors = le16toh(rcph->errors_v2); if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) == RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { ri->iri_flags |= M_VLANTAG; /* TODO: Should this be the entire 16-bits? */ ri->iri_vtag = le32toh(rcph->metadata) & (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE | RX_PKT_CMPL_METADATA_PRI_MASK); } if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) { ri->iri_csum_flags |= CSUM_IP_CHECKED; if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR)) ri->iri_csum_flags |= CSUM_IP_VALID; } if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC | RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) { ri->iri_csum_flags |= CSUM_L4_CALC; if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR | RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) { ri->iri_csum_flags |= CSUM_L4_VALID; ri->iri_csum_data = 0xffff; } } /* And finally the ag ring stuff. 
*/ for (i=1; i < ri->iri_nfrags; i++) { NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons]; /* No need to byte-swap the opaque value */ ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff); ri->iri_frags[i].irf_idx = acp->opaque & 0xffff; ri->iri_frags[i].irf_len = le16toh(acp->len); ri->iri_len += le16toh(acp->len); } return 0; } static int bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri, struct bnxt_cp_ring *cpr, uint16_t flags_type) { struct rx_tpa_end_cmpl *agend = &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons]; struct rx_tpa_end_cmpl_hi *agendh; struct rx_abuf_cmpl *acp; struct bnxt_full_tpa_start *tpas; uint32_t flags2; uint8_t ags; uint8_t agg_id; int i; /* Get the agg_id */ agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> RX_TPA_END_CMPL_AGG_ID_SFT; tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]); /* Extract from the first 16-byte BD */ if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) { ri->iri_flowid = le32toh(tpas->low.rss_hash); bnxt_set_rsstype(ri, tpas->low.rss_hash_type); } else { ri->iri_rsstype = M_HASHTYPE_NONE; } ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT; ri->iri_nfrags = ags + 1; /* No need to byte-swap the opaque value */ ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff); ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff); ri->iri_frags[0].irf_len = le16toh(tpas->low.len); ri->iri_len = le16toh(tpas->low.len); /* Now the second 16-byte BD */ NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); agendh = &((struct rx_tpa_end_cmpl_hi *)cpr->ring.vaddr)[cpr->cons]; flags2 = le32toh(tpas->high.flags2); if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) == RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) { ri->iri_flags |= M_VLANTAG; /* TODO: Should this be the entire 16-bits? */ ri->iri_vtag = le32toh(tpas->high.metadata) & (RX_TPA_START_CMPL_METADATA_VID_MASK | RX_TPA_START_CMPL_METADATA_DE | RX_TPA_START_CMPL_METADATA_PRI_MASK); } if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) { ri->iri_csum_flags |= CSUM_IP_CHECKED; ri->iri_csum_flags |= CSUM_IP_VALID; } if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) { ri->iri_csum_flags |= CSUM_L4_CALC; ri->iri_csum_flags |= CSUM_L4_VALID; ri->iri_csum_data = 0xffff; } /* Now the ag ring stuff. */ for (i=1; i < ri->iri_nfrags; i++) { NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons]; /* No need to byte-swap the opaque value */ ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff); ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff); ri->iri_frags[i].irf_len = le16toh(acp->len); ri->iri_len += le16toh(acp->len); } /* And finally, the empty BD at the end... */ ri->iri_nfrags++; /* No need to byte-swap the opaque value */ ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff); ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff); ri->iri_frags[i].irf_len = le16toh(agend->len); ri->iri_len += le16toh(agend->len); return 0; } /* If we return anything but zero, iflib will assert... 
*/ static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx]; struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr; struct cmpl_base *cmp; struct rx_tpa_start_cmpl *rtpa; uint16_t flags_type; uint16_t type; uint8_t agg_id; for (;;) { NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); CMPL_PREFETCH_NEXT(cpr, cpr->cons); cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons]; flags_type = le16toh(cmp->type); type = flags_type & CMPL_BASE_TYPE_MASK; switch (type) { case CMPL_BASE_TYPE_RX_L2: return bnxt_pkt_get_l2(softc, ri, cpr, flags_type); case CMPL_BASE_TYPE_RX_TPA_END: return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type); case CMPL_BASE_TYPE_RX_TPA_START: rtpa = (void *)&cmp_q[cpr->cons]; agg_id = (rtpa->agg_id & RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT; softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa; NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); CMPL_PREFETCH_NEXT(cpr, cpr->cons); softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high = ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons]; break; default: device_printf(softc->dev, "Unhandled completion type %d on RXQ %d get\n", type, ri->iri_qsidx); if (type & 1) { NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit); ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx); CMPL_PREFETCH_NEXT(cpr, cpr->cons); } break; } } return 0; } static int bnxt_intr(void *sc) { struct bnxt_softc *softc = (struct bnxt_softc *)sc; device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__); return ENOSYS; } Index: head/sys/dev/e1000/em_txrx.c =================================================================== --- head/sys/dev/e1000/em_txrx.c (revision 332408) +++ head/sys/dev/e1000/em_txrx.c (revision 332409) @@ -1,816 +1,816 @@ /*- * Copyright (c) 2016 Nicole Graziano * Copyright (c) 2017 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "if_em.h" #ifdef RSS #include #include #endif #ifdef VERBOSE_DEBUG #define DPRINTF device_printf #else #define DPRINTF(...) 
#endif /********************************************************************* * Local Function prototypes *********************************************************************/ static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower); static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower); static int em_isc_txd_encap(void *arg, if_pkt_info_t pi); static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear); static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru); static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx); static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget); static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru); static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget); static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri); static void em_receive_checksum(uint32_t status, if_rxd_info_t ri); static int em_determine_rsstype(u32 pkt_info); extern int em_intr(void *arg); struct if_txrx em_txrx = { - em_isc_txd_encap, - em_isc_txd_flush, - em_isc_txd_credits_update, - em_isc_rxd_available, - em_isc_rxd_pkt_get, - em_isc_rxd_refill, - em_isc_rxd_flush, - em_intr + .ift_txd_encap = em_isc_txd_encap, + .ift_txd_flush = em_isc_txd_flush, + .ift_txd_credits_update = em_isc_txd_credits_update, + .ift_rxd_available = em_isc_rxd_available, + .ift_rxd_pkt_get = em_isc_rxd_pkt_get, + .ift_rxd_refill = em_isc_rxd_refill, + .ift_rxd_flush = em_isc_rxd_flush, + .ift_legacy_intr = em_intr }; struct if_txrx lem_txrx = { - em_isc_txd_encap, - em_isc_txd_flush, - em_isc_txd_credits_update, - lem_isc_rxd_available, - lem_isc_rxd_pkt_get, - lem_isc_rxd_refill, - em_isc_rxd_flush, - em_intr + .ift_txd_encap = em_isc_txd_encap, + .ift_txd_flush = em_isc_txd_flush, + .ift_txd_credits_update = em_isc_txd_credits_update, + .ift_rxd_available = lem_isc_rxd_available, + .ift_rxd_pkt_get = lem_isc_rxd_pkt_get, + .ift_rxd_refill = lem_isc_rxd_refill, + .ift_rxd_flush = em_isc_rxd_flush, + .ift_legacy_intr = em_intr }; extern if_shared_ctx_t em_sctx; void em_dump_rs(struct adapter *adapter) { if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que; struct tx_ring *txr; qidx_t i, ntxd, qid, cur; int16_t rs_cidx; uint8_t status; printf("\n"); ntxd = scctx->isc_ntxd[0]; for (qid = 0; qid < adapter->tx_num_queues; qid++) { que = &adapter->tx_queues[qid]; txr = &que->txr; rs_cidx = txr->tx_rs_cidx; if (rs_cidx != txr->tx_rs_pidx) { cur = txr->tx_rsq[rs_cidx]; status = txr->tx_base[cur].upper.fields.status; if (!(status & E1000_TXD_STAT_DD)) printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur); } else { rs_cidx = (rs_cidx-1)&(ntxd-1); cur = txr->tx_rsq[rs_cidx]; printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur); } printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed, txr->tx_rs_pidx); for (i = 0; i < ntxd; i++) { if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD) printf("%d set ", i); } printf("\n"); } } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) on * adapters using advanced tx descriptors * 
**********************************************************************/ static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower) { if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; struct e1000_context_desc *TXD; int cur, hdr_len; hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */ E1000_TXD_DTYP_D | /* Data descr type */ E1000_TXD_CMD_TSE); /* Do TSE on this packet */ /* IP and/or TCP header checksum calculation and insertion. */ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; cur = pi->ipi_pidx; TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place put the checksum. */ TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen; TXD->lower_setup.ip_fields.ipcse = htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1); TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum); /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen; TXD->upper_setup.tcp_fields.tucse = 0; TXD->upper_setup.tcp_fields.tucso = pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum); /* * Payload size per packet w/o any headers. * Length of all headers up to payload. */ TXD->tcp_seg_setup.fields.mss = htole16(pi->ipi_tso_segsz); TXD->tcp_seg_setup.fields.hdr_len = hdr_len; TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | /* Extended descr */ E1000_TXD_CMD_TSE | /* TSE context */ E1000_TXD_CMD_IP | /* Do IP csum */ E1000_TXD_CMD_TCP | /* Do TCP checksum */ (pi->ipi_len - hdr_len)); /* Total len */ txr->tx_tso = TRUE; if (++cur == scctx->isc_ntxd[0]) { cur = 0; } DPRINTF(iflib_get_dev(adapter->ctx), "%s: pidx: %d cur: %d\n", __FUNCTION__, pi->ipi_pidx, cur); return (cur); } #define TSO_WORKAROUND 4 #define DONT_FORCE_CTX 1 /********************************************************************* * The offload context is protocol specific (TCP/UDP) and thus * only needs to be set when the protocol changes. The occasion * of a context change can be a performance detriment, and * might be better just disabled. The reason arises in the way * in which the controller supports pipelined requests from the * Tx data DMA. Up to four requests can be pipelined, and they may * belong to the same packet or to multiple packets. However all * requests for one packet are issued before a request is issued * for a subsequent packet and if a request for the next packet * requires a context change, that request will be stalled * until the previous request completes. This means setting up * a new context effectively disables pipelined Tx data DMA which * in turn greatly slow down performance to send small sized * frames. 
**********************************************************************/ static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower) { struct e1000_context_desc *TXD = NULL; if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; int csum_flags = pi->ipi_csum_flags; int cur, hdr_len; u32 cmd; cur = pi->ipi_pidx; hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen; cmd = adapter->txd_cmd; /* * The 82574L can only remember the *last* context used * regardless of queue that it was use for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (DONT_FORCE_CTX && adapter->tx_num_queues == 1 && txr->csum_lhlen == pi->ipi_ehdrlen && txr->csum_iphlen == pi->ipi_ip_hlen && txr->csum_flags == csum_flags) { /* * Same csum offload context as the previous packets; * just return. */ *txd_upper = txr->csum_txd_upper; *txd_lower = txr->csum_txd_lower; return (cur); } TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; if (csum_flags & CSUM_IP) { *txd_upper |= E1000_TXD_POPTS_IXSM << 8; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen; TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len); TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum); cmd |= E1000_TXD_CMD_IP; } if (csum_flags & (CSUM_TCP|CSUM_UDP)) { uint8_t tucso; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; if (csum_flags & CSUM_TCP) { tucso = hdr_len + offsetof(struct tcphdr, th_sum); cmd |= E1000_TXD_CMD_TCP; } else tucso = hdr_len + offsetof(struct udphdr, uh_sum); TXD->upper_setup.tcp_fields.tucss = hdr_len; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; } txr->csum_lhlen = pi->ipi_ehdrlen; txr->csum_iphlen = pi->ipi_ip_hlen; txr->csum_flags = csum_flags; txr->csum_txd_upper = *txd_upper; txr->csum_txd_lower = *txd_lower; TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); if (++cur == scctx->isc_ntxd[0]) { cur = 0; } DPRINTF(iflib_get_dev(adapter->ctx), "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n", csum_flags, *txd_upper, *txd_lower, hdr_len, cmd); return (cur); } static int em_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; bus_dma_segment_t *segs = pi->ipi_segs; int nsegs = pi->ipi_nsegs; int csum_flags = pi->ipi_csum_flags; int i, j, first, pidx_last; u32 txd_flags, txd_upper = 0, txd_lower = 0; struct e1000_tx_desc *ctxd = NULL; bool do_tso, tso_desc; qidx_t ntxd; txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_TXD_CMD_RS : 0; i = first = pi->ipi_pidx; do_tso = (csum_flags & CSUM_TSO); tso_desc = FALSE; ntxd = scctx->isc_ntxd[0]; /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback. 
*/ if ((!do_tso) && (txr->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; txr->tx_tso = FALSE; } /* Do hardware assists */ if (do_tso) { i = em_tso_setup(sc, pi, &txd_upper, &txd_lower); tso_desc = TRUE; } else if (csum_flags & EM_CSUM_OFFLOAD) { i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower); } if (pi->ipi_mflags & M_VLANTAG) { /* Set the vlan id. */ txd_upper |= htole16(pi->ipi_vtag) << 16; /* Tell hardware to add tag */ txd_lower |= htole32(E1000_TXD_CMD_VLE); } DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i); /* XXX adapter->pcix_82544 -- lem_fill_descriptors */ /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; uint32_t cmd; ctxd = &txr->tx_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd; /* * TSO Workaround: * If this is the last descriptor, we want to * split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) { seg_len -= TSO_WORKAROUND; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32(cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == scctx->isc_ntxd[0]) i = 0; /* Now make the sentinel */ ctxd = &txr->tx_base[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND); ctxd->upper.data = htole32(txd_upper); pidx_last = i; if (++i == scctx->isc_ntxd[0]) i = 0; DPRINTF(iflib_get_dev(sc->ctx), "TSO path pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]); } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32(cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); pidx_last = i; if (++i == scctx->isc_ntxd[0]) i = 0; DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]); } } /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ if (txd_flags) { txr->tx_rsq[txr->tx_rs_pidx] = pidx_last; DPRINTF(iflib_get_dev(sc->ctx), "setting to RS on %d rs_pidx %d first: %d\n", pidx_last, txr->tx_rs_pidx, first); txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1); MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx); } ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags); DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i); pi->ipi_new_pidx = i; return (0); } static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { struct adapter *adapter = arg; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx); } static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; qidx_t processed = 0; int updated; qidx_t cur, prev, ntxd, rs_cidx; int32_t delta; uint8_t status; rs_cidx = txr->tx_rs_cidx; if (rs_cidx == txr->tx_rs_pidx) return (0); cur = txr->tx_rsq[rs_cidx]; MPASS(cur != QIDX_INVALID); status = txr->tx_base[cur].upper.fields.status; updated = !!(status & E1000_TXD_STAT_DD); if (clear == false || updated == 0) return (updated); prev = txr->tx_cidx_processed; ntxd = scctx->isc_ntxd[0]; do { delta = (int32_t)cur - (int32_t)prev; MPASS(prev == 0 || delta != 0); if (delta < 0) delta += ntxd; DPRINTF(iflib_get_dev(adapter->ctx), "%s: cidx_processed=%u cur=%u 
clear=%d delta=%d\n", __FUNCTION__, prev, cur, clear, delta); processed += delta; prev = cur; rs_cidx = (rs_cidx + 1) & (ntxd-1); if (rs_cidx == txr->tx_rs_pidx) break; cur = txr->tx_rsq[rs_cidx]; MPASS(cur != QIDX_INVALID); status = txr->tx_base[cur].upper.fields.status; } while ((status & E1000_TXD_STAT_DD)); txr->tx_rs_cidx = rs_cidx; txr->tx_cidx_processed = prev; return(processed); } static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[iru->iru_qsidx]; struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; uint64_t *paddrs; uint32_t next_pidx, pidx; uint16_t count; int i; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; for (i = 0, next_pidx = pidx; i < count; i++) { rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx]; rxd->buffer_addr = htole64(paddrs[i]); /* status bits must be cleared */ rxd->status = 0; if (++next_pidx == scctx->isc_nrxd[0]) next_pidx = 0; } } static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; uint16_t rxqid = iru->iru_qsidx; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; uint64_t *paddrs; uint32_t next_pidx, pidx; uint16_t count; int i; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; for (i = 0, next_pidx = pidx; i < count; i++) { rxd = &rxr->rx_base[next_pidx]; rxd->read.buffer_addr = htole64(paddrs[i]); /* DD bits must be cleared */ rxd->wb.upper.status_error = 0; if (++next_pidx == scctx->isc_nrxd[0]) next_pidx = 0; } } static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx) { struct adapter *sc = arg; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx); } static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; u32 staterr = 0; int cnt, i; if (budget == 1) { rxd = (struct e1000_rx_desc *)&rxr->rx_base[idx]; staterr = rxd->status; return (staterr & E1000_RXD_STAT_DD); } for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) { rxd = (struct e1000_rx_desc *)&rxr->rx_base[i]; staterr = rxd->status; if ((staterr & E1000_RXD_STAT_DD) == 0) break; if (++i == scctx->isc_nrxd[0]) i = 0; if (staterr & E1000_RXD_STAT_EOP) cnt++; } return (cnt); } static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; u32 staterr = 0; int cnt, i; if (budget == 1) { rxd = &rxr->rx_base[idx]; staterr = le32toh(rxd->wb.upper.status_error); return (staterr & E1000_RXD_STAT_DD); } for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) { rxd = &rxr->rx_base[i]; staterr = le32toh(rxd->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if (++i == scctx->isc_nrxd[0]) { i = 0; } if (staterr & E1000_RXD_STAT_EOP) cnt++; } return (cnt); } static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; 
struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; u16 len; u32 status, errors; bool eop; int i, cidx; status = errors = i = 0; cidx = ri->iri_cidx; do { rxd = (struct e1000_rx_desc *)&rxr->rx_base[cidx]; status = rxd->status; errors = rxd->errors; /* Error Checking then decrement count */ MPASS ((status & E1000_RXD_STAT_DD) != 0); len = le16toh(rxd->length); ri->iri_len += len; eop = (status & E1000_RXD_STAT_EOP) != 0; /* Make sure bad packets are discarded */ if (errors & E1000_RXD_ERR_FRAME_ERR_MASK) { adapter->dropped_pkts++; /* XXX fixup if common */ return (EBADMSG); } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; /* Zero out the receive descriptors status. */ rxd->status = 0; if (++cidx == scctx->isc_nrxd[0]) cidx = 0; i++; } while (!eop); /* XXX add a faster way to look this up */ if (adapter->hw.mac.type >= e1000_82543 && !(status & E1000_RXD_STAT_IXSM)) lem_receive_checksum(status, errors, ri); if (status & E1000_RXD_STAT_VP) { ri->iri_vtag = le16toh(rxd->special); ri->iri_flags |= M_VLANTAG; } ri->iri_nfrags = i; return (0); } static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; u16 len; u32 pkt_info; u32 staterr = 0; bool eop; int i, cidx, vtag; i = vtag = 0; cidx = ri->iri_cidx; do { rxd = &rxr->rx_base[cidx]; staterr = le32toh(rxd->wb.upper.status_error); pkt_info = le32toh(rxd->wb.lower.mrq); /* Error Checking then decrement count */ MPASS ((staterr & E1000_RXD_STAT_DD) != 0); len = le16toh(rxd->wb.upper.length); ri->iri_len += len; eop = (staterr & E1000_RXD_STAT_EOP) != 0; /* Make sure bad packets are discarded */ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { adapter->dropped_pkts++; return EBADMSG; } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; /* Zero out the receive descriptors status. */ rxd->wb.upper.status_error &= htole32(~0xFF); if (++cidx == scctx->isc_nrxd[0]) cidx = 0; i++; } while (!eop); /* XXX add a faster way to look this up */ if (adapter->hw.mac.type >= e1000_82543) em_receive_checksum(staterr, ri); if (staterr & E1000_RXD_STAT_VP) { vtag = le16toh(rxd->wb.upper.vlan); } ri->iri_vtag = vtag; if (vtag) ri->iri_flags |= M_VLANTAG; ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss); ri->iri_rsstype = em_determine_rsstype(pkt_info); ri->iri_nfrags = i; return (0); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri) { /* Did it pass? */ if (status & E1000_RXD_STAT_IPCS && !(errors & E1000_RXD_ERR_IPE)) ri->iri_csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID); if (status & E1000_RXD_STAT_TCPCS) { /* Did it pass? 
*/ if (!(errors & E1000_RXD_ERR_TCPE)) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } } } /******************************************************************** * * Parse the packet type to determine the appropriate hash * ******************************************************************/ static int em_determine_rsstype(u32 pkt_info) { switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) { case E1000_RXDADV_RSSTYPE_IPV4_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case E1000_RXDADV_RSSTYPE_IPV4: return M_HASHTYPE_RSS_IPV4; case E1000_RXDADV_RSSTYPE_IPV6_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case E1000_RXDADV_RSSTYPE_IPV6_EX: return M_HASHTYPE_RSS_IPV6_EX; case E1000_RXDADV_RSSTYPE_IPV6: return M_HASHTYPE_RSS_IPV6; case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX: return M_HASHTYPE_RSS_TCP_IPV6_EX; default: return M_HASHTYPE_OPAQUE; } } static void em_receive_checksum(uint32_t status, if_rxd_info_t ri) { ri->iri_csum_flags = 0; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) return; /* If the IP checksum exists and there is no IP Checksum error */ if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == E1000_RXD_STAT_IPCS) { ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } /* TCP or UDP checksum */ if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == E1000_RXD_STAT_TCPCS) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } if (status & E1000_RXD_STAT_UDPCS) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } } Index: head/sys/dev/e1000/igb_txrx.c =================================================================== --- head/sys/dev/e1000/igb_txrx.c (revision 332408) +++ head/sys/dev/e1000/igb_txrx.c (revision 332409) @@ -1,584 +1,584 @@ /*- * Copyright (c) 2016 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "if_em.h" #ifdef RSS #include #include #endif #ifdef VERBOSE_DEBUG #define DPRINTF device_printf #else #define DPRINTF(...) 
#endif /********************************************************************* * Local Function prototypes *********************************************************************/ static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi); static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear); static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru); static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx); static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget); static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status); static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status); static void igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype); static int igb_determine_rsstype(u16 pkt_info); extern void igb_if_enable_intr(if_ctx_t ctx); extern int em_intr(void *arg); struct if_txrx igb_txrx = { - igb_isc_txd_encap, - igb_isc_txd_flush, - igb_isc_txd_credits_update, - igb_isc_rxd_available, - igb_isc_rxd_pkt_get, - igb_isc_rxd_refill, - igb_isc_rxd_flush, - em_intr + .ift_txd_encap = igb_isc_txd_encap, + .ift_txd_flush = igb_isc_txd_flush, + .ift_txd_credits_update = igb_isc_txd_credits_update, + .ift_rxd_available = igb_isc_rxd_available, + .ift_rxd_pkt_get = igb_isc_rxd_pkt_get, + .ift_rxd_refill = igb_isc_rxd_refill, + .ift_rxd_flush = igb_isc_rxd_flush, + .ift_legacy_intr = em_intr }; extern if_shared_ctx_t em_sctx; /********************************************************************** * * Setup work for hardware segmentation offload (TSO) on * adapters using advanced tx descriptors * **********************************************************************/ static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status) { struct e1000_adv_tx_context_desc *TXD; struct adapter *adapter = txr->adapter; u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0; u32 mss_l4len_idx = 0; u32 paylen; switch(pi->ipi_etype) { case ETHERTYPE_IPV6: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; case ETHERTYPE_IP: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; /* Tell transmit desc to also do IPv4 checksum. 
*/ *olinfo_status |= E1000_TXD_POPTS_IXSM << 8; break; default: panic("%s: CSUM_TSO but no supported IP version (0x%04x)", __func__, ntohs(pi->ipi_etype)); break; } TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx]; /* This is used in the transmit desc in encap */ paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen; /* VLAN MACLEN IPLEN */ if (pi->ipi_mflags & M_VLANTAG) { vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT); } vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= pi->ipi_ip_hlen; TXD->vlan_macip_lens = htole32(vlan_macip_lens); /* ADV DTYPE TUCMD */ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); /* MSS L4LEN IDX */ mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx |= txr->me << 4; TXD->mss_l4len_idx = htole32(mss_l4len_idx); TXD->seqnum_seed = htole32(0); *cmd_type_len |= E1000_ADVTXD_DCMD_TSE; *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT; return (1); } /********************************************************************* * * Advanced Context Descriptor setup for VLAN, CSUM or TSO * **********************************************************************/ static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status) { struct e1000_adv_tx_context_desc *TXD; struct adapter *adapter = txr->adapter; u32 vlan_macip_lens, type_tucmd_mlhl; u32 mss_l4len_idx; mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0; int offload = TRUE; /* First check if TSO is to be used */ if (pi->ipi_csum_flags & CSUM_TSO) return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status)); /* Indicate the whole packet as payload when not doing TSO */ *olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT; /* Now ready a context descriptor */ TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx]; /* ** In advanced descriptors the vlan tag must ** be placed into the context descriptor. Hence ** we need to make one even if not doing offloads. 
*/ if (pi->ipi_mflags & M_VLANTAG) { vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT); } else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) { return (0); } /* Set the ether header length */ vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; switch(pi->ipi_etype) { case ETHERTYPE_IP: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; break; case ETHERTYPE_IPV6: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; default: offload = FALSE; break; } vlan_macip_lens |= pi->ipi_ip_hlen; type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; switch (pi->ipi_ipproto) { case IPPROTO_TCP: if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; break; case IPPROTO_UDP: if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; break; case IPPROTO_SCTP: if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; break; default: offload = FALSE; break; } if (offload) /* For the TX descriptor setup */ *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx = txr->me << 4; /* Now copy bits into descriptor */ TXD->vlan_macip_lens = htole32(vlan_macip_lens); TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); return (1); } static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; int nsegs = pi->ipi_nsegs; bus_dma_segment_t *segs = pi->ipi_segs; union e1000_adv_tx_desc *txd = NULL; int i, j, pidx_last; u32 olinfo_status, cmd_type_len, txd_flags; qidx_t ntxd; pidx_last = olinfo_status = 0; /* Basic descriptor defines */ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT); if (pi->ipi_mflags & M_VLANTAG) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; i = pi->ipi_pidx; ntxd = scctx->isc_ntxd[0]; txd_flags = pi->ipi_flags & IPI_TX_INTR ? 
E1000_ADVTXD_DCMD_RS : 0; /* Consume the first descriptor */ i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status); if (i == scctx->isc_ntxd[0]) i = 0; /* 82575 needs the queue index added */ if (sc->hw.mac.type == e1000_82575) olinfo_status |= txr->me << 4; for (j = 0; j < nsegs; j++) { bus_size_t seglen; bus_addr_t segaddr; txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; seglen = segs[j].ds_len; segaddr = htole64(segs[j].ds_addr); txd->read.buffer_addr = segaddr; txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS | cmd_type_len | seglen); txd->read.olinfo_status = htole32(olinfo_status); pidx_last = i; if (++i == scctx->isc_ntxd[0]) { i = 0; } } if (txd_flags) { txr->tx_rsq[txr->tx_rs_pidx] = pidx_last; txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1); MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx); } txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags); pi->ipi_new_pidx = i; return (0); } static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { struct adapter *adapter = arg; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx); } static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; qidx_t processed = 0; int updated; qidx_t cur, prev, ntxd, rs_cidx; int32_t delta; uint8_t status; rs_cidx = txr->tx_rs_cidx; if (rs_cidx == txr->tx_rs_pidx) return (0); cur = txr->tx_rsq[rs_cidx]; status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status; updated = !!(status & E1000_TXD_STAT_DD); if (!clear || !updated) return (updated); prev = txr->tx_cidx_processed; ntxd = scctx->isc_ntxd[0]; do { delta = (int32_t)cur - (int32_t)prev; MPASS(prev == 0 || delta != 0); if (delta < 0) delta += ntxd; processed += delta; prev = cur; rs_cidx = (rs_cidx + 1) & (ntxd-1); if (rs_cidx == txr->tx_rs_pidx) break; cur = txr->tx_rsq[rs_cidx]; status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status; } while ((status & E1000_TXD_STAT_DD)); txr->tx_rs_cidx = rs_cidx; txr->tx_cidx_processed = prev; return (processed); } static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; uint16_t rxqid = iru->iru_qsidx; struct em_rx_queue *que = &sc->rx_queues[rxqid]; union e1000_adv_rx_desc *rxd; struct rx_ring *rxr = &que->rxr; uint64_t *paddrs; uint32_t next_pidx, pidx; uint16_t count; int i; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; for (i = 0, next_pidx = pidx; i < count; i++) { rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx]; rxd->read.pkt_addr = htole64(paddrs[i]); if (++next_pidx == scctx->isc_nrxd[0]) next_pidx = 0; } } static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx) { struct adapter *sc = arg; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx); } static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; union e1000_adv_rx_desc *rxd; u32 staterr = 0; int cnt, i, iter; if (budget == 1) { rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[idx]; staterr = le32toh(rxd->wb.upper.status_error); return (staterr & 
E1000_RXD_STAT_DD); } for (iter = cnt = 0, i = idx; iter < scctx->isc_nrxd[0] && iter <= budget;) { rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i]; staterr = le32toh(rxd->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if (++i == scctx->isc_nrxd[0]) { i = 0; } if (staterr & E1000_RXD_STAT_EOP) cnt++; iter++; } return (cnt); } /**************************************************************** * Routine sends data which has been dma'ed into host memory * to upper layer. Initialize ri structure. * * Returns 0 upon success, errno on failure ***************************************************************/ static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = iflib_get_ifp(adapter->ctx); union e1000_adv_rx_desc *rxd; u16 pkt_info, len; u16 vtag = 0; u32 ptype; u32 staterr = 0; bool eop; int i = 0; int cidx = ri->iri_cidx; do { rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx]; staterr = le32toh(rxd->wb.upper.status_error); pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info); MPASS ((staterr & E1000_RXD_STAT_DD) != 0); len = le16toh(rxd->wb.upper.length); ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK; ri->iri_len += len; rxr->rx_bytes += ri->iri_len; rxd->wb.upper.status_error = 0; eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP); if (((adapter->hw.mac.type == e1000_i350) || (adapter->hw.mac.type == e1000_i354)) && (staterr & E1000_RXDEXT_STATERR_LB)) vtag = be16toh(rxd->wb.upper.vlan); else vtag = le16toh(rxd->wb.upper.vlan); /* Make sure bad packets are discarded */ if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) { adapter->dropped_pkts++; ++rxr->rx_discarded; return (EBADMSG); } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; if (++cidx == scctx->isc_nrxd[0]) cidx = 0; #ifdef notyet if (rxr->hdr_split == TRUE) { ri->iri_frags[i].irf_flid = 1; ri->iri_frags[i].irf_idx = cidx; if (++cidx == scctx->isc_nrxd[0]) cidx = 0; } #endif i++; } while (!eop); rxr->rx_packets++; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) igb_rx_checksum(staterr, ri, ptype); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (staterr & E1000_RXD_STAT_VP) != 0) { ri->iri_vtag = vtag; ri->iri_flags |= M_VLANTAG; } ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss); ri->iri_rsstype = igb_determine_rsstype(pkt_info); ri->iri_nfrags = i; return (0); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype) { u16 status = (u16)staterr; u8 errors = (u8) (staterr >> 24); bool sctp = FALSE; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) { ri->iri_csum_flags = 0; return; } if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0) sctp = 1; else sctp = 0; if (status & E1000_RXD_STAT_IPCS) { /* Did it pass? 
*/ if (!(errors & E1000_RXD_ERR_IPE)) { /* IP Checksum Good */ ri->iri_csum_flags = CSUM_IP_CHECKED; ri->iri_csum_flags |= CSUM_IP_VALID; } else ri->iri_csum_flags = 0; } if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); if (sctp) /* reassign */ type = CSUM_SCTP_VALID; /* Did it pass? */ if (!(errors & E1000_RXD_ERR_TCPE)) { ri->iri_csum_flags |= type; if (sctp == 0) ri->iri_csum_data = htons(0xffff); } } return; } /******************************************************************** * * Parse the packet type to determine the appropriate hash * ******************************************************************/ static int igb_determine_rsstype(u16 pkt_info) { switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) { case E1000_RXDADV_RSSTYPE_IPV4_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case E1000_RXDADV_RSSTYPE_IPV4: return M_HASHTYPE_RSS_IPV4; case E1000_RXDADV_RSSTYPE_IPV6_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case E1000_RXDADV_RSSTYPE_IPV6_EX: return M_HASHTYPE_RSS_IPV6_EX; case E1000_RXDADV_RSSTYPE_IPV6: return M_HASHTYPE_RSS_IPV6; case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX: return M_HASHTYPE_RSS_TCP_IPV6_EX; default: return M_HASHTYPE_OPAQUE; } } Index: head/sys/dev/ixgbe/ix_txrx.c =================================================================== --- head/sys/dev/ixgbe/ix_txrx.c (revision 332408) +++ head/sys/dev/ixgbe/ix_txrx.c (revision 332409) @@ -1,546 +1,546 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef IXGBE_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixgbe.h" /************************************************************************ * Local Function prototypes ************************************************************************/ static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi); static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear); static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru); static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx); static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget); static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype); static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t); extern void ixgbe_if_enable_intr(if_ctx_t ctx); static int ixgbe_determine_rsstype(u16 pkt_info); struct if_txrx ixgbe_txrx = { - ixgbe_isc_txd_encap, - ixgbe_isc_txd_flush, - ixgbe_isc_txd_credits_update, - ixgbe_isc_rxd_available, - ixgbe_isc_rxd_pkt_get, - ixgbe_isc_rxd_refill, - ixgbe_isc_rxd_flush, - NULL + .ift_txd_encap = ixgbe_isc_txd_encap, + .ift_txd_flush = ixgbe_isc_txd_flush, + .ift_txd_credits_update = ixgbe_isc_txd_credits_update, + .ift_rxd_available = ixgbe_isc_rxd_available, + .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get, + .ift_rxd_refill = ixgbe_isc_rxd_refill, + .ift_rxd_flush = ixgbe_isc_rxd_flush, + .ift_legacy_intr = NULL }; extern if_shared_ctx_t ixgbe_sctx; /************************************************************************ * ixgbe_tx_ctx_setup * * Advanced Context Descriptor setup for VLAN, CSUM or TSO * ************************************************************************/ static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi) { u32 vlan_macip_lens, type_tucmd_mlhl; u32 olinfo_status, mss_l4len_idx, pktlen, offload; u8 ehdrlen; offload = TRUE; olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0; /* VLAN MACLEN IPLEN */ vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT); /* * Some of our VF devices need a context descriptor for every * packet. That means the ehdrlen needs to be non-zero in order * for the host driver not to flag a malicious event. The stack * will most likely populate this for all other reasons of why * this function was called. */ if (pi->ipi_ehdrlen == 0) { ehdrlen = ETHER_HDR_LEN; ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0; } else ehdrlen = pi->ipi_ehdrlen; vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; pktlen = pi->ipi_len; /* First check if TSO is to be used */ if (pi->ipi_csum_flags & CSUM_TSO) { /* This is used in the transmit desc in encap */ pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen; mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); } olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT; if (pi->ipi_flags & IPI_TX_IPV4) { type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; /* Tell transmit desc to also do IPv4 checksum. 
*/ if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; } else if (pi->ipi_flags & IPI_TX_IPV6) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; else offload = FALSE; vlan_macip_lens |= pi->ipi_ip_hlen; switch (pi->ipi_ipproto) { case IPPROTO_TCP: if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; else offload = FALSE; break; case IPPROTO_UDP: if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; else offload = FALSE; break; case IPPROTO_SCTP: if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; else offload = FALSE; break; default: offload = FALSE; break; } /* Insert L4 checksum into data descriptors */ if (offload) olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; /* Now copy bits into descriptor */ TXD->vlan_macip_lens = htole32(vlan_macip_lens); TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); return (olinfo_status); } /* ixgbe_tx_ctx_setup */ /************************************************************************ * ixgbe_isc_txd_encap ************************************************************************/ static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; int nsegs = pi->ipi_nsegs; bus_dma_segment_t *segs = pi->ipi_segs; union ixgbe_adv_tx_desc *txd = NULL; struct ixgbe_adv_tx_context_desc *TXD; int i, j, first, pidx_last; u32 olinfo_status, cmd, flags; qidx_t ntxd; cmd = (IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); if (pi->ipi_mflags & M_VLANTAG) cmd |= IXGBE_ADVTXD_DCMD_VLE; i = first = pi->ipi_pidx; flags = (pi->ipi_flags & IPI_TX_INTR) ? 
IXGBE_TXD_CMD_RS : 0; ntxd = scctx->isc_ntxd[0]; TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first]; if ((pi->ipi_csum_flags & CSUM_OFFLOAD) || (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) || pi->ipi_vtag) { /********************************************* * Set up the appropriate offload context * this will consume the first descriptor *********************************************/ olinfo_status = ixgbe_tx_ctx_setup(TXD, pi); if (pi->ipi_csum_flags & CSUM_TSO) { cmd |= IXGBE_ADVTXD_DCMD_TSE; ++txr->tso_tx; } if (++i == scctx->isc_ntxd[0]) i = 0; } else { /* Indicate the whole packet as payload when not doing TSO */ olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT; } olinfo_status |= IXGBE_ADVTXD_CC; for (j = 0; j < nsegs; j++) { bus_size_t seglen; txd = &txr->tx_base[i]; seglen = segs[j].ds_len; txd->read.buffer_addr = htole64(segs[j].ds_addr); txd->read.cmd_type_len = htole32(cmd | seglen); txd->read.olinfo_status = htole32(olinfo_status); pidx_last = i; if (++i == scctx->isc_ntxd[0]) { i = 0; } } if (flags) { txr->tx_rsq[txr->tx_rs_pidx] = pidx_last; txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1); } txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags); txr->bytes += pi->ipi_len; pi->ipi_new_pidx = i; ++txr->total_packets; return (0); } /* ixgbe_isc_txd_encap */ /************************************************************************ * ixgbe_isc_txd_flush ************************************************************************/ static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { struct adapter *sc = arg; struct ix_tx_queue *que = &sc->tx_queues[txqid]; struct tx_ring *txr = &que->txr; IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx); } /* ixgbe_isc_txd_flush */ /************************************************************************ * ixgbe_isc_txd_credits_update ************************************************************************/ static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que = &sc->tx_queues[txqid]; struct tx_ring *txr = &que->txr; qidx_t processed = 0; int updated; qidx_t cur, prev, ntxd, rs_cidx; int32_t delta; uint8_t status; rs_cidx = txr->tx_rs_cidx; if (rs_cidx == txr->tx_rs_pidx) return (0); cur = txr->tx_rsq[rs_cidx]; status = txr->tx_base[cur].wb.status; updated = !!(status & IXGBE_TXD_STAT_DD); if (clear == false || updated == 0) return (updated); prev = txr->tx_cidx_processed; ntxd = scctx->isc_ntxd[0]; do { delta = (int32_t)cur - (int32_t)prev; if (delta < 0) delta += ntxd; processed += delta; prev = cur; rs_cidx = (rs_cidx + 1) & (ntxd - 1); if (rs_cidx == txr->tx_rs_pidx) break; cur = txr->tx_rsq[rs_cidx]; status = txr->tx_base[cur].wb.status; } while ((status & IXGBE_TXD_STAT_DD)); txr->tx_rs_cidx = rs_cidx; txr->tx_cidx_processed = prev; return (processed); } /* ixgbe_isc_txd_credits_update */ /************************************************************************ * ixgbe_isc_rxd_refill ************************************************************************/ static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx]; struct rx_ring *rxr = &que->rxr; uint64_t *paddrs; int i; uint32_t next_pidx, pidx; uint16_t count; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; for (i = 0, next_pidx = pidx; i < count; i++) { rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]); if (++next_pidx == 
sc->shared->isc_nrxd[0]) next_pidx = 0; } } /* ixgbe_isc_rxd_refill */ /************************************************************************ * ixgbe_isc_rxd_flush ************************************************************************/ static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[qsidx]; struct rx_ring *rxr = &que->rxr; IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx); } /* ixgbe_isc_rxd_flush */ /************************************************************************ * ixgbe_isc_rxd_available ************************************************************************/ static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[qsidx]; struct rx_ring *rxr = &que->rxr; union ixgbe_adv_rx_desc *rxd; u32 staterr; int cnt, i, nrxd; if (budget == 1) { rxd = &rxr->rx_base[pidx]; staterr = le32toh(rxd->wb.upper.status_error); return (staterr & IXGBE_RXD_STAT_DD); } nrxd = sc->shared->isc_nrxd[0]; // em has cnt < nrxd. off by 1 here or there? // for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) { for (cnt = 0, i = pidx; cnt < nrxd-1 && cnt <= budget;) { rxd = &rxr->rx_base[i]; staterr = le32toh(rxd->wb.upper.status_error); if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; if (++i == nrxd) i = 0; if (staterr & IXGBE_RXD_STAT_EOP) cnt++; } return (cnt); } /* ixgbe_isc_rxd_available */ /************************************************************************ * ixgbe_isc_rxd_pkt_get * * Routine sends data which has been dma'ed into host memory * to upper layer. Initialize ri structure. * * Returns 0 upon success, errno on failure ************************************************************************/ static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = iflib_get_ifp(adapter->ctx); union ixgbe_adv_rx_desc *rxd; u16 pkt_info, len, cidx, i; u16 vtag = 0; u32 ptype; u32 staterr = 0; bool eop; i = 0; cidx = ri->iri_cidx; do { rxd = &rxr->rx_base[cidx]; staterr = le32toh(rxd->wb.upper.status_error); pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info); /* Error Checking then decrement count */ MPASS ((staterr & IXGBE_RXD_STAT_DD) != 0); len = le16toh(rxd->wb.upper.length); ptype = le32toh(rxd->wb.lower.lo_dword.data) & IXGBE_RXDADV_PKTTYPE_MASK; ri->iri_len += len; rxr->bytes += len; rxd->wb.upper.status_error = 0; eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); if (staterr & IXGBE_RXD_STAT_VP) { vtag = le16toh(rxd->wb.upper.vlan); } else { vtag = 0; } /* Make sure bad packets are discarded */ if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { #if __FreeBSD_version >= 1100036 if (adapter->feat_en & IXGBE_FEATURE_VF) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #endif rxr->rx_discarded++; return (EBADMSG); } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; if (++cidx == adapter->shared->isc_nrxd[0]) cidx = 0; i++; /* even a 16K packet shouldn't consume more than 8 clusters */ MPASS(i < 9); } while (!eop); rxr->rx_packets++; rxr->packets++; rxr->rx_bytes += ri->iri_len; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) ixgbe_rx_checksum(staterr, ri, ptype); ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss); ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info); ri->iri_vtag = vtag; 
ri->iri_nfrags = i; if (vtag) ri->iri_flags |= M_VLANTAG; return (0); } /* ixgbe_isc_rxd_pkt_get */ /************************************************************************ * ixgbe_rx_checksum * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. ************************************************************************/ static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype) { u16 status = (u16)staterr; u8 errors = (u8)(staterr >> 24); bool sctp = false; if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) sctp = TRUE; /* IPv4 checksum */ if (status & IXGBE_RXD_STAT_IPCS) { if (!(errors & IXGBE_RXD_ERR_IPE)) { /* IP Checksum Good */ ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; } else ri->iri_csum_flags = 0; } /* TCP/UDP/SCTP checksum */ if (status & IXGBE_RXD_STAT_L4CS) { u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); #if __FreeBSD_version >= 800000 if (sctp) type = CSUM_SCTP_VALID; #endif if (!(errors & IXGBE_RXD_ERR_TCPE)) { ri->iri_csum_flags |= type; if (!sctp) ri->iri_csum_data = htons(0xffff); } } } /* ixgbe_rx_checksum */ /************************************************************************ * ixgbe_determine_rsstype * * Parse the packet type to determine the appropriate hash ************************************************************************/ static int ixgbe_determine_rsstype(u16 pkt_info) { switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) { case IXGBE_RXDADV_RSSTYPE_IPV4_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV4: return M_HASHTYPE_RSS_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV6_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_EX: return M_HASHTYPE_RSS_IPV6_EX; case IXGBE_RXDADV_RSSTYPE_IPV6: return M_HASHTYPE_RSS_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX: return M_HASHTYPE_RSS_TCP_IPV6_EX; case IXGBE_RXDADV_RSSTYPE_IPV4_UDP: return M_HASHTYPE_RSS_UDP_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV6_UDP: return M_HASHTYPE_RSS_UDP_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX: return M_HASHTYPE_RSS_UDP_IPV6_EX; default: return M_HASHTYPE_OPAQUE; } } /* ixgbe_determine_rsstype */
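
Editor's note: the substantive change in this revision is the conversion of the struct if_txrx initializers (bnxt, em/igb, ixgbe) from positional to C99 designated initializers, as in the ixgbe_txrx hunk above. A minimal sketch of what a driver's table looks like after this change, using a hypothetical "foo" driver whose foo_isc_* callbacks are placeholders, not functions from this patch:

	struct if_txrx foo_txrx = {
		/* designated initializers make the slot->callback mapping explicit */
		.ift_txd_encap = foo_isc_txd_encap,
		.ift_txd_flush = foo_isc_txd_flush,
		.ift_txd_credits_update = foo_isc_txd_credits_update,
		.ift_rxd_available = foo_isc_rxd_available,
		.ift_rxd_pkt_get = foo_isc_rxd_pkt_get,
		.ift_rxd_refill = foo_isc_rxd_refill,
		.ift_rxd_flush = foo_isc_rxd_flush,
		.ift_legacy_intr = NULL		/* no legacy interrupt handler */
	};

Unnamed members are zero-initialized, and the table no longer silently breaks if fields are added to or reordered within struct if_txrx.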
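
Editor's note: both igb_isc_txd_credits_update() and ixgbe_isc_txd_credits_update() above count completed descriptors by walking the RS-bit queue and computing a wrap-aware delta between consumed indices. A small standalone sketch of that arithmetic (ring_delta is a hypothetical helper, not part of the patch), assuming ntxd is a power of two as required by the (ntxd - 1) masking in the drivers:

	/* Descriptors consumed between prev and cur on a ring of ntxd entries. */
	static inline int
	ring_delta(int cur, int prev, int ntxd)
	{
		int delta = cur - prev;

		if (delta < 0)		/* index wrapped past the end of the ring */
			delta += ntxd;
		return (delta);
	}

For example, with ntxd = 1024, prev = 1020 and cur = 4, the delta is -1016 + 1024 = 8: descriptors 1020 through 1023 and 0 through 3 were consumed.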