Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/cxgbe/t4_sge.c
Show First 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/socketvar.h> | #include <sys/socketvar.h> | ||||
#include <sys/counter.h> | #include <sys/counter.h> | ||||
#include <net/bpf.h> | #include <net/bpf.h> | ||||
#include <net/ethernet.h> | #include <net/ethernet.h> | ||||
#include <net/if.h> | #include <net/if.h> | ||||
#include <net/if_vlan_var.h> | #include <net/if_vlan_var.h> | ||||
#include <net/if_vxlan.h> | |||||
#include <netinet/in.h> | #include <netinet/in.h> | ||||
#include <netinet/ip.h> | #include <netinet/ip.h> | ||||
#include <netinet/ip6.h> | #include <netinet/ip6.h> | ||||
#include <netinet/tcp.h> | #include <netinet/tcp.h> | ||||
#include <netinet/udp.h> | #include <netinet/udp.h> | ||||
#include <machine/in_cksum.h> | #include <machine/in_cksum.h> | ||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
▲ Show 20 Lines • Show All 195 Lines • ▼ Show 20 Lines | |||||
static int refill_fl(struct adapter *, struct sge_fl *, int); | static int refill_fl(struct adapter *, struct sge_fl *, int); | ||||
static void refill_sfl(void *); | static void refill_sfl(void *); | ||||
static int alloc_fl_sdesc(struct sge_fl *); | static int alloc_fl_sdesc(struct sge_fl *); | ||||
static void free_fl_sdesc(struct adapter *, struct sge_fl *); | static void free_fl_sdesc(struct adapter *, struct sge_fl *); | ||||
static int find_refill_source(struct adapter *, int, bool); | static int find_refill_source(struct adapter *, int, bool); | ||||
static void add_fl_to_sfl(struct adapter *, struct sge_fl *); | static void add_fl_to_sfl(struct adapter *, struct sge_fl *); | ||||
static inline void get_pkt_gl(struct mbuf *, struct sglist *); | static inline void get_pkt_gl(struct mbuf *, struct sglist *); | ||||
static inline u_int txpkt_len16(u_int, u_int); | static inline u_int txpkt_len16(u_int, const u_int); | ||||
static inline u_int txpkt_vm_len16(u_int, u_int); | static inline u_int txpkt_vm_len16(u_int, const u_int); | ||||
static inline void calculate_mbuf_len16(struct adapter *, struct mbuf *); | |||||
static inline u_int txpkts0_len16(u_int); | static inline u_int txpkts0_len16(u_int); | ||||
static inline u_int txpkts1_len16(void); | static inline u_int txpkts1_len16(void); | ||||
static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int); | static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int); | ||||
static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *, | static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *, | ||||
u_int); | u_int); | ||||
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, | static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, | ||||
struct mbuf *); | struct mbuf *); | ||||
static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *, | static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *, | ||||
▲ Show 20 Lines • Show All 1,627 Lines • ▼ Show 20 Lines | eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d, | ||||
struct mbuf *m0; | struct mbuf *m0; | ||||
struct ifnet *ifp = rxq->ifp; | struct ifnet *ifp = rxq->ifp; | ||||
struct sge_fl *fl = &rxq->fl; | struct sge_fl *fl = &rxq->fl; | ||||
struct vi_info *vi = ifp->if_softc; | struct vi_info *vi = ifp->if_softc; | ||||
const struct cpl_rx_pkt *cpl; | const struct cpl_rx_pkt *cpl; | ||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
struct lro_ctrl *lro = &rxq->lro; | struct lro_ctrl *lro = &rxq->lro; | ||||
#endif | #endif | ||||
uint16_t err_vec, tnl_type, tnlhdr_len; | |||||
static const int sw_hashtype[4][2] = { | static const int sw_hashtype[4][2] = { | ||||
{M_HASHTYPE_NONE, M_HASHTYPE_NONE}, | {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, | ||||
{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, | {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, | ||||
{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, | {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, | ||||
{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, | {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, | ||||
}; | }; | ||||
MPASS(plen > sc->params.sge.fl_pktshift); | MPASS(plen > sc->params.sge.fl_pktshift); | ||||
Show All 30 Lines | #endif | ||||
m0->m_data += sc->params.sge.fl_pktshift; | m0->m_data += sc->params.sge.fl_pktshift; | ||||
have_mbuf: | have_mbuf: | ||||
m0->m_pkthdr.rcvif = ifp; | m0->m_pkthdr.rcvif = ifp; | ||||
M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); | M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); | ||||
m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); | m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); | ||||
cpl = (const void *)(&d->rss + 1); | cpl = (const void *)(&d->rss + 1); | ||||
if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { | if (sc->params.tp.rx_pkt_encap) { | ||||
if (ifp->if_capenable & IFCAP_RXCSUM && | const uint16_t ev = be16toh(cpl->err_vec); | ||||
cpl->l2info & htobe32(F_RXF_IP)) { | |||||
m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | | |||||
CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | |||||
rxq->rxcsum++; | |||||
} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && | |||||
cpl->l2info & htobe32(F_RXF_IP6)) { | |||||
m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | | |||||
CSUM_PSEUDO_HDR); | |||||
rxq->rxcsum++; | |||||
} | |||||
if (__predict_false(cpl->ip_frag)) | err_vec = G_T6_COMPR_RXERR_VEC(ev); | ||||
tnl_type = G_T6_RX_TNL_TYPE(ev); | |||||
tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev); | |||||
} else { | |||||
err_vec = be16toh(cpl->err_vec); | |||||
tnl_type = 0; | |||||
} | |||||
if (cpl->csum_calc && err_vec == 0 && | |||||
((ifp->if_capenable & IFCAP_RXCSUM && cpl->l2info & htobe32(F_RXF_IP)) || | |||||
(ifp->if_capenable & IFCAP_RXCSUM_IPV6 && cpl->l2info & htobe32(F_RXF_IP6)))) { | |||||
m0->m_pkthdr.csum_data = be16toh(cpl->csum); | m0->m_pkthdr.csum_data = be16toh(cpl->csum); | ||||
else | if (tnl_type == 0) { | ||||
m0->m_pkthdr.csum_flags = CSUM_L3_CALC | CSUM_L3_VALID | | |||||
CSUM_L4_CALC | CSUM_L4_VALID; | |||||
rxq->rxcsum++; | |||||
} else { | |||||
MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN); | |||||
if (__predict_false(cpl->ip_frag)) { | |||||
/* | |||||
* csum_data is for the inner frame (which is an | |||||
* IP fragment) and is not 0xffff. There is no | |||||
* way to pass the inner csum_data to the stack. | |||||
* We don't want the stack to use the inner | |||||
* csum_data to validate the outer frame or it | |||||
* will get rejected. So we fix csum_data here | |||||
* and let sw do the checksum of inner IP | |||||
* fragments. | |||||
* | |||||
* XXX: Need 32b for csum_data2 in an rx mbuf. | |||||
* Maybe stuff it into rcv_tstmp? | |||||
*/ | |||||
m0->m_pkthdr.csum_data = 0xffff; | m0->m_pkthdr.csum_data = 0xffff; | ||||
m0->m_pkthdr.csum_flags = CSUM_L3_CALC | | |||||
CSUM_L3_VALID | CSUM_L4_CALC | | |||||
CSUM_L4_VALID; | |||||
} else { | |||||
m0->m_pkthdr.csum_flags = CSUM_ENCAP_VXLAN | | |||||
CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | | |||||
CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID | | |||||
CSUM_L3_CALC | CSUM_L3_VALID | | |||||
CSUM_L4_CALC | CSUM_L4_VALID; | |||||
MPASS(m0->m_pkthdr.csum_data == 0xffff); | |||||
} | } | ||||
rxq->vxlan_rxcsum++; | |||||
} | |||||
} | |||||
if (cpl->vlan_ex) { | if (cpl->vlan_ex) { | ||||
m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); | m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); | ||||
m0->m_flags |= M_VLANTAG; | m0->m_flags |= M_VLANTAG; | ||||
rxq->vlan_extraction++; | rxq->vlan_extraction++; | ||||
} | } | ||||
if (rxq->iq.flags & IQ_RX_TIMESTAMP) { | if (rxq->iq.flags & IQ_RX_TIMESTAMP) { | ||||
Show All 10 Lines | #ifdef notyet | ||||
m0->m_flags |= M_TSTMP; | m0->m_flags |= M_TSTMP; | ||||
#endif | #endif | ||||
} | } | ||||
#ifdef NUMA | #ifdef NUMA | ||||
m0->m_pkthdr.numa_domain = ifp->if_numa_domain; | m0->m_pkthdr.numa_domain = ifp->if_numa_domain; | ||||
#endif | #endif | ||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
if (rxq->iq.flags & IQ_LRO_ENABLED && | if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && | ||||
(M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || | (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || | ||||
M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { | M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { | ||||
if (sort_before_lro(lro)) { | if (sort_before_lro(lro)) { | ||||
tcp_lro_queue_mbuf(lro, m0); | tcp_lro_queue_mbuf(lro, m0); | ||||
return (0); /* queued for sort, then LRO */ | return (0); /* queued for sort, then LRO */ | ||||
} | } | ||||
if (tcp_lro_rx(lro, m0, 0) == 0) | if (tcp_lro_rx(lro, m0, 0) == 0) | ||||
return (0); /* queued for LRO */ | return (0); /* queued for LRO */ | ||||
▲ Show 20 Lines • Show All 154 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
} | } | ||||
static inline int | static inline int | ||||
mbuf_nsegs(struct mbuf *m) | mbuf_nsegs(struct mbuf *m) | ||||
{ | { | ||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
KASSERT(m->m_pkthdr.l5hlen > 0, | KASSERT(m->m_pkthdr.inner_l5hlen > 0, | ||||
("%s: mbuf %p missing information on # of segments.", __func__, m)); | ("%s: mbuf %p missing information on # of segments.", __func__, m)); | ||||
return (m->m_pkthdr.l5hlen); | return (m->m_pkthdr.inner_l5hlen); | ||||
} | } | ||||
static inline void | static inline void | ||||
set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) | set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) | ||||
{ | { | ||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
m->m_pkthdr.l5hlen = nsegs; | m->m_pkthdr.inner_l5hlen = nsegs; | ||||
} | } | ||||
static inline int | static inline int | ||||
mbuf_cflags(struct mbuf *m) | mbuf_cflags(struct mbuf *m) | ||||
{ | { | ||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.PH_loc.eight[4]); | return (m->m_pkthdr.PH_loc.eight[4]); | ||||
▲ Show 20 Lines • Show All 109 Lines • ▼ Show 20 Lines | if (m == NULL) | ||||
return (NULL); | return (NULL); | ||||
m->m_pkthdr.len = len; | m->m_pkthdr.len = len; | ||||
m->m_len = len; | m->m_len = len; | ||||
set_mbuf_cflags(m, MC_RAW_WR); | set_mbuf_cflags(m, MC_RAW_WR); | ||||
set_mbuf_len16(m, howmany(len, 16)); | set_mbuf_len16(m, howmany(len, 16)); | ||||
return (m); | return (m); | ||||
} | } | ||||
static inline int | static inline bool | ||||
needs_hwcsum(struct mbuf *m) | needs_hwcsum(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | | |||||
CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | | |||||
CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP | | |||||
CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP | | |||||
CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP | | return (m->m_pkthdr.csum_flags & csum_flags); | ||||
CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6)); | |||||
} | } | ||||
static inline int | static inline bool | ||||
needs_tso(struct mbuf *m) | needs_tso(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO | | |||||
CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & CSUM_TSO); | return (m->m_pkthdr.csum_flags & csum_flags); | ||||
} | } | ||||
static inline int | static inline bool | ||||
needs_vxlan_csum(struct mbuf *m) | |||||
{ | |||||
M_ASSERTPKTHDR(m); | |||||
return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); | |||||
} | |||||
static inline bool | |||||
needs_vxlan_tso(struct mbuf *m) | |||||
{ | |||||
const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO | | |||||
CSUM_INNER_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | |||||
return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && | |||||
(m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); | |||||
} | |||||
static inline bool | |||||
needs_inner_tcp_csum(struct mbuf *m) | |||||
{ | |||||
const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | |||||
return (m->m_pkthdr.csum_flags & csum_flags); | |||||
} | |||||
static inline bool | |||||
needs_l3_csum(struct mbuf *m) | needs_l3_csum(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP | | |||||
CSUM_INNER_IP_TSO; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); | return (m->m_pkthdr.csum_flags & csum_flags); | ||||
} | } | ||||
static inline int | static inline bool | ||||
needs_tcp_csum(struct mbuf *m) | needs_outer_tcp_csum(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP | | |||||
CSUM_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TCP_IPV6 | CSUM_TSO)); | |||||
return (m->m_pkthdr.csum_flags & csum_flags); | |||||
} | } | ||||
#ifdef RATELIMIT | #ifdef RATELIMIT | ||||
static inline int | static inline bool | ||||
needs_l4_csum(struct mbuf *m) | needs_outer_l4_csum(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO | | |||||
CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | | return (m->m_pkthdr.csum_flags & csum_flags); | ||||
CSUM_TCP_IPV6 | CSUM_TSO)); | |||||
} | } | ||||
static inline int | static inline bool | ||||
needs_udp_csum(struct mbuf *m) | needs_outer_udp_csum(struct mbuf *m) | ||||
{ | { | ||||
const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP; | |||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)); | |||||
return (m->m_pkthdr.csum_flags & csum_flags); | |||||
} | } | ||||
#endif | #endif | ||||
static inline int | static inline bool | ||||
needs_vlan_insertion(struct mbuf *m) | needs_vlan_insertion(struct mbuf *m) | ||||
{ | { | ||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
return (m->m_flags & M_VLANTAG); | return (m->m_flags & M_VLANTAG); | ||||
} | } | ||||
kib: Hmm, does it make sense to extract all that helpers into some common infrastructure header ? | |||||
Done Inline ActionsThey could be moved but are all needed in t4_sge.c only. np: They could be moved but are all needed in t4_sge.c only. | |||||
Not Done Inline ActionsI mean, this seems to be useful for other drivers. kib: I mean, this seems to be useful for other drivers. | |||||
static void * | static void * | ||||
m_advance(struct mbuf **pm, int *poffset, int len) | m_advance(struct mbuf **pm, int *poffset, int len) | ||||
{ | { | ||||
struct mbuf *m = *pm; | struct mbuf *m = *pm; | ||||
int offset = *poffset; | int offset = *poffset; | ||||
uintptr_t p = 0; | uintptr_t p = 0; | ||||
▲ Show 20 Lines • Show All 117 Lines • ▼ Show 20 Lines | if (paddr == nextaddr) | ||||
nsegs--; | nsegs--; | ||||
nextaddr = pmap_kextract(va + len - 1) + 1; | nextaddr = pmap_kextract(va + len - 1) + 1; | ||||
} | } | ||||
return (nsegs); | return (nsegs); | ||||
} | } | ||||
/* | /* | ||||
* The maximum number of segments that can fit in a WR. | |||||
*/ | |||||
static int | |||||
max_nsegs_allowed(struct mbuf *m) | |||||
{ | |||||
if (needs_tso(m)) { | |||||
if (needs_vxlan_tso(m)) | |||||
return (TX_SGL_SEGS_VXLAN_TSO); | |||||
else | |||||
return (TX_SGL_SEGS_TSO); | |||||
} | |||||
return (TX_SGL_SEGS); | |||||
} | |||||
/* | |||||
* Analyze the mbuf to determine its tx needs. The mbuf passed in may change: | * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: | ||||
* a) caller can assume it's been freed if this function returns with an error. | * a) caller can assume it's been freed if this function returns with an error. | ||||
* b) it may get defragged up if the gather list is too long for the hardware. | * b) it may get defragged up if the gather list is too long for the hardware. | ||||
*/ | */ | ||||
int | int | ||||
parse_pkt(struct adapter *sc, struct mbuf **mp) | parse_pkt(struct adapter *sc, struct mbuf **mp) | ||||
{ | { | ||||
struct mbuf *m0 = *mp, *m; | struct mbuf *m0 = *mp, *m; | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | if (cst != NULL && cst->type == IF_SND_TAG_TYPE_TLS) { | ||||
rc = t6_ktls_parse_pkt(m0, &nsegs, &len16); | rc = t6_ktls_parse_pkt(m0, &nsegs, &len16); | ||||
if (rc != 0) | if (rc != 0) | ||||
goto fail; | goto fail; | ||||
set_mbuf_nsegs(m0, nsegs); | set_mbuf_nsegs(m0, nsegs); | ||||
set_mbuf_len16(m0, len16); | set_mbuf_len16(m0, len16); | ||||
return (0); | return (0); | ||||
} | } | ||||
#endif | #endif | ||||
if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { | if (nsegs > max_nsegs_allowed(m0)) { | ||||
if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { | if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { | ||||
rc = EFBIG; | rc = EFBIG; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
*mp = m0 = m; /* update caller's copy after defrag */ | *mp = m0 = m; /* update caller's copy after defrag */ | ||||
goto restart; | goto restart; | ||||
} | } | ||||
if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && | if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && | ||||
!(cflags & MC_NOMAP))) { | !(cflags & MC_NOMAP))) { | ||||
m0 = m_pullup(m0, m0->m_pkthdr.len); | m0 = m_pullup(m0, m0->m_pkthdr.len); | ||||
if (m0 == NULL) { | if (m0 == NULL) { | ||||
/* Should have left well enough alone. */ | /* Should have left well enough alone. */ | ||||
rc = EFBIG; | rc = EFBIG; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
*mp = m0; /* update caller's copy after pullup */ | *mp = m0; /* update caller's copy after pullup */ | ||||
goto restart; | goto restart; | ||||
} | } | ||||
set_mbuf_nsegs(m0, nsegs); | set_mbuf_nsegs(m0, nsegs); | ||||
set_mbuf_cflags(m0, cflags); | set_mbuf_cflags(m0, cflags); | ||||
if (sc->flags & IS_VF) | calculate_mbuf_len16(sc, m0); | ||||
set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); | |||||
else | |||||
set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); | |||||
#ifdef RATELIMIT | #ifdef RATELIMIT | ||||
/* | /* | ||||
* Ethofld is limited to TCP and UDP for now, and only when L4 hw | * Ethofld is limited to TCP and UDP for now, and only when L4 hw | ||||
* checksumming is enabled. needs_l4_csum happens to check for all the | * checksumming is enabled. needs_outer_l4_csum happens to check for | ||||
* right things. | * all the right things. | ||||
*/ | */ | ||||
if (__predict_false(needs_eo(cst) && !needs_l4_csum(m0))) { | if (__predict_false(needs_eo(cst) && !needs_outer_l4_csum(m0))) { | ||||
m_snd_tag_rele(m0->m_pkthdr.snd_tag); | m_snd_tag_rele(m0->m_pkthdr.snd_tag); | ||||
m0->m_pkthdr.snd_tag = NULL; | m0->m_pkthdr.snd_tag = NULL; | ||||
m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; | m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; | ||||
cst = NULL; | cst = NULL; | ||||
} | } | ||||
#endif | #endif | ||||
if (!needs_hwcsum(m0) | if (!needs_hwcsum(m0) | ||||
Show All 15 Lines | if (eh_type == ETHERTYPE_VLAN) { | ||||
m0->m_pkthdr.l2hlen = sizeof(*eh); | m0->m_pkthdr.l2hlen = sizeof(*eh); | ||||
offset = 0; | offset = 0; | ||||
l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); | l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); | ||||
switch (eh_type) { | switch (eh_type) { | ||||
#ifdef INET6 | #ifdef INET6 | ||||
case ETHERTYPE_IPV6: | case ETHERTYPE_IPV6: | ||||
m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); | |||||
break; | |||||
#endif | |||||
#ifdef INET | |||||
case ETHERTYPE_IP: | |||||
{ | { | ||||
struct ip6_hdr *ip6 = l3hdr; | struct ip *ip = l3hdr; | ||||
MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); | if (needs_vxlan_csum(m0)) { | ||||
/* Driver will do the outer IP hdr checksum. */ | |||||
ip->ip_sum = 0; | |||||
if (needs_vxlan_tso(m0)) { | |||||
const uint16_t ipl = ip->ip_len; | |||||
m0->m_pkthdr.l3hlen = sizeof(*ip6); | ip->ip_len = 0; | ||||
ip->ip_sum = ~in_cksum_hdr(ip); | |||||
ip->ip_len = ipl; | |||||
} else | |||||
ip->ip_sum = in_cksum_hdr(ip); | |||||
} | |||||
m0->m_pkthdr.l3hlen = ip->ip_hl << 2; | |||||
break; | break; | ||||
} | } | ||||
#endif | #endif | ||||
default: | |||||
panic("%s: ethertype 0x%04x unknown. if_cxgbe must be compiled" | |||||
" with the same INET/INET6 options as the kernel.", | |||||
__func__, eh_type); | |||||
} | |||||
if (needs_vxlan_csum(m0)) { | |||||
m0->m_pkthdr.l4hlen = sizeof(struct udphdr); | |||||
m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); | |||||
/* Inner headers. */ | |||||
eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + | |||||
sizeof(struct udphdr) + sizeof(struct vxlan_header)); | |||||
eh_type = ntohs(eh->ether_type); | |||||
if (eh_type == ETHERTYPE_VLAN) { | |||||
struct ether_vlan_header *evh = (void *)eh; | |||||
eh_type = ntohs(evh->evl_proto); | |||||
m0->m_pkthdr.inner_l2hlen = sizeof(*evh); | |||||
} else | |||||
m0->m_pkthdr.inner_l2hlen = sizeof(*eh); | |||||
l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); | |||||
switch (eh_type) { | |||||
#ifdef INET6 | |||||
case ETHERTYPE_IPV6: | |||||
m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); | |||||
break; | |||||
#endif | |||||
#ifdef INET | #ifdef INET | ||||
case ETHERTYPE_IP: | case ETHERTYPE_IP: | ||||
{ | { | ||||
struct ip *ip = l3hdr; | struct ip *ip = l3hdr; | ||||
m0->m_pkthdr.l3hlen = ip->ip_hl * 4; | m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; | ||||
break; | break; | ||||
} | } | ||||
#endif | #endif | ||||
default: | default: | ||||
panic("%s: ethertype 0x%04x unknown. if_cxgbe must be compiled" | panic("%s: VXLAN hw offload requested with unknown " | ||||
"ethertype 0x%04x. if_cxgbe must be compiled" | |||||
" with the same INET/INET6 options as the kernel.", | " with the same INET/INET6 options as the kernel.", | ||||
__func__, eh_type); | __func__, eh_type); | ||||
} | } | ||||
#if defined(INET) || defined(INET6) | |||||
if (needs_inner_tcp_csum(m0)) { | |||||
tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); | |||||
m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; | |||||
} | |||||
#endif | |||||
MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); | |||||
m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | | |||||
CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | | |||||
CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | | |||||
CSUM_ENCAP_VXLAN; | |||||
} | |||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
if (needs_tcp_csum(m0)) { | if (needs_outer_tcp_csum(m0)) { | ||||
tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); | tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); | ||||
m0->m_pkthdr.l4hlen = tcp->th_off * 4; | m0->m_pkthdr.l4hlen = tcp->th_off * 4; | ||||
#ifdef RATELIMIT | #ifdef RATELIMIT | ||||
if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { | if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { | ||||
set_mbuf_eo_tsclk_tsoff(m0, | set_mbuf_eo_tsclk_tsoff(m0, | ||||
V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | | V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | | ||||
V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); | V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); | ||||
} else | } else | ||||
set_mbuf_eo_tsclk_tsoff(m0, 0); | set_mbuf_eo_tsclk_tsoff(m0, 0); | ||||
} else if (needs_udp_csum(m0)) { | } else if (needs_outer_udp_csum(m0)) { | ||||
m0->m_pkthdr.l4hlen = sizeof(struct udphdr); | m0->m_pkthdr.l4hlen = sizeof(struct udphdr); | ||||
#endif | #endif | ||||
} | } | ||||
#ifdef RATELIMIT | #ifdef RATELIMIT | ||||
if (needs_eo(cst)) { | if (needs_eo(cst)) { | ||||
u_int immhdrs; | u_int immhdrs; | ||||
/* EO WRs have the headers in the WR and not the GL. */ | /* EO WRs have the headers in the WR and not the GL. */ | ||||
▲ Show 20 Lines • Show All 938 Lines • ▼ Show 20 Lines | #if defined(INET) || defined(INET6) | ||||
SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, | SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, | ||||
&rxq->lro.lro_flushed, 0, NULL); | &rxq->lro.lro_flushed, 0, NULL); | ||||
#endif | #endif | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, | ||||
&rxq->rxcsum, "# of times hardware assisted with checksum"); | &rxq->rxcsum, "# of times hardware assisted with checksum"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", | ||||
CTLFLAG_RD, &rxq->vlan_extraction, | CTLFLAG_RD, &rxq->vlan_extraction, | ||||
"# of times hardware extracted 802.1Q tag"); | "# of times hardware extracted 802.1Q tag"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_rxcsum", | |||||
CTLFLAG_RD, &rxq->vxlan_rxcsum, | |||||
"# of times hardware assisted with inner checksum (VXLAN) "); | |||||
add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); | add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); | ||||
return (rc); | return (rc); | ||||
} | } | ||||
static int | static int | ||||
free_rxq(struct vi_info *vi, struct sge_rxq *rxq) | free_rxq(struct vi_info *vi, struct sge_rxq *rxq) | ||||
▲ Show 20 Lines • Show All 638 Lines • ▼ Show 20 Lines | alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", | ||||
CTLFLAG_RD, &txq->txpkts0_pkts, | CTLFLAG_RD, &txq->txpkts0_pkts, | ||||
"# of frames tx'd using type0 txpkts work requests"); | "# of frames tx'd using type0 txpkts work requests"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", | ||||
CTLFLAG_RD, &txq->txpkts1_pkts, | CTLFLAG_RD, &txq->txpkts1_pkts, | ||||
"# of frames tx'd using type1 txpkts work requests"); | "# of frames tx'd using type1 txpkts work requests"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, | ||||
&txq->raw_wrs, "# of raw work requests (non-packets)"); | &txq->raw_wrs, "# of raw work requests (non-packets)"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_tso_wrs", | |||||
CTLFLAG_RD, &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); | |||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_txcsum", | |||||
CTLFLAG_RD, &txq->vxlan_txcsum, | |||||
"# of times hardware assisted with inner checksums (VXLAN)"); | |||||
#ifdef KERN_TLS | #ifdef KERN_TLS | ||||
if (sc->flags & KERN_TLS_OK) { | if (sc->flags & KERN_TLS_OK) { | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, | ||||
"kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records, | "kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records, | ||||
"# of NIC TLS records transmitted"); | "# of NIC TLS records transmitted"); | ||||
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, | SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, | ||||
"kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short, | "kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short, | ||||
▲ Show 20 Lines • Show All 273 Lines • ▼ Show 20 Lines | get_pkt_gl(struct mbuf *m, struct sglist *gl) | ||||
if (__predict_false(rc != 0)) { | if (__predict_false(rc != 0)) { | ||||
panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " | panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " | ||||
"with %d.", __func__, m, mbuf_nsegs(m), rc); | "with %d.", __func__, m, mbuf_nsegs(m), rc); | ||||
} | } | ||||
KASSERT(gl->sg_nseg == mbuf_nsegs(m), | KASSERT(gl->sg_nseg == mbuf_nsegs(m), | ||||
("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, | ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, | ||||
mbuf_nsegs(m), gl->sg_nseg)); | mbuf_nsegs(m), gl->sg_nseg)); | ||||
KASSERT(gl->sg_nseg > 0 && | KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m), | ||||
gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), | |||||
("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, | ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, | ||||
gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); | gl->sg_nseg, max_nsegs_allowed(m))); | ||||
} | } | ||||
/* | /* | ||||
* len16 for a txpkt WR with a GL. Includes the firmware work request header. | * len16 for a txpkt WR with a GL. Includes the firmware work request header. | ||||
*/ | */ | ||||
static inline u_int | static inline u_int | ||||
txpkt_len16(u_int nsegs, u_int tso) | txpkt_len16(u_int nsegs, const u_int extra) | ||||
{ | { | ||||
u_int n; | u_int n; | ||||
MPASS(nsegs > 0); | MPASS(nsegs > 0); | ||||
nsegs--; /* first segment is part of ulptx_sgl */ | nsegs--; /* first segment is part of ulptx_sgl */ | ||||
n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + | n = extra + sizeof(struct fw_eth_tx_pkt_wr) + | ||||
sizeof(struct cpl_tx_pkt_core) + | |||||
sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); | sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); | ||||
if (tso) | |||||
n += sizeof(struct cpl_tx_pkt_lso_core); | |||||
return (howmany(n, 16)); | return (howmany(n, 16)); | ||||
} | } | ||||
/* | /* | ||||
* len16 for a txpkt_vm WR with a GL. Includes the firmware work | * len16 for a txpkt_vm WR with a GL. Includes the firmware work | ||||
* request header. | * request header. | ||||
*/ | */ | ||||
static inline u_int | static inline u_int | ||||
txpkt_vm_len16(u_int nsegs, u_int tso) | txpkt_vm_len16(u_int nsegs, const u_int extra) | ||||
{ | { | ||||
u_int n; | u_int n; | ||||
MPASS(nsegs > 0); | MPASS(nsegs > 0); | ||||
nsegs--; /* first segment is part of ulptx_sgl */ | nsegs--; /* first segment is part of ulptx_sgl */ | ||||
n = sizeof(struct fw_eth_tx_pkt_vm_wr) + | n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) + | ||||
sizeof(struct cpl_tx_pkt_core) + | sizeof(struct cpl_tx_pkt_core) + | ||||
sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); | sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); | ||||
if (tso) | |||||
n += sizeof(struct cpl_tx_pkt_lso_core); | |||||
return (howmany(n, 16)); | return (howmany(n, 16)); | ||||
} | } | ||||
static inline void | |||||
calculate_mbuf_len16(struct adapter *sc, struct mbuf *m) | |||||
{ | |||||
const int lso = sizeof(struct cpl_tx_pkt_lso_core); | |||||
const int tnl_lso = sizeof(struct cpl_tx_tnl_lso); | |||||
if (sc->flags & IS_VF) { | |||||
if (needs_tso(m)) | |||||
set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso)); | |||||
else | |||||
set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0)); | |||||
return; | |||||
} | |||||
if (needs_tso(m)) { | |||||
if (needs_vxlan_tso(m)) | |||||
set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso)); | |||||
else | |||||
set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso)); | |||||
} else | |||||
set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0)); | |||||
} | |||||
/* | /* | ||||
* len16 for a txpkts type 0 WR with a GL. Does not include the firmware work | * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work | ||||
* request header. | * request header. | ||||
*/ | */ | ||||
static inline u_int | static inline u_int | ||||
txpkts0_len16(u_int nsegs) | txpkts0_len16(u_int nsegs) | ||||
{ | { | ||||
u_int n; | u_int n; | ||||
Show All 32 Lines | imm_payload(u_int ndesc) | ||||
return (n); | return (n); | ||||
} | } | ||||
/*
 * Build the cpl_tx_pkt_core ctrl1 word that requests tx checksum offload for
 * this mbuf.  Returns a value with both checksum-disable flags set when no
 * hardware assistance is needed.  For VXLAN-encapsulated traffic the lengths
 * given to the hardware describe the inner packet: the entire outer
 * encapsulation plus the inner L2 header is presented as one "ethernet
 * header" and the inner L3 header is the one that gets checksummed.
 */
static inline uint64_t
csum_to_ctrl(struct adapter *sc, struct mbuf *m)
{
	uint64_t ctrl;
	int csum_type, l2hlen, l3hlen;
	int x, y;
	/* Rows: TCP, UDP, IP-header-only.  Columns: IPv4, IPv6. */
	static const int csum_types[3][2] = {
		{TX_CSUM_TCPIP, TX_CSUM_TCPIP6},
		{TX_CSUM_UDPIP, TX_CSUM_UDPIP6},
		{TX_CSUM_IP, 0}
	};

	M_ASSERTPKTHDR(m);

	if (!needs_hwcsum(m))
		return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

	MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN);
	MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip));

	if (needs_vxlan_csum(m)) {
		MPASS(m->m_pkthdr.l4hlen > 0);
		MPASS(m->m_pkthdr.l5hlen > 0);
		MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN);
		MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip));

		/*
		 * Fold the outer L2 + L3 + L4 + L5 headers and the inner L2
		 * header into the "ethernet header" length; checksum the
		 * inner L3 header.
		 */
		l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
		    m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen +
		    m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN;
		l3hlen = m->m_pkthdr.inner_l3hlen;
	} else {
		l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN;
		l3hlen = m->m_pkthdr.l3hlen;
	}

	ctrl = 0;
	if (!needs_l3_csum(m))
		ctrl |= F_TXPKT_IPCSUM_DIS;

	/* Row of csum_types: which L4 protocol needs a checksum. */
	if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP |
	    CSUM_IP6_TCP | CSUM_INNER_IP6_TCP))
		x = 0;	/* TCP */
	else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP |
	    CSUM_IP6_UDP | CSUM_INNER_IP6_UDP))
		x = 1;	/* UDP */
	else
		x = 2;	/* no L4 checksum requested */

	/* Column of csum_types: IPv4 or IPv6. */
	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP |
	    CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP))
		y = 0;	/* IPv4 */
	else {
		MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP |
		    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP));
		y = 1;	/* IPv6 */
	}

	/*
	 * needs_hwcsum returned true earlier so there must be some kind of
	 * checksum to calculate.
	 */
	csum_type = csum_types[x][y];
	MPASS(csum_type != 0);

	if (csum_type == TX_CSUM_IP)
		ctrl |= F_TXPKT_L4CSUM_DIS;
	ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen);
	if (chip_id(sc) <= CHELSIO_T5)
		ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen);
	else
		ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen);

	return (ctrl);
}
static inline void * | |||||
write_lso_cpl(void *cpl, struct mbuf *m0) | |||||
{ | |||||
struct cpl_tx_pkt_lso_core *lso; | |||||
uint32_t ctrl; | |||||
KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && | |||||
m0->m_pkthdr.l4hlen > 0, | |||||
("%s: mbuf %p needs TSO but missing header lengths", | |||||
__func__, m0)); | |||||
ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | | |||||
F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | | |||||
V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | | |||||
V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | | |||||
V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); | |||||
if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) | |||||
ctrl |= F_LSO_IPV6; | |||||
lso = cpl; | |||||
lso->lso_ctrl = htobe32(ctrl); | |||||
lso->ipid_ofst = htobe16(0); | |||||
lso->mss = htobe16(m0->m_pkthdr.tso_segsz); | |||||
lso->seqno_offset = htobe32(0); | |||||
lso->len = htobe32(m0->m_pkthdr.len); | |||||
return (lso + 1); | |||||
} | |||||
/*
 * Write a CPL_TX_TNL_LSO at 'cpl' for a VXLAN-encapsulated TSO packet and
 * return a pointer to the byte just past it.  Both the outer and the inner
 * header lengths must already have been parsed into m0's pkthdr.
 */
static void *
write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
{
	struct cpl_tx_tnl_lso *tnl_lso = cpl;
	uint32_t ctrl;

	KASSERT(m0->m_pkthdr.inner_l2hlen > 0 &&
	    m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 &&
	    m0->m_pkthdr.inner_l5hlen > 0,
	    ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths",
		__func__, m0));
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0,
	    ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths",
		__func__, m0));

	/* Outer headers. */
	ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) |
	    F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST |
	    V_CPL_TX_TNL_LSO_ETHHDRLENOUT(
		(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
	    V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) |
	    F_CPL_TX_TNL_LSO_IPLENSETOUT;
	if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
		ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT;
	else {
		/* Outer header is IPv4. */
		ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT |
		    F_CPL_TX_TNL_LSO_IPIDINCOUT;
	}
	tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl);
	tnl_lso->IpIdOffsetOut = 0;
	/* Tunnel header length covers outer L2 + L3 + L4 + L5. */
	tnl_lso->UdpLenSetOut_to_TnlHdrLen =
	    htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT |
		F_CPL_TX_TNL_LSO_UDPLENSETOUT |
		V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen +
		    m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
		    m0->m_pkthdr.l5hlen) |
		V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
	tnl_lso->r1 = 0;

	/* Inner headers. */
	ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
	    (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) |
	    V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) |
	    V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2);
	if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr))
		ctrl |= F_CPL_TX_TNL_LSO_IPV6;
	tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl);
	tnl_lso->IpIdOffset = 0;
	tnl_lso->IpIdSplit_to_Mss =
	    htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz));
	tnl_lso->TCPSeqOffset = 0;
	tnl_lso->EthLenOffset_Size =
	    htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len));

	return (tnl_lso + 1);
}
#define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */ | #define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */ | ||||
/* | /* | ||||
* Write a VM txpkt WR for this packet to the hardware descriptors, update the | * Write a VM txpkt WR for this packet to the hardware descriptors, update the | ||||
* software descriptor, and advance the pidx. It is guaranteed that enough | * software descriptor, and advance the pidx. It is guaranteed that enough | ||||
* descriptors are available. | * descriptors are available. | ||||
* | * | ||||
* The return value is the # of hardware descriptors used. | * The return value is the # of hardware descriptors used. | ||||
Show All 37 Lines | write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) | ||||
* vlantci is ignored unless the ethtype is 0x8100, so it's | * vlantci is ignored unless the ethtype is 0x8100, so it's | ||||
* simpler to always copy it rather than making it | * simpler to always copy it rather than making it | ||||
* conditional. Also, it seems that we do not have to set | * conditional. Also, it seems that we do not have to set | ||||
* vlantci or fake the ethtype when doing VLAN tag insertion. | * vlantci or fake the ethtype when doing VLAN tag insertion. | ||||
*/ | */ | ||||
m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); | m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); | ||||
if (needs_tso(m0)) { | if (needs_tso(m0)) { | ||||
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); | cpl = write_lso_cpl(wr + 1, m0); | ||||
KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && | |||||
m0->m_pkthdr.l4hlen > 0, | |||||
("%s: mbuf %p needs TSO but missing header lengths", | |||||
__func__, m0)); | |||||
ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | | |||||
F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - | |||||
ETHER_HDR_LEN) >> 2) | | |||||
V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | | |||||
V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); | |||||
if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) | |||||
ctrl |= F_LSO_IPV6; | |||||
lso->lso_ctrl = htobe32(ctrl); | |||||
lso->ipid_ofst = htobe16(0); | |||||
lso->mss = htobe16(m0->m_pkthdr.tso_segsz); | |||||
lso->seqno_offset = htobe32(0); | |||||
lso->len = htobe32(pktlen); | |||||
cpl = (void *)(lso + 1); | |||||
txq->tso_wrs++; | txq->tso_wrs++; | ||||
} else | } else | ||||
cpl = (void *)(wr + 1); | cpl = (void *)(wr + 1); | ||||
/* Checksum offload */ | /* Checksum offload */ | ||||
ctrl1 = csum_to_ctrl(sc, m0); | ctrl1 = csum_to_ctrl(sc, m0); | ||||
if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) | if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) | ||||
txq->txcsum++; /* some hardware assistance provided */ | txq->txcsum++; /* some hardware assistance provided */ | ||||
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, | ||||
TXQ_LOCK_ASSERT_OWNED(txq); | TXQ_LOCK_ASSERT_OWNED(txq); | ||||
M_ASSERTPKTHDR(m0); | M_ASSERTPKTHDR(m0); | ||||
len16 = mbuf_len16(m0); | len16 = mbuf_len16(m0); | ||||
nsegs = mbuf_nsegs(m0); | nsegs = mbuf_nsegs(m0); | ||||
pktlen = m0->m_pkthdr.len; | pktlen = m0->m_pkthdr.len; | ||||
ctrl = sizeof(struct cpl_tx_pkt_core); | ctrl = sizeof(struct cpl_tx_pkt_core); | ||||
if (needs_tso(m0)) | if (needs_tso(m0)) { | ||||
if (needs_vxlan_tso(m0)) | |||||
ctrl += sizeof(struct cpl_tx_tnl_lso); | |||||
else | |||||
ctrl += sizeof(struct cpl_tx_pkt_lso_core); | ctrl += sizeof(struct cpl_tx_pkt_lso_core); | ||||
else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && | } else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && | ||||
available >= 2) { | available >= 2) { | ||||
/* Immediate data. Recalculate len16 and set nsegs to 0. */ | /* Immediate data. Recalculate len16 and set nsegs to 0. */ | ||||
ctrl += pktlen; | ctrl += pktlen; | ||||
len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + | len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + | ||||
sizeof(struct cpl_tx_pkt_core) + pktlen, 16); | sizeof(struct cpl_tx_pkt_core) + pktlen, 16); | ||||
nsegs = 0; | nsegs = 0; | ||||
} | } | ||||
ndesc = tx_len16_to_desc(len16); | ndesc = tx_len16_to_desc(len16); | ||||
MPASS(ndesc <= available); | MPASS(ndesc <= available); | ||||
/* Firmware work request header */ | /* Firmware work request header */ | ||||
eq = &txq->eq; | eq = &txq->eq; | ||||
wr = (void *)&eq->desc[eq->pidx]; | wr = (void *)&eq->desc[eq->pidx]; | ||||
wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | | wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | | ||||
V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); | V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); | ||||
ctrl = V_FW_WR_LEN16(len16); | ctrl = V_FW_WR_LEN16(len16); | ||||
wr->equiq_to_len16 = htobe32(ctrl); | wr->equiq_to_len16 = htobe32(ctrl); | ||||
wr->r3 = 0; | wr->r3 = 0; | ||||
if (needs_tso(m0)) { | if (needs_tso(m0)) { | ||||
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); | if (needs_vxlan_tso(m0)) { | ||||
cpl = write_tnl_lso_cpl(wr + 1, m0); | |||||
KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && | txq->vxlan_tso_wrs++; | ||||
m0->m_pkthdr.l4hlen > 0, | } else { | ||||
("%s: mbuf %p needs TSO but missing header lengths", | cpl = write_lso_cpl(wr + 1, m0); | ||||
__func__, m0)); | |||||
ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | | |||||
F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - | |||||
ETHER_HDR_LEN) >> 2) | | |||||
V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | | |||||
V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); | |||||
if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) | |||||
ctrl |= F_LSO_IPV6; | |||||
lso->lso_ctrl = htobe32(ctrl); | |||||
lso->ipid_ofst = htobe16(0); | |||||
lso->mss = htobe16(m0->m_pkthdr.tso_segsz); | |||||
lso->seqno_offset = htobe32(0); | |||||
lso->len = htobe32(pktlen); | |||||
cpl = (void *)(lso + 1); | |||||
txq->tso_wrs++; | txq->tso_wrs++; | ||||
} | |||||
} else | } else | ||||
cpl = (void *)(wr + 1); | cpl = (void *)(wr + 1); | ||||
/* Checksum offload */ | /* Checksum offload */ | ||||
ctrl1 = csum_to_ctrl(sc, m0); | ctrl1 = csum_to_ctrl(sc, m0); | ||||
if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) | if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { | ||||
txq->txcsum++; /* some hardware assistance provided */ | /* some hardware assistance provided */ | ||||
if (needs_vxlan_csum(m0)) | |||||
txq->vxlan_txcsum++; | |||||
else | |||||
txq->txcsum++; | |||||
} | |||||
/* VLAN tag insertion */ | /* VLAN tag insertion */ | ||||
if (needs_vlan_insertion(m0)) { | if (needs_vlan_insertion(m0)) { | ||||
ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); | ctrl1 |= F_TXPKT_VLAN_VLD | | ||||
V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); | |||||
txq->vlan_insertion++; | txq->vlan_insertion++; | ||||
} | } | ||||
/* CPL header */ | /* CPL header */ | ||||
cpl->ctrl0 = txq->cpl_ctrl0; | cpl->ctrl0 = txq->cpl_ctrl0; | ||||
cpl->pack = 0; | cpl->pack = 0; | ||||
cpl->len = htobe16(pktlen); | cpl->len = htobe16(pktlen); | ||||
cpl->ctrl1 = htobe64(ctrl1); | cpl->ctrl1 = htobe64(ctrl1); | ||||
/* SGL */ | /* SGL */ | ||||
dst = (void *)(cpl + 1); | dst = (void *)(cpl + 1); | ||||
if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) | |||||
dst = (caddr_t)&eq->desc[0]; | |||||
if (nsegs > 0) { | if (nsegs > 0) { | ||||
write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); | write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); | ||||
txq->sgl_wrs++; | txq->sgl_wrs++; | ||||
} else { | } else { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
for (m = m0; m != NULL; m = m->m_next) { | for (m = m0; m != NULL; m = m->m_next) { | ||||
▲ Show 20 Lines • Show All 229 Lines • ▼ Show 20 Lines | if (txp->wr_type == 0) { | ||||
(uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) | (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) | ||||
cpl = (void *)&eq->desc[0]; | cpl = (void *)&eq->desc[0]; | ||||
} else { | } else { | ||||
cpl = flitp; | cpl = flitp; | ||||
} | } | ||||
/* Checksum offload */ | /* Checksum offload */ | ||||
ctrl1 = csum_to_ctrl(sc, m); | ctrl1 = csum_to_ctrl(sc, m); | ||||
if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) | if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { | ||||
txq->txcsum++; /* some hardware assistance provided */ | /* some hardware assistance provided */ | ||||
if (needs_vxlan_csum(m)) | |||||
txq->vxlan_txcsum++; | |||||
else | |||||
txq->txcsum++; | |||||
} | |||||
/* VLAN tag insertion */ | /* VLAN tag insertion */ | ||||
if (needs_vlan_insertion(m)) { | if (needs_vlan_insertion(m)) { | ||||
ctrl1 |= F_TXPKT_VLAN_VLD | | ctrl1 |= F_TXPKT_VLAN_VLD | | ||||
V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); | ||||
txq->vlan_insertion++; | txq->vlan_insertion++; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 742 Lines • ▼ Show 20 Lines | write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr, | ||||
immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; | immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; | ||||
ctrl += immhdrs; | ctrl += immhdrs; | ||||
wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | | wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | | ||||
V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl)); | V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl)); | ||||
wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | | wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | | ||||
V_FW_WR_FLOWID(cst->etid)); | V_FW_WR_FLOWID(cst->etid)); | ||||
wr->r3 = 0; | wr->r3 = 0; | ||||
if (needs_udp_csum(m0)) { | if (needs_outer_udp_csum(m0)) { | ||||
wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; | wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; | ||||
wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; | wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; | ||||
wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); | wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); | ||||
wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; | wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; | ||||
wr->u.udpseg.rtplen = 0; | wr->u.udpseg.rtplen = 0; | ||||
wr->u.udpseg.r4 = 0; | wr->u.udpseg.r4 = 0; | ||||
wr->u.udpseg.mss = htobe16(pktlen - immhdrs); | wr->u.udpseg.mss = htobe16(pktlen - immhdrs); | ||||
wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; | wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; | ||||
wr->u.udpseg.plen = htobe32(pktlen - immhdrs); | wr->u.udpseg.plen = htobe32(pktlen - immhdrs); | ||||
cpl = (void *)(wr + 1); | cpl = (void *)(wr + 1); | ||||
} else { | } else { | ||||
MPASS(needs_tcp_csum(m0)); | MPASS(needs_outer_tcp_csum(m0)); | ||||
wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; | wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; | ||||
wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; | wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; | ||||
wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); | wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); | ||||
wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; | wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; | ||||
wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); | wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); | ||||
wr->u.tcpseg.r4 = 0; | wr->u.tcpseg.r4 = 0; | ||||
wr->u.tcpseg.r5 = 0; | wr->u.tcpseg.r5 = 0; | ||||
wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); | wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); | ||||
Show All 20 Lines | if (needs_tso(m0)) { | ||||
cpl = (void *)(lso + 1); | cpl = (void *)(lso + 1); | ||||
} else { | } else { | ||||
wr->u.tcpseg.mss = htobe16(0xffff); | wr->u.tcpseg.mss = htobe16(0xffff); | ||||
cpl = (void *)(wr + 1); | cpl = (void *)(wr + 1); | ||||
} | } | ||||
} | } | ||||
/* Checksum offload must be requested for ethofld. */ | /* Checksum offload must be requested for ethofld. */ | ||||
MPASS(needs_l4_csum(m0)); | MPASS(needs_outer_l4_csum(m0)); | ||||
ctrl1 = csum_to_ctrl(cst->adapter, m0); | ctrl1 = csum_to_ctrl(cst->adapter, m0); | ||||
/* VLAN tag insertion */ | /* VLAN tag insertion */ | ||||
if (needs_vlan_insertion(m0)) { | if (needs_vlan_insertion(m0)) { | ||||
ctrl1 |= F_TXPKT_VLAN_VLD | | ctrl1 |= F_TXPKT_VLAN_VLD | | ||||
V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 262 Lines • Show Last 20 Lines |
Would it make sense to extract all of these helpers into a common infrastructure header so they can be shared?