Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/mlx4/mlx4_en/mlx4_en_rx.c
Show First 20 Lines • Show All 549 Lines • ▼ Show 20 Lines | mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, | ||||
if (unlikely(length > mb->m_len)) | if (unlikely(length > mb->m_len)) | ||||
length = mb->m_len; | length = mb->m_len; | ||||
/* update total packet length in packet header */ | /* update total packet length in packet header */ | ||||
mb->m_len = mb->m_pkthdr.len = length; | mb->m_len = mb->m_pkthdr.len = length; | ||||
return (mb); | return (mb); | ||||
} | } | ||||
static __inline int | |||||
mlx4_en_rss_hash(__be16 status, int udp_rss) | |||||
{ | |||||
const __be16 status_all = cpu_to_be16( | |||||
MLX4_CQE_STATUS_IPV4 | | |||||
MLX4_CQE_STATUS_IPV4F | | |||||
MLX4_CQE_STATUS_IPV6 | | |||||
MLX4_CQE_STATUS_TCP | | |||||
MLX4_CQE_STATUS_UDP); | |||||
const __be16 status_ipv4_tcp = cpu_to_be16( | |||||
MLX4_CQE_STATUS_IPV4 | | |||||
MLX4_CQE_STATUS_TCP); | |||||
const __be16 status_ipv6_tcp = cpu_to_be16( | |||||
MLX4_CQE_STATUS_IPV6 | | |||||
MLX4_CQE_STATUS_TCP); | |||||
const __be16 status_ipv4_udp = cpu_to_be16( | |||||
MLX4_CQE_STATUS_IPV4 | | |||||
MLX4_CQE_STATUS_UDP); | |||||
const __be16 status_ipv6_udp = cpu_to_be16( | |||||
MLX4_CQE_STATUS_IPV6 | | |||||
MLX4_CQE_STATUS_UDP); | |||||
const __be16 status_ipv4 = cpu_to_be16(MLX4_CQE_STATUS_IPV4); | |||||
const __be16 status_ipv6 = cpu_to_be16(MLX4_CQE_STATUS_IPV6); | |||||
status &= status_all; | |||||
switch (status) { | |||||
case status_ipv4_tcp: | |||||
return (M_HASHTYPE_RSS_TCP_IPV4); | |||||
case status_ipv6_tcp: | |||||
return (M_HASHTYPE_RSS_TCP_IPV6); | |||||
case status_ipv4_udp: | |||||
return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV4 | |||||
: M_HASHTYPE_RSS_IPV4); | |||||
case status_ipv6_udp: | |||||
return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV6 | |||||
: M_HASHTYPE_RSS_IPV6); | |||||
default: | |||||
if (status & status_ipv4) | |||||
return (M_HASHTYPE_RSS_IPV4); | |||||
if (status & status_ipv6) | |||||
return (M_HASHTYPE_RSS_IPV6); | |||||
return (M_HASHTYPE_OPAQUE_HASH); | |||||
} | |||||
} | |||||
/* For cpu arch with cache line of 64B the performance is better when cqe size==64B
 * To enlarge cqe size from 32B to 64B --> 32B of garbage (i.e. 0xccccccc)
 * was added in the beginning of each cqe (the real data is in the corresponding 32B).
 * The following calc ensures that when factor==1, it means we are aligned to 64B
 * and we get the real cqe data*/
/*
 * Parenthesize each macro argument: without the parentheses an argument
 * expression with precedence below '<<' (e.g. a conditional expression)
 * would be parsed incorrectly inside the expansion.
 */
#define CQE_FACTOR_INDEX(index, factor) (((index) << (factor)) + (factor))
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) | int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) | ||||
{ | { | ||||
struct mlx4_en_priv *priv = netdev_priv(dev); | struct mlx4_en_priv *priv = netdev_priv(dev); | ||||
struct mlx4_cqe *cqe; | struct mlx4_cqe *cqe; | ||||
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; | struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; | ||||
struct mlx4_en_rx_mbuf *mb_list; | struct mlx4_en_rx_mbuf *mb_list; | ||||
struct mlx4_en_rx_desc *rx_desc; | struct mlx4_en_rx_desc *rx_desc; | ||||
struct mbuf *mb; | struct mbuf *mb; | ||||
struct mlx4_cq *mcq = &cq->mcq; | struct mlx4_cq *mcq = &cq->mcq; | ||||
struct mlx4_cqe *buf = cq->buf; | struct mlx4_cqe *buf = cq->buf; | ||||
int index; | int index; | ||||
unsigned int length; | unsigned int length; | ||||
int polled = 0; | int polled = 0; | ||||
u32 cons_index = mcq->cons_index; | u32 cons_index = mcq->cons_index; | ||||
u32 size_mask = ring->size_mask; | u32 size_mask = ring->size_mask; | ||||
int size = cq->size; | int size = cq->size; | ||||
int factor = priv->cqe_factor; | int factor = priv->cqe_factor; | ||||
const int udp_rss = priv->mdev->profile.udp_rss; | |||||
if (!priv->port_up) | if (!priv->port_up) | ||||
return 0; | return 0; | ||||
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx | /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx | ||||
* descriptor offset can be deducted from the CQE index instead of | * descriptor offset can be deducted from the CQE index instead of | ||||
* reading 'cqe->index' */ | * reading 'cqe->index' */ | ||||
index = cons_index & size_mask; | index = cons_index & size_mask; | ||||
Show All 31 Lines | while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, | ||||
if (unlikely(priv->validate_loopback)) { | if (unlikely(priv->validate_loopback)) { | ||||
validate_loopback(priv, mb); | validate_loopback(priv, mb); | ||||
goto next; | goto next; | ||||
} | } | ||||
/* forward Toeplitz compatible hash value */ | /* forward Toeplitz compatible hash value */ | ||||
mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid); | mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid); | ||||
M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH); | M_HASHTYPE_SET(mb, mlx4_en_rss_hash(cqe->status, udp_rss)); | ||||
mb->m_pkthdr.rcvif = dev; | mb->m_pkthdr.rcvif = dev; | ||||
if (be32_to_cpu(cqe->vlan_my_qpn) & | if (be32_to_cpu(cqe->vlan_my_qpn) & | ||||
MLX4_CQE_VLAN_PRESENT_MASK) { | MLX4_CQE_VLAN_PRESENT_MASK) { | ||||
mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid); | mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid); | ||||
mb->m_flags |= M_VLANTAG; | mb->m_flags |= M_VLANTAG; | ||||
} | } | ||||
if (likely(dev->if_capenable & | if (likely(dev->if_capenable & | ||||
(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) && | (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) && | ||||
▲ Show 20 Lines • Show All 309 Lines • Show Last 20 Lines |