Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixgbe/ix_txrx.c
Show All 34 Lines | |||||
#ifndef IXGBE_STANDALONE_BUILD | #ifndef IXGBE_STANDALONE_BUILD | ||||
#include "opt_inet.h" | #include "opt_inet.h" | ||||
#include "opt_inet6.h" | #include "opt_inet6.h" | ||||
#endif | #endif | ||||
#include "ixgbe.h" | #include "ixgbe.h" | ||||
#ifdef RSS | |||||
#include <net/rss_config.h> | |||||
#include <netinet/in_rss.h> | |||||
#endif | |||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
#include <net/netmap.h> | #include <net/netmap.h> | ||||
#include <sys/selinfo.h> | #include <sys/selinfo.h> | ||||
#include <dev/netmap/netmap_kern.h> | #include <dev/netmap/netmap_kern.h> | ||||
extern int ix_crcstrip; | extern int ix_crcstrip; | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 137 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
int | int | ||||
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) | ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct adapter *adapter = ifp->if_softc; | struct adapter *adapter = ifp->if_softc; | ||||
struct ix_queue *que; | struct ix_queue *que; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
int i, err = 0; | int i, err = 0; | ||||
#ifdef RSS | |||||
uint32_t bucket_id; | |||||
#endif | |||||
/* | /* | ||||
* When doing RSS, map it to the same outbound queue | * When doing RSS, map it to the same outbound queue | ||||
* as the incoming flow would be mapped to. | * as the incoming flow would be mapped to. | ||||
* | * | ||||
* If everything is setup correctly, it should be the | * If everything is setup correctly, it should be the | ||||
* same bucket that the current CPU we're on is. | * same bucket that the current CPU we're on is. | ||||
*/ | */ | ||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) | if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { | ||||
i = m->m_pkthdr.flowid % adapter->num_queues; | #ifdef RSS | ||||
if (rss_hash2bucket(m->m_pkthdr.flowid, | |||||
M_HASHTYPE_GET(m), &bucket_id) == 0) | |||||
/* TODO: spit out something if bucket_id > num_queues? */ | |||||
i = bucket_id % adapter->num_queues; | |||||
else | else | ||||
#endif | |||||
i = m->m_pkthdr.flowid % adapter->num_queues; | |||||
} else | |||||
i = curcpu % adapter->num_queues; | i = curcpu % adapter->num_queues; | ||||
/* Check for a hung queue and pick alternative */ | /* Check for a hung queue and pick alternative */ | ||||
if (((1 << i) & adapter->active_queues) == 0) | if (((1 << i) & adapter->active_queues) == 0) | ||||
i = ffsl(adapter->active_queues); | i = ffsl(adapter->active_queues); | ||||
txr = &adapter->tx_rings[i]; | txr = &adapter->tx_rings[i]; | ||||
que = &adapter->queues[i]; | que = &adapter->queues[i]; | ||||
▲ Show 20 Lines • Show All 338 Lines • ▼ Show 20 Lines | |||||
* Initialize a transmit ring. | * Initialize a transmit ring. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixgbe_setup_transmit_ring(struct tx_ring *txr) | ixgbe_setup_transmit_ring(struct tx_ring *txr) | ||||
{ | { | ||||
struct adapter *adapter = txr->adapter; | struct adapter *adapter = txr->adapter; | ||||
struct ixgbe_tx_buf *txbuf; | struct ixgbe_tx_buf *txbuf; | ||||
int i; | |||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
struct netmap_adapter *na = NA(adapter->ifp); | struct netmap_adapter *na = NA(adapter->ifp); | ||||
struct netmap_slot *slot; | struct netmap_slot *slot; | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
/* Clear the old ring contents */ | /* Clear the old ring contents */ | ||||
IXGBE_TX_LOCK(txr); | IXGBE_TX_LOCK(txr); | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
/* | /* | ||||
* (under lock): if in netmap mode, do some consistency | * (under lock): if in netmap mode, do some consistency | ||||
* checks and set slot to entry 0 of the netmap ring. | * checks and set slot to entry 0 of the netmap ring. | ||||
*/ | */ | ||||
slot = netmap_reset(na, NR_TX, txr->me, 0); | slot = netmap_reset(na, NR_TX, txr->me, 0); | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
bzero((void *)txr->tx_base, | bzero((void *)txr->tx_base, | ||||
(sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); | (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); | ||||
/* Reset indices */ | /* Reset indices */ | ||||
txr->next_avail_desc = 0; | txr->next_avail_desc = 0; | ||||
txr->next_to_clean = 0; | txr->next_to_clean = 0; | ||||
/* Free any existing tx buffers. */ | /* Free any existing tx buffers. */ | ||||
txbuf = txr->tx_buffers; | txbuf = txr->tx_buffers; | ||||
for (i = 0; i < txr->num_desc; i++, txbuf++) { | for (int i = 0; i < txr->num_desc; i++, txbuf++) { | ||||
smh: This change is actually against style(9), but it is something that should be fixed in head.
if (txbuf->m_head != NULL) { | if (txbuf->m_head != NULL) { | ||||
bus_dmamap_sync(txr->txtag, txbuf->map, | bus_dmamap_sync(txr->txtag, txbuf->map, | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(txr->txtag, txbuf->map); | bus_dmamap_unload(txr->txtag, txbuf->map); | ||||
m_freem(txbuf->m_head); | m_freem(txbuf->m_head); | ||||
txbuf->m_head = NULL; | txbuf->m_head = NULL; | ||||
} | } | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
/* | /* | ||||
* In netmap mode, set the map for the packet buffer. | * In netmap mode, set the map for the packet buffer. | ||||
* NOTE: Some drivers (not this one) also need to set | * NOTE: Some drivers (not this one) also need to set | ||||
* the physical buffer address in the NIC ring. | * the physical buffer address in the NIC ring. | ||||
* Slots in the netmap ring (indexed by "si") are | * Slots in the netmap ring (indexed by "si") are | ||||
* kring->nkr_hwofs positions "ahead" wrt the | * kring->nkr_hwofs positions "ahead" wrt the | ||||
* corresponding slot in the NIC ring. In some drivers | * corresponding slot in the NIC ring. In some drivers | ||||
* (not here) nkr_hwofs can be negative. Function | * (not here) nkr_hwofs can be negative. Function | ||||
* netmap_idx_n2k() handles wraparounds properly. | * netmap_idx_n2k() handles wraparounds properly. | ||||
*/ | */ | ||||
if (slot) { | if (slot) { | ||||
int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); | int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); | ||||
netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si)); | netmap_load_map(na, txr->txtag, | ||||
txbuf->map, NMB(na, slot + si)); | |||||
} | } | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
/* Clear the EOP descriptor pointer */ | /* Clear the EOP descriptor pointer */ | ||||
txbuf->eop = NULL; | txbuf->eop = NULL; | ||||
} | } | ||||
#ifdef IXGBE_FDIR | #ifdef IXGBE_FDIR | ||||
/* Set the rate at which we sample packets */ | /* Set the rate at which we sample packets */ | ||||
▲ Show 20 Lines • Show All 138 Lines • ▼ Show 20 Lines | ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, | ||||
/* | /* | ||||
** In advanced descriptors the vlan tag must | ** In advanced descriptors the vlan tag must | ||||
** be placed into the context descriptor. Hence | ** be placed into the context descriptor. Hence | ||||
** we need to make one even if not doing offloads. | ** we need to make one even if not doing offloads. | ||||
*/ | */ | ||||
if (mp->m_flags & M_VLANTAG) { | if (mp->m_flags & M_VLANTAG) { | ||||
vtag = htole16(mp->m_pkthdr.ether_vtag); | vtag = htole16(mp->m_pkthdr.ether_vtag); | ||||
vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); | vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); | ||||
} | } else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE)) | ||||
else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE)) | |||||
return (0); | return (0); | ||||
/* | /* | ||||
* Determine where frame payload starts. | * Determine where frame payload starts. | ||||
* Jump over vlan headers if already present, | * Jump over vlan headers if already present, | ||||
* helpful for QinQ too. | * helpful for QinQ too. | ||||
*/ | */ | ||||
eh = mtod(mp, struct ether_vlan_header *); | eh = mtod(mp, struct ether_vlan_header *); | ||||
▲ Show 20 Lines • Show All 584 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
int | int | ||||
ixgbe_allocate_receive_buffers(struct rx_ring *rxr) | ixgbe_allocate_receive_buffers(struct rx_ring *rxr) | ||||
{ | { | ||||
struct adapter *adapter = rxr->adapter; | struct adapter *adapter = rxr->adapter; | ||||
device_t dev = adapter->dev; | device_t dev = adapter->dev; | ||||
struct ixgbe_rx_buf *rxbuf; | struct ixgbe_rx_buf *rxbuf; | ||||
int i, bsize, error; | int bsize, error; | ||||
bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; | bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; | ||||
if (!(rxr->rx_buffers = | if (!(rxr->rx_buffers = | ||||
(struct ixgbe_rx_buf *) malloc(bsize, | (struct ixgbe_rx_buf *) malloc(bsize, | ||||
M_DEVBUF, M_NOWAIT | M_ZERO))) { | M_DEVBUF, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate rx_buffer memory\n"); | device_printf(dev, "Unable to allocate rx_buffer memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto fail; | ||||
Show All 10 Lines | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&rxr->ptag))) { | &rxr->ptag))) { | ||||
device_printf(dev, "Unable to create RX DMA tag\n"); | device_printf(dev, "Unable to create RX DMA tag\n"); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
for (i = 0; i < rxr->num_desc; i++, rxbuf++) { | for (int i = 0; i < rxr->num_desc; i++, rxbuf++) { | ||||
Not Done Inline Actions — smh: As above. | |||||
rxbuf = &rxr->rx_buffers[i]; | rxbuf = &rxr->rx_buffers[i]; | ||||
error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); | error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "Unable to create RX dma map\n"); | device_printf(dev, "Unable to create RX dma map\n"); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
} | } | ||||
return (0); | return (0); | ||||
fail: | fail: | ||||
/* Frees all, but can handle partial completion */ | /* Frees all, but can handle partial completion */ | ||||
ixgbe_free_receive_structures(adapter); | ixgbe_free_receive_structures(adapter); | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | static void | ||||
ixgbe_free_receive_ring(struct rx_ring *rxr) | ixgbe_free_receive_ring(struct rx_ring *rxr) | ||||
{ | { | ||||
struct ixgbe_rx_buf *rxbuf; | struct ixgbe_rx_buf *rxbuf; | ||||
int i; | |||||
for (i = 0; i < rxr->num_desc; i++) { | for (int i = 0; i < rxr->num_desc; i++) { | ||||
Not Done Inline Actions — smh: And again. | |||||
rxbuf = &rxr->rx_buffers[i]; | rxbuf = &rxr->rx_buffers[i]; | ||||
if (rxbuf->buf != NULL) { | if (rxbuf->buf != NULL) { | ||||
bus_dmamap_sync(rxr->ptag, rxbuf->pmap, | bus_dmamap_sync(rxr->ptag, rxbuf->pmap, | ||||
BUS_DMASYNC_POSTREAD); | BUS_DMASYNC_POSTREAD); | ||||
bus_dmamap_unload(rxr->ptag, rxbuf->pmap); | bus_dmamap_unload(rxr->ptag, rxbuf->pmap); | ||||
rxbuf->buf->m_flags |= M_PKTHDR; | rxbuf->buf->m_flags |= M_PKTHDR; | ||||
m_freem(rxbuf->buf); | m_freem(rxbuf->buf); | ||||
rxbuf->buf = NULL; | rxbuf->buf = NULL; | ||||
▲ Show 20 Lines • Show All 466 Lines • ▼ Show 20 Lines | if (eop == 0) { | ||||
vtag = le16toh(cur->wb.upper.vlan); | vtag = le16toh(cur->wb.upper.vlan); | ||||
if (vtag) { | if (vtag) { | ||||
sendmp->m_pkthdr.ether_vtag = vtag; | sendmp->m_pkthdr.ether_vtag = vtag; | ||||
sendmp->m_flags |= M_VLANTAG; | sendmp->m_flags |= M_VLANTAG; | ||||
} | } | ||||
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) | if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) | ||||
ixgbe_rx_checksum(staterr, sendmp, ptype); | ixgbe_rx_checksum(staterr, sendmp, ptype); | ||||
/* | /* | ||||
* In case of multiqueue, we have RXCSUM.PCSD bit set | * In case of multiqueue, we have RXCSUM.PCSD bit set | ||||
* and never cleared. This means we have RSS hash | * and never cleared. This means we have RSS hash | ||||
* available to be used. | * available to be used. | ||||
*/ | */ | ||||
if (adapter->num_queues > 1) { | if (adapter->num_queues > 1) { | ||||
sendmp->m_pkthdr.flowid = | sendmp->m_pkthdr.flowid = | ||||
le32toh(cur->wb.lower.hi_dword.rss); | le32toh(cur->wb.lower.hi_dword.rss); | ||||
/* | /* | ||||
* Full RSS support is not available in | * Full RSS support is not available in | ||||
* FreeBSD 10 so setting the hash type to | * FreeBSD 10 so setting the hash type to | ||||
* OPAQUE. | * OPAQUE. | ||||
*/ | */ | ||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | ||||
} else { | } else { | ||||
#if __FreeBSD_version >= 800000 | |||||
sendmp->m_pkthdr.flowid = que->msix; | sendmp->m_pkthdr.flowid = que->msix; | ||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | ||||
#endif /* FreeBSD_version */ | |||||
} | } | ||||
} | } | ||||
next_desc: | next_desc: | ||||
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
/* Advance our pointers to the next descriptor. */ | /* Advance our pointers to the next descriptor. */ | ||||
if (++i == rxr->num_desc) | if (++i == rxr->num_desc) | ||||
▲ Show 20 Lines • Show All 166 Lines • ▼ Show 20 Lines | |||||
ixgbe_allocate_queues(struct adapter *adapter) | ixgbe_allocate_queues(struct adapter *adapter) | ||||
{ | { | ||||
device_t dev = adapter->dev; | device_t dev = adapter->dev; | ||||
struct ix_queue *que; | struct ix_queue *que; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int rsize, tsize, error = IXGBE_SUCCESS; | int rsize, tsize, error = IXGBE_SUCCESS; | ||||
int txconf = 0, rxconf = 0; | int txconf = 0, rxconf = 0; | ||||
#ifdef PCI_IOV | |||||
enum ixgbe_iov_mode iov_mode; | |||||
#endif | |||||
/* First allocate the top level queue structs */ | /* First allocate the top level queue structs */ | ||||
if (!(adapter->queues = | if (!(adapter->queues = | ||||
(struct ix_queue *) malloc(sizeof(struct ix_queue) * | (struct ix_queue *) malloc(sizeof(struct ix_queue) * | ||||
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate queue memory\n"); | device_printf(dev, "Unable to allocate queue memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto fail; | ||||
Show All 16 Lines | if (!(adapter->rx_rings = | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto rx_fail; | goto rx_fail; | ||||
} | } | ||||
/* For the ring itself */ | /* For the ring itself */ | ||||
tsize = roundup2(adapter->num_tx_desc * | tsize = roundup2(adapter->num_tx_desc * | ||||
sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); | sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); | ||||
#ifdef PCI_IOV | |||||
iov_mode = ixgbe_get_iov_mode(adapter); | |||||
adapter->pool = ixgbe_max_vfs(iov_mode); | |||||
#else | |||||
adapter->pool = 0; | |||||
#endif | |||||
/* | /* | ||||
* Now set up the TX queues, txconf is needed to handle the | * Now set up the TX queues, txconf is needed to handle the | ||||
* possibility that things fail midcourse and we need to | * possibility that things fail midcourse and we need to | ||||
* undo memory gracefully | * undo memory gracefully | ||||
*/ | */ | ||||
for (int i = 0; i < adapter->num_queues; i++, txconf++) { | for (int i = 0; i < adapter->num_queues; i++, txconf++) { | ||||
/* Set up some basics */ | /* Set up some basics */ | ||||
txr = &adapter->tx_rings[i]; | txr = &adapter->tx_rings[i]; | ||||
txr->adapter = adapter; | txr->adapter = adapter; | ||||
#ifdef PCI_IOV | |||||
txr->me = ixgbe_pf_que_index(iov_mode, i); | |||||
#else | |||||
txr->me = i; | txr->me = i; | ||||
#endif | |||||
txr->num_desc = adapter->num_tx_desc; | txr->num_desc = adapter->num_tx_desc; | ||||
/* Initialize the TX side lock */ | /* Initialize the TX side lock */ | ||||
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", | snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", | ||||
device_get_nameunit(dev), txr->me); | device_get_nameunit(dev), txr->me); | ||||
mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); | mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); | ||||
if (ixgbe_dma_malloc(adapter, tsize, | if (ixgbe_dma_malloc(adapter, tsize, | ||||
Show All 30 Lines | #endif | ||||
* Next the RX queues... | * Next the RX queues... | ||||
*/ | */ | ||||
rsize = roundup2(adapter->num_rx_desc * | rsize = roundup2(adapter->num_rx_desc * | ||||
sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); | sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); | ||||
for (int i = 0; i < adapter->num_queues; i++, rxconf++) { | for (int i = 0; i < adapter->num_queues; i++, rxconf++) { | ||||
rxr = &adapter->rx_rings[i]; | rxr = &adapter->rx_rings[i]; | ||||
/* Set up some basics */ | /* Set up some basics */ | ||||
rxr->adapter = adapter; | rxr->adapter = adapter; | ||||
#ifdef PCI_IOV | |||||
rxr->me = ixgbe_pf_que_index(iov_mode, i); | |||||
#else | |||||
rxr->me = i; | rxr->me = i; | ||||
#endif | |||||
rxr->num_desc = adapter->num_rx_desc; | rxr->num_desc = adapter->num_rx_desc; | ||||
/* Initialize the RX side lock */ | /* Initialize the RX side lock */ | ||||
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", | snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", | ||||
device_get_nameunit(dev), rxr->me); | device_get_nameunit(dev), rxr->me); | ||||
mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); | mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); | ||||
if (ixgbe_dma_malloc(adapter, rsize, | if (ixgbe_dma_malloc(adapter, rsize, | ||||
▲ Show 20 Lines • Show All 45 Lines • Show Last 20 Lines |
This change is actually against style(9), but it is something that should be fixed in head.