Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixl_txrx.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | |||||
static inline void ixl_rx_input(struct rx_ring *, struct ifnet *, | static inline void ixl_rx_input(struct rx_ring *, struct ifnet *, | ||||
struct mbuf *, u8); | struct mbuf *, u8); | ||||
static inline bool ixl_tso_detect_sparse(struct mbuf *mp); | static inline bool ixl_tso_detect_sparse(struct mbuf *mp); | ||||
static inline u32 ixl_get_tx_head(struct ixl_queue *que); | static inline u32 ixl_get_tx_head(struct ixl_queue *que); | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
#include <dev/netmap/if_ixl_netmap.h> | #include <dev/netmap/if_ixl_netmap.h> | ||||
#if __FreeBSD_version >= 1200000 | |||||
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1; | int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1; | ||||
#endif | |||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
/* | /* | ||||
* @key key is saved into this parameter | * @key key is saved into this parameter | ||||
*/ | */ | ||||
void | void | ||||
ixl_get_default_rss_key(u32 *key) | ixl_get_default_rss_key(u32 *key) | ||||
{ | { | ||||
MPASS(key != NULL); | MPASS(key != NULL); | ||||
u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, | u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, | ||||
0x183cfd8c, 0xce880440, 0x580cbc3c, | 0x183cfd8c, 0xce880440, 0x580cbc3c, | ||||
0x35897377, 0x328b25e1, 0x4fa98922, | 0x35897377, 0x328b25e1, 0x4fa98922, | ||||
0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, | 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, | ||||
0x0, 0x0, 0x0}; | 0x0, 0x0, 0x0}; | ||||
bcopy(rss_seed, key, IXL_RSS_KEY_SIZE); | bcopy(rss_seed, key, IXL_RSS_KEY_SIZE); | ||||
} | } | ||||
/** | |||||
* i40e_vc_stat_str - convert virtchnl status err code to a string | |||||
* @hw: pointer to the HW structure | |||||
* @stat_err: the status error code to convert | |||||
**/ | |||||
const char * | |||||
i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err) | |||||
{ | |||||
switch (stat_err) { | |||||
case VIRTCHNL_STATUS_SUCCESS: | |||||
return "OK"; | |||||
case VIRTCHNL_ERR_PARAM: | |||||
return "VIRTCHNL_ERR_PARAM"; | |||||
case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: | |||||
return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH"; | |||||
case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: | |||||
return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR"; | |||||
case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: | |||||
return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID"; | |||||
case VIRTCHNL_STATUS_NOT_SUPPORTED: | |||||
return "VIRTCHNL_STATUS_NOT_SUPPORTED"; | |||||
} | |||||
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); | |||||
return hw->err_str; | |||||
} | |||||
/* | /* | ||||
* PCI BUSMASTER needs to be set for proper operation. | |||||
*/ | |||||
void | |||||
ixl_set_busmaster(device_t dev) | |||||
{ | |||||
u16 pci_cmd_word; | |||||
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); | |||||
pci_cmd_word |= PCIM_CMD_BUSMASTEREN; | |||||
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); | |||||
} | |||||
/* | |||||
* Rewrite the ENABLE bit in the MSIX control register | |||||
*/ | |||||
void | |||||
ixl_set_msix_enable(device_t dev) | |||||
{ | |||||
int msix_ctrl, rid; | |||||
pci_find_cap(dev, PCIY_MSIX, &rid); | |||||
rid += PCIR_MSIX_CTRL; | |||||
msix_ctrl = pci_read_config(dev, rid, 2); | |||||
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; | |||||
pci_write_config(dev, rid, msix_ctrl, 2); | |||||
} | |||||
/* | |||||
** Multiqueue Transmit driver | ** Multiqueue Transmit driver | ||||
*/ | */ | ||||
int | int | ||||
ixl_mq_start(struct ifnet *ifp, struct mbuf *m) | ixl_mq_start(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct ixl_vsi *vsi = ifp->if_softc; | struct ixl_vsi *vsi = ifp->if_softc; | ||||
struct ixl_queue *que; | struct ixl_queue *que; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
int err, i; | int err, i; | ||||
#ifdef RSS | #ifdef RSS | ||||
u32 bucket_id; | u32 bucket_id; | ||||
#endif | #endif | ||||
/* | /* | ||||
** Which queue to use: | * Which queue to use: | ||||
** | * | ||||
** When doing RSS, map it to the same outbound | * When doing RSS, map it to the same outbound | ||||
** queue as the incoming flow would be mapped to. | * queue as the incoming flow would be mapped to. | ||||
** If everything is setup correctly, it should be | * If everything is setup correctly, it should be | ||||
** the same bucket that the current CPU we're on is. | * the same bucket that the current CPU we're on is. | ||||
*/ | */ | ||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { | if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { | ||||
#ifdef RSS | #ifdef RSS | ||||
if (rss_hash2bucket(m->m_pkthdr.flowid, | if (rss_hash2bucket(m->m_pkthdr.flowid, | ||||
M_HASHTYPE_GET(m), &bucket_id) == 0) { | M_HASHTYPE_GET(m), &bucket_id) == 0) { | ||||
i = bucket_id % vsi->num_queues; | i = bucket_id % vsi->num_queues; | ||||
} else | } else | ||||
#endif | #endif | ||||
i = m->m_pkthdr.flowid % vsi->num_queues; | i = m->m_pkthdr.flowid % vsi->num_queues; | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | struct ixl_vsi *vsi = ifp->if_softc; | ||||
IXL_TX_LOCK(txr); | IXL_TX_LOCK(txr); | ||||
while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) | while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) | ||||
m_freem(m); | m_freem(m); | ||||
IXL_TX_UNLOCK(txr); | IXL_TX_UNLOCK(txr); | ||||
} | } | ||||
if_qflush(ifp); | if_qflush(ifp); | ||||
} | } | ||||
/* | |||||
** Find mbuf chains passed to the driver | |||||
** that are 'sparse', using more than 8 | |||||
** mbufs to deliver an mss-size chunk of data | |||||
*/ | |||||
static inline bool | static inline bool | ||||
ixl_tso_detect_sparse(struct mbuf *mp) | ixl_tso_detect_sparse(struct mbuf *mp) | ||||
{ | { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
int num, mss; | int num, mss; | ||||
num = 0; | num = 0; | ||||
mss = mp->m_pkthdr.tso_segsz; | mss = mp->m_pkthdr.tso_segsz; | ||||
/* Exclude first mbuf; assume it contains all headers */ | /* Exclude first mbuf; assume it contains all headers */ | ||||
for (m = mp->m_next; m != NULL; m = m->m_next) { | for (m = mp->m_next; m != NULL; m = m->m_next) { | ||||
if (m == NULL) | if (m == NULL) | ||||
break; | break; | ||||
num++; | num++; | ||||
mss -= m->m_len % mp->m_pkthdr.tso_segsz; | mss -= m->m_len % mp->m_pkthdr.tso_segsz; | ||||
if (mss < 1) { | |||||
if (num > IXL_SPARSE_CHAIN) | if (num > IXL_SPARSE_CHAIN) | ||||
return (true); | return (true); | ||||
if (mss < 1) { | |||||
num = (mss == 0) ? 0 : 1; | num = (mss == 0) ? 0 : 1; | ||||
mss += mp->m_pkthdr.tso_segsz; | mss += mp->m_pkthdr.tso_segsz; | ||||
} | } | ||||
} | } | ||||
return (false); | return (false); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 122 Lines • ▼ Show 20 Lines | txd->cmd_type_offset_bsz = | ||||
htole64(I40E_TX_DESC_DTYPE_DATA | htole64(I40E_TX_DESC_DTYPE_DATA | ||||
| ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) | | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) | ||||
| ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) | | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) | ||||
| ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | ||||
| ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT)); | | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT)); | ||||
last = i; /* descriptor that will get completion IRQ */ | last = i; /* descriptor that will get completion IRQ */ | ||||
if (++i == que->num_desc) | if (++i == que->num_tx_desc) | ||||
i = 0; | i = 0; | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
buf->eop_index = -1; | buf->eop_index = -1; | ||||
} | } | ||||
/* Set the last descriptor for report */ | /* Set the last descriptor for report */ | ||||
txd->cmd_type_offset_bsz |= | txd->cmd_type_offset_bsz |= | ||||
htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); | htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); | ||||
txr->avail -= nsegs; | txr->avail -= nsegs; | ||||
txr->next_avail = i; | txr->next_avail = i; | ||||
buf->m_head = m_head; | buf->m_head = m_head; | ||||
/* Swap the dma map between the first and last descriptor */ | /* Swap the dma map between the first and last descriptor. | ||||
* The descriptor that gets checked on completion will now | |||||
* have the real map from the first descriptor. | |||||
*/ | |||||
txr->buffers[first].map = buf->map; | txr->buffers[first].map = buf->map; | ||||
buf->map = map; | buf->map = map; | ||||
bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); | bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); | ||||
/* Set the index of the descriptor that will be marked done */ | /* Set the index of the descriptor that will be marked done */ | ||||
buf = &txr->buffers[first]; | buf = &txr->buffers[first]; | ||||
buf->eop_index = last; | buf->eop_index = last; | ||||
Show All 25 Lines | |||||
**********************************************************************/ | **********************************************************************/ | ||||
int | int | ||||
ixl_allocate_tx_data(struct ixl_queue *que) | ixl_allocate_tx_data(struct ixl_queue *que) | ||||
{ | { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
device_t dev = vsi->dev; | device_t dev = vsi->dev; | ||||
struct ixl_tx_buf *buf; | struct ixl_tx_buf *buf; | ||||
int error = 0; | int i, error = 0; | ||||
/* | /* | ||||
* Setup DMA descriptor areas. | * Setup DMA descriptor areas. | ||||
*/ | */ | ||||
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
IXL_TSO_SIZE, /* maxsize */ | IXL_TSO_SIZE, /* maxsize */ | ||||
IXL_MAX_TX_SEGS, /* nsegments */ | IXL_MAX_TX_SEGS, /* nsegments */ | ||||
PAGE_SIZE, /* maxsegsize */ | IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&txr->tx_tag))) { | &txr->tx_tag))) { | ||||
device_printf(dev,"Unable to allocate TX DMA tag\n"); | device_printf(dev,"Unable to allocate TX DMA tag\n"); | ||||
goto fail; | return (error); | ||||
} | } | ||||
/* Make a special tag for TSO */ | /* Make a special tag for TSO */ | ||||
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
IXL_TSO_SIZE, /* maxsize */ | IXL_TSO_SIZE, /* maxsize */ | ||||
IXL_MAX_TSO_SEGS, /* nsegments */ | IXL_MAX_TSO_SEGS, /* nsegments */ | ||||
PAGE_SIZE, /* maxsegsize */ | IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&txr->tso_tag))) { | &txr->tso_tag))) { | ||||
device_printf(dev,"Unable to allocate TX TSO DMA tag\n"); | device_printf(dev,"Unable to allocate TX TSO DMA tag\n"); | ||||
goto fail; | goto free_tx_dma; | ||||
} | } | ||||
if (!(txr->buffers = | if (!(txr->buffers = | ||||
(struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * | (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * | ||||
que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { | que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate tx_buffer memory\n"); | device_printf(dev, "Unable to allocate tx_buffer memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto free_tx_tso_dma; | ||||
} | } | ||||
/* Create the descriptor buffer default dma maps */ | /* Create the descriptor buffer default dma maps */ | ||||
buf = txr->buffers; | buf = txr->buffers; | ||||
for (int i = 0; i < que->num_desc; i++, buf++) { | for (i = 0; i < que->num_tx_desc; i++, buf++) { | ||||
buf->tag = txr->tx_tag; | buf->tag = txr->tx_tag; | ||||
error = bus_dmamap_create(buf->tag, 0, &buf->map); | error = bus_dmamap_create(buf->tag, 0, &buf->map); | ||||
if (error != 0) { | if (error != 0) { | ||||
device_printf(dev, "Unable to create TX DMA map\n"); | device_printf(dev, "Unable to create TX DMA map\n"); | ||||
goto fail; | goto free_buffers; | ||||
} | } | ||||
} | } | ||||
fail: | |||||
return 0; | |||||
free_buffers: | |||||
while (i--) { | |||||
buf--; | |||||
bus_dmamap_destroy(buf->tag, buf->map); | |||||
} | |||||
free(txr->buffers, M_DEVBUF); | |||||
txr->buffers = NULL; | |||||
free_tx_tso_dma: | |||||
bus_dma_tag_destroy(txr->tso_tag); | |||||
txr->tso_tag = NULL; | |||||
free_tx_dma: | |||||
bus_dma_tag_destroy(txr->tx_tag); | |||||
txr->tx_tag = NULL; | |||||
return (error); | return (error); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* (Re)Initialize a queue transmit ring. | * (Re)Initialize a queue transmit ring. | ||||
* - called by init, it clears the descriptor ring, | * - called by init, it clears the descriptor ring, | ||||
Show All 17 Lines | #ifdef DEV_NETMAP | ||||
/* | /* | ||||
* (under lock): if in netmap mode, do some consistency | * (under lock): if in netmap mode, do some consistency | ||||
* checks and set slot to entry 0 of the netmap ring. | * checks and set slot to entry 0 of the netmap ring. | ||||
*/ | */ | ||||
slot = netmap_reset(na, NR_TX, que->me, 0); | slot = netmap_reset(na, NR_TX, que->me, 0); | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
bzero((void *)txr->base, | bzero((void *)txr->base, | ||||
(sizeof(struct i40e_tx_desc)) * que->num_desc); | (sizeof(struct i40e_tx_desc)) * que->num_tx_desc); | ||||
/* Reset indices */ | /* Reset indices */ | ||||
txr->next_avail = 0; | txr->next_avail = 0; | ||||
txr->next_to_clean = 0; | txr->next_to_clean = 0; | ||||
/* Reset watchdog status */ | /* Reset watchdog status */ | ||||
txr->watchdog_timer = 0; | txr->watchdog_timer = 0; | ||||
#ifdef IXL_FDIR | |||||
/* Initialize flow director */ | |||||
txr->atr_rate = ixl_atr_rate; | |||||
txr->atr_count = 0; | |||||
#endif | |||||
/* Free any existing tx mbufs. */ | /* Free any existing tx mbufs. */ | ||||
buf = txr->buffers; | buf = txr->buffers; | ||||
for (int i = 0; i < que->num_desc; i++, buf++) { | for (int i = 0; i < que->num_tx_desc; i++, buf++) { | ||||
if (buf->m_head != NULL) { | if (buf->m_head != NULL) { | ||||
bus_dmamap_sync(buf->tag, buf->map, | bus_dmamap_sync(buf->tag, buf->map, | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(buf->tag, buf->map); | bus_dmamap_unload(buf->tag, buf->map); | ||||
m_freem(buf->m_head); | m_freem(buf->m_head); | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
} | } | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
Show All 9 Lines | if (slot) { | ||||
netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si)); | netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si)); | ||||
} | } | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
/* Clear the EOP index */ | /* Clear the EOP index */ | ||||
buf->eop_index = -1; | buf->eop_index = -1; | ||||
} | } | ||||
/* Set number of descriptors available */ | /* Set number of descriptors available */ | ||||
txr->avail = que->num_desc; | txr->avail = que->num_tx_desc; | ||||
bus_dmamap_sync(txr->dma.tag, txr->dma.map, | bus_dmamap_sync(txr->dma.tag, txr->dma.map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
IXL_TX_UNLOCK(txr); | IXL_TX_UNLOCK(txr); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Free transmit ring related data structures. | * Free transmit ring related data structures. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | void | ||||
ixl_free_que_tx(struct ixl_queue *que) | ixl_free_que_tx(struct ixl_queue *que) | ||||
{ | { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct ixl_tx_buf *buf; | struct ixl_tx_buf *buf; | ||||
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); | INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); | ||||
for (int i = 0; i < que->num_desc; i++) { | for (int i = 0; i < que->num_tx_desc; i++) { | ||||
buf = &txr->buffers[i]; | buf = &txr->buffers[i]; | ||||
if (buf->m_head != NULL) { | if (buf->m_head != NULL) { | ||||
bus_dmamap_sync(buf->tag, buf->map, | bus_dmamap_sync(buf->tag, buf->map, | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(buf->tag, | |||||
buf->map); | |||||
m_freem(buf->m_head); | m_freem(buf->m_head); | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
if (buf->map != NULL) { | |||||
bus_dmamap_destroy(buf->tag, | |||||
buf->map); | |||||
buf->map = NULL; | |||||
} | } | ||||
} else if (buf->map != NULL) { | bus_dmamap_unload(buf->tag, buf->map); | ||||
bus_dmamap_unload(buf->tag, | bus_dmamap_destroy(buf->tag, buf->map); | ||||
buf->map); | |||||
bus_dmamap_destroy(buf->tag, | |||||
buf->map); | |||||
buf->map = NULL; | |||||
} | } | ||||
} | |||||
if (txr->br != NULL) | |||||
buf_ring_free(txr->br, M_DEVBUF); | |||||
if (txr->buffers != NULL) { | if (txr->buffers != NULL) { | ||||
free(txr->buffers, M_DEVBUF); | free(txr->buffers, M_DEVBUF); | ||||
txr->buffers = NULL; | txr->buffers = NULL; | ||||
} | } | ||||
if (txr->tx_tag != NULL) { | if (txr->tx_tag != NULL) { | ||||
bus_dma_tag_destroy(txr->tx_tag); | bus_dma_tag_destroy(txr->tx_tag); | ||||
txr->tx_tag = NULL; | txr->tx_tag = NULL; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 85 Lines • ▼ Show 20 Lines | #endif | ||||
switch (ipproto) { | switch (ipproto) { | ||||
case IPPROTO_TCP: | case IPPROTO_TCP: | ||||
tcp_hlen = th->th_off << 2; | tcp_hlen = th->th_off << 2; | ||||
if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) { | if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) { | ||||
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; | *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; | ||||
*off |= (tcp_hlen >> 2) << | *off |= (tcp_hlen >> 2) << | ||||
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | ||||
} | } | ||||
#ifdef IXL_FDIR | |||||
ixl_atr(que, th, etype); | |||||
#endif | |||||
break; | break; | ||||
case IPPROTO_UDP: | case IPPROTO_UDP: | ||||
if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) { | if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) { | ||||
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; | *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; | ||||
*off |= (sizeof(struct udphdr) >> 2) << | *off |= (sizeof(struct udphdr) >> 2) << | ||||
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | ||||
} | } | ||||
break; | break; | ||||
case IPPROTO_SCTP: | case IPPROTO_SCTP: | ||||
if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) { | if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) { | ||||
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; | *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; | ||||
*off |= (sizeof(struct sctphdr) >> 2) << | *off |= (sizeof(struct sctphdr) >> 2) << | ||||
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | ||||
} | } | ||||
/* Fall Thru */ | /* Fall Thru */ | ||||
default: | default: | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | | ||||
((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | | ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | | ||||
((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT); | ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT); | ||||
TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss); | TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss); | ||||
TXD->tunneling_params = htole32(0); | TXD->tunneling_params = htole32(0); | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
buf->eop_index = -1; | buf->eop_index = -1; | ||||
if (++idx == que->num_desc) | if (++idx == que->num_tx_desc) | ||||
idx = 0; | idx = 0; | ||||
txr->avail--; | txr->avail--; | ||||
txr->next_avail = idx; | txr->next_avail = idx; | ||||
return TRUE; | return TRUE; | ||||
} | } | ||||
/* | /* | ||||
** ixl_get_tx_head - Retrieve the value from the | * ixl_get_tx_head - Retrieve the value from the | ||||
** location the HW records its HEAD index | * location the HW records its HEAD index | ||||
*/ | */ | ||||
static inline u32 | static inline u32 | ||||
ixl_get_tx_head(struct ixl_queue *que) | ixl_get_tx_head(struct ixl_queue *que) | ||||
{ | { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
void *head = &txr->base[que->num_desc]; | void *head = &txr->base[que->num_tx_desc]; | ||||
return LE32_TO_CPU(*(volatile __le32 *)head); | return LE32_TO_CPU(*(volatile __le32 *)head); | ||||
} | } | ||||
/********************************************************************** | /********************************************************************** | ||||
* | * | ||||
* Examine each tx_buffer in the used queue. If the hardware is done | * Get index of last used descriptor/buffer from hardware, and clean | ||||
* processing the packet then free associated resources. The | * the descriptors/buffers up to that index. | ||||
* tx_buffer is put back on the free queue. | |||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
bool | static bool | ||||
ixl_txeof(struct ixl_queue *que) | ixl_txeof_hwb(struct ixl_queue *que) | ||||
{ | { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
u32 first, last, head, done, processed; | u32 first, last, head, done; | ||||
struct ixl_tx_buf *buf; | struct ixl_tx_buf *buf; | ||||
struct i40e_tx_desc *tx_desc, *eop_desc; | struct i40e_tx_desc *tx_desc, *eop_desc; | ||||
mtx_assert(&txr->mtx, MA_OWNED); | mtx_assert(&txr->mtx, MA_OWNED); | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
// XXX todo: implement moderation | // XXX todo: implement moderation | ||||
if (netmap_tx_irq(que->vsi->ifp, que->me)) | if (netmap_tx_irq(que->vsi->ifp, que->me)) | ||||
return FALSE; | return FALSE; | ||||
#endif /* DEF_NETMAP */ | #endif /* DEF_NETMAP */ | ||||
/* These are not the descriptors you seek, move along :) */ | /* These are not the descriptors you seek, move along :) */ | ||||
if (txr->avail == que->num_desc) { | if (txr->avail == que->num_tx_desc) { | ||||
atomic_store_rel_32(&txr->watchdog_timer, 0); | atomic_store_rel_32(&txr->watchdog_timer, 0); | ||||
return FALSE; | return FALSE; | ||||
} | } | ||||
processed = 0; | |||||
first = txr->next_to_clean; | first = txr->next_to_clean; | ||||
buf = &txr->buffers[first]; | buf = &txr->buffers[first]; | ||||
tx_desc = (struct i40e_tx_desc *)&txr->base[first]; | tx_desc = (struct i40e_tx_desc *)&txr->base[first]; | ||||
last = buf->eop_index; | last = buf->eop_index; | ||||
if (last == -1) | if (last == -1) | ||||
return FALSE; | return FALSE; | ||||
eop_desc = (struct i40e_tx_desc *)&txr->base[last]; | eop_desc = (struct i40e_tx_desc *)&txr->base[last]; | ||||
/* Sync DMA before reading head index from ring */ | |||||
bus_dmamap_sync(txr->dma.tag, txr->dma.map, | |||||
BUS_DMASYNC_POSTREAD); | |||||
/* Get the Head WB value */ | /* Get the Head WB value */ | ||||
head = ixl_get_tx_head(que); | head = ixl_get_tx_head(que); | ||||
/* | /* | ||||
** Get the index of the first descriptor | ** Get the index of the first descriptor | ||||
** BEYOND the EOP and call that 'done'. | ** BEYOND the EOP and call that 'done'. | ||||
** I do this so the comparison in the | ** I do this so the comparison in the | ||||
** inner while loop below can be simple | ** inner while loop below can be simple | ||||
*/ | */ | ||||
if (++last == que->num_desc) last = 0; | if (++last == que->num_tx_desc) last = 0; | ||||
done = last; | done = last; | ||||
bus_dmamap_sync(txr->dma.tag, txr->dma.map, | |||||
BUS_DMASYNC_POSTREAD); | |||||
/* | /* | ||||
** The HEAD index of the ring is written in a | ** The HEAD index of the ring is written in a | ||||
** defined location, this rather than a done bit | ** defined location, this rather than a done bit | ||||
** is what is used to keep track of what must be | ** is what is used to keep track of what must be | ||||
** 'cleaned'. | ** 'cleaned'. | ||||
*/ | */ | ||||
while (first != head) { | while (first != head) { | ||||
/* We clean the range of the packet */ | /* We clean the range of the packet */ | ||||
while (first != done) { | while (first != done) { | ||||
++txr->avail; | ++txr->avail; | ||||
++processed; | |||||
if (buf->m_head) { | if (buf->m_head) { | ||||
txr->bytes += /* for ITR adjustment */ | txr->bytes += /* for ITR adjustment */ | ||||
buf->m_head->m_pkthdr.len; | buf->m_head->m_pkthdr.len; | ||||
txr->tx_bytes += /* for TX stats */ | txr->tx_bytes += /* for TX stats */ | ||||
buf->m_head->m_pkthdr.len; | buf->m_head->m_pkthdr.len; | ||||
bus_dmamap_sync(buf->tag, | bus_dmamap_sync(buf->tag, | ||||
buf->map, | buf->map, | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(buf->tag, | bus_dmamap_unload(buf->tag, | ||||
buf->map); | buf->map); | ||||
m_freem(buf->m_head); | m_freem(buf->m_head); | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
} | } | ||||
buf->eop_index = -1; | buf->eop_index = -1; | ||||
if (++first == que->num_desc) | if (++first == que->num_tx_desc) | ||||
first = 0; | first = 0; | ||||
buf = &txr->buffers[first]; | buf = &txr->buffers[first]; | ||||
tx_desc = &txr->base[first]; | tx_desc = &txr->base[first]; | ||||
} | } | ||||
++txr->packets; | ++txr->packets; | ||||
/* If a packet was successfully cleaned, reset the watchdog timer */ | |||||
atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); | |||||
/* See if there is more work now */ | /* See if there is more work now */ | ||||
last = buf->eop_index; | last = buf->eop_index; | ||||
if (last != -1) { | if (last != -1) { | ||||
eop_desc = &txr->base[last]; | eop_desc = &txr->base[last]; | ||||
/* Get next done point */ | /* Get next done point */ | ||||
if (++last == que->num_desc) last = 0; | if (++last == que->num_tx_desc) last = 0; | ||||
done = last; | done = last; | ||||
} else | } else | ||||
break; | break; | ||||
} | } | ||||
bus_dmamap_sync(txr->dma.tag, txr->dma.map, | bus_dmamap_sync(txr->dma.tag, txr->dma.map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
txr->next_to_clean = first; | txr->next_to_clean = first; | ||||
/* | /* | ||||
* If there are no pending descriptors, clear the timeout. | * If there are no pending descriptors, clear the timeout. | ||||
*/ | */ | ||||
if (txr->avail == que->num_desc) { | if (txr->avail == que->num_tx_desc) { | ||||
atomic_store_rel_32(&txr->watchdog_timer, 0); | atomic_store_rel_32(&txr->watchdog_timer, 0); | ||||
return FALSE; | return FALSE; | ||||
} | } | ||||
return TRUE; | return TRUE; | ||||
} | } | ||||
/********************************************************************** | |||||
* | |||||
* Use index kept by driver and the flag on each descriptor to find used | |||||
* descriptor/buffers and clean them up for re-use. | |||||
* | |||||
* This method of reclaiming descriptors is current incompatible with | |||||
* DEV_NETMAP. | |||||
* | |||||
* Returns TRUE if there are more descriptors to be cleaned after this | |||||
* function exits. | |||||
* | |||||
**********************************************************************/ | |||||
static bool
ixl_txeof_dwb(struct ixl_queue *que)
{
	struct tx_ring *txr = &que->txr;
	u32 first, last, done;
	u32 limit = 256;	/* max packets cleaned per call; bounds time spent under the TX lock */
	struct ixl_tx_buf *buf;
	struct i40e_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->mtx, MA_OWNED);

	/* There are no descriptors to clean */
	if (txr->avail == que->num_tx_desc) {
		atomic_store_rel_32(&txr->watchdog_timer, 0);
		return FALSE;
	}

	/* Set starting index/descriptor/buffer */
	first = txr->next_to_clean;
	buf = &txr->buffers[first];
	tx_desc = &txr->base[first];

	/*
	 * This function operates per-packet -- identifies the start of the
	 * packet and gets the index of the last descriptor of the packet from
	 * it, from eop_index.
	 *
	 * If the last descriptor is marked "done" by the hardware, then all
	 * of the descriptors for the packet are cleaned.
	 */
	/* NB: 'last' is u32; the eop_index sentinel -1 wraps to UINT32_MAX and
	 * the comparison below still matches it by the same conversion. */
	last = buf->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = &txr->base[last];

	/* Sync DMA before reading from ring */
	bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD);

	/*
	 * Get the index of the first descriptor beyond the EOP and call that
	 * 'done'. Simplifies the comparison for the inner loop below.
	 */
	if (++last == que->num_tx_desc)
		last = 0;
	done = last;

	/*
	 * We find the last completed descriptor by examining each
	 * descriptor's status bits to see if it's done.
	 */
	do {
		/* Break if last descriptor in packet isn't marked done */
		if ((eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK)
		    != I40E_TX_DESC_DTYPE_DESC_DONE)
			break;

		/* Clean the descriptors that make up the processed packet */
		while (first != done) {
			/*
			 * If there was a buffer attached to this descriptor,
			 * prevent the adapter from accessing it, and add its
			 * length to the queue's TX stats.
			 */
			if (buf->m_head) {
				txr->bytes += buf->m_head->m_pkthdr.len;
				txr->tx_bytes += buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(buf->tag, buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(buf->tag, buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			buf->eop_index = -1;
			++txr->avail;
			if (++first == que->num_tx_desc)
				first = 0;
			buf = &txr->buffers[first];
			tx_desc = &txr->base[first];
		}
		++txr->packets;
		/* If a packet was successfully cleaned, reset the watchdog timer */
		atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);

		/*
		 * Since buf is the first buffer after the one that was just
		 * cleaned, check if the packet it starts is done, too.
		 */
		last = buf->eop_index;
		if (last != -1) {
			eop_desc = &txr->base[last];
			/* Get next done point */
			if (++last == que->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	} while (--limit);

	/* Flush driver-side writes (cleared descriptors) back before HW reuse */
	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If there are no pending descriptors, clear the watchdog timer.
	 */
	if (txr->avail == que->num_tx_desc) {
		atomic_store_rel_32(&txr->watchdog_timer, 0);
		return FALSE;
	}

	return TRUE;
}
bool | |||||
ixl_txeof(struct ixl_queue *que) | |||||
{ | |||||
struct ixl_vsi *vsi = que->vsi; | |||||
return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que) | |||||
: ixl_txeof_dwb(que); | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Refresh mbuf buffers for RX descriptor rings | * Refresh mbuf buffers for RX descriptor rings | ||||
* - now keeps its own state so discards due to resource | * - now keeps its own state so discards due to resource | ||||
* exhaustion are unnecessary, if an mbuf cannot be obtained | * exhaustion are unnecessary, if an mbuf cannot be obtained | ||||
* it just returns, keeping its placeholder, thus it can simply | * it just returns, keeping its placeholder, thus it can simply | ||||
* be recalled to try again. | * be recalled to try again. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixl_refresh_mbufs(struct ixl_queue *que, int limit) | ixl_refresh_mbufs(struct ixl_queue *que, int limit) | ||||
{ | { | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
bus_dma_segment_t hseg[1]; | bus_dma_segment_t hseg[1]; | ||||
bus_dma_segment_t pseg[1]; | bus_dma_segment_t pseg[1]; | ||||
struct ixl_rx_buf *buf; | struct ixl_rx_buf *buf; | ||||
struct mbuf *mh, *mp; | struct mbuf *mh, *mp; | ||||
int i, j, nsegs, error; | int i, j, nsegs, error; | ||||
bool refreshed = FALSE; | bool refreshed = FALSE; | ||||
i = j = rxr->next_refresh; | i = j = rxr->next_refresh; | ||||
/* Control the loop with one beyond */ | /* Control the loop with one beyond */ | ||||
if (++j == que->num_desc) | if (++j == que->num_rx_desc) | ||||
j = 0; | j = 0; | ||||
while (j != limit) { | while (j != limit) { | ||||
buf = &rxr->buffers[i]; | buf = &rxr->buffers[i]; | ||||
if (rxr->hdr_split == FALSE) | if (rxr->hdr_split == FALSE) | ||||
goto no_split; | goto no_split; | ||||
if (buf->m_head == NULL) { | if (buf->m_head == NULL) { | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | rxr->base[i].read.pkt_addr = | ||||
htole64(pseg[0].ds_addr); | htole64(pseg[0].ds_addr); | ||||
/* Used only when doing header split */ | /* Used only when doing header split */ | ||||
rxr->base[i].read.hdr_addr = 0; | rxr->base[i].read.hdr_addr = 0; | ||||
refreshed = TRUE; | refreshed = TRUE; | ||||
/* Next is precalculated */ | /* Next is precalculated */ | ||||
i = j; | i = j; | ||||
rxr->next_refresh = i; | rxr->next_refresh = i; | ||||
if (++j == que->num_desc) | if (++j == que->num_rx_desc) | ||||
j = 0; | j = 0; | ||||
} | } | ||||
update: | update: | ||||
if (refreshed) /* Update hardware tail index */ | if (refreshed) /* Update hardware tail index */ | ||||
wr32(vsi->hw, rxr->tail, rxr->next_refresh); | wr32(vsi->hw, rxr->tail, rxr->next_refresh); | ||||
return; | return; | ||||
} | } | ||||
Show All 10 Lines | |||||
ixl_allocate_rx_data(struct ixl_queue *que) | ixl_allocate_rx_data(struct ixl_queue *que) | ||||
{ | { | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
device_t dev = vsi->dev; | device_t dev = vsi->dev; | ||||
struct ixl_rx_buf *buf; | struct ixl_rx_buf *buf; | ||||
int i, bsize, error; | int i, bsize, error; | ||||
bsize = sizeof(struct ixl_rx_buf) * que->num_desc; | |||||
if (!(rxr->buffers = | |||||
(struct ixl_rx_buf *) malloc(bsize, | |||||
M_DEVBUF, M_NOWAIT | M_ZERO))) { | |||||
device_printf(dev, "Unable to allocate rx_buffer memory\n"); | |||||
error = ENOMEM; | |||||
return (error); | |||||
} | |||||
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
MSIZE, /* maxsize */ | MSIZE, /* maxsize */ | ||||
1, /* nsegments */ | 1, /* nsegments */ | ||||
MSIZE, /* maxsegsize */ | MSIZE, /* maxsegsize */ | ||||
Show All 13 Lines | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
MJUM16BYTES, /* maxsize */ | MJUM16BYTES, /* maxsize */ | ||||
1, /* nsegments */ | 1, /* nsegments */ | ||||
MJUM16BYTES, /* maxsegsize */ | MJUM16BYTES, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&rxr->ptag))) { | &rxr->ptag))) { | ||||
device_printf(dev, "Unable to create RX DMA ptag\n"); | device_printf(dev, "Unable to create RX DMA ptag\n"); | ||||
return (error); | goto free_rx_htag; | ||||
} | } | ||||
for (i = 0; i < que->num_desc; i++) { | bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc; | ||||
if (!(rxr->buffers = | |||||
(struct ixl_rx_buf *) malloc(bsize, | |||||
M_DEVBUF, M_NOWAIT | M_ZERO))) { | |||||
device_printf(dev, "Unable to allocate rx_buffer memory\n"); | |||||
error = ENOMEM; | |||||
goto free_rx_ptag; | |||||
} | |||||
for (i = 0; i < que->num_rx_desc; i++) { | |||||
buf = &rxr->buffers[i]; | buf = &rxr->buffers[i]; | ||||
error = bus_dmamap_create(rxr->htag, | error = bus_dmamap_create(rxr->htag, | ||||
BUS_DMA_NOWAIT, &buf->hmap); | BUS_DMA_NOWAIT, &buf->hmap); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "Unable to create RX head map\n"); | device_printf(dev, "Unable to create RX head map\n"); | ||||
break; | goto free_buffers; | ||||
} | } | ||||
error = bus_dmamap_create(rxr->ptag, | error = bus_dmamap_create(rxr->ptag, | ||||
BUS_DMA_NOWAIT, &buf->pmap); | BUS_DMA_NOWAIT, &buf->pmap); | ||||
if (error) { | if (error) { | ||||
bus_dmamap_destroy(rxr->htag, buf->hmap); | |||||
device_printf(dev, "Unable to create RX pkt map\n"); | device_printf(dev, "Unable to create RX pkt map\n"); | ||||
break; | goto free_buffers; | ||||
} | } | ||||
} | } | ||||
return 0; | |||||
free_buffers: | |||||
while (i--) { | |||||
buf = &rxr->buffers[i]; | |||||
bus_dmamap_destroy(rxr->ptag, buf->pmap); | |||||
bus_dmamap_destroy(rxr->htag, buf->hmap); | |||||
} | |||||
free(rxr->buffers, M_DEVBUF); | |||||
rxr->buffers = NULL; | |||||
free_rx_ptag: | |||||
bus_dma_tag_destroy(rxr->ptag); | |||||
rxr->ptag = NULL; | |||||
free_rx_htag: | |||||
bus_dma_tag_destroy(rxr->htag); | |||||
rxr->htag = NULL; | |||||
return (error); | return (error); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* (Re)Initialize the queue receive ring and its buffers. | * (Re)Initialize the queue receive ring and its buffers. | ||||
* | * | ||||
Show All 16 Lines | |||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
IXL_RX_LOCK(rxr); | IXL_RX_LOCK(rxr); | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
/* same as in ixl_init_tx_ring() */ | /* same as in ixl_init_tx_ring() */ | ||||
slot = netmap_reset(na, NR_RX, que->me, 0); | slot = netmap_reset(na, NR_RX, que->me, 0); | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
/* Clear the ring contents */ | /* Clear the ring contents */ | ||||
rsize = roundup2(que->num_desc * | rsize = roundup2(que->num_rx_desc * | ||||
sizeof(union i40e_rx_desc), DBA_ALIGN); | sizeof(union i40e_rx_desc), DBA_ALIGN); | ||||
bzero((void *)rxr->base, rsize); | bzero((void *)rxr->base, rsize); | ||||
/* Cleanup any existing buffers */ | /* Cleanup any existing buffers */ | ||||
for (int i = 0; i < que->num_desc; i++) { | for (int i = 0; i < que->num_rx_desc; i++) { | ||||
buf = &rxr->buffers[i]; | buf = &rxr->buffers[i]; | ||||
if (buf->m_head != NULL) { | if (buf->m_head != NULL) { | ||||
bus_dmamap_sync(rxr->htag, buf->hmap, | bus_dmamap_sync(rxr->htag, buf->hmap, | ||||
BUS_DMASYNC_POSTREAD); | BUS_DMASYNC_POSTREAD); | ||||
bus_dmamap_unload(rxr->htag, buf->hmap); | bus_dmamap_unload(rxr->htag, buf->hmap); | ||||
buf->m_head->m_flags |= M_PKTHDR; | buf->m_head->m_flags |= M_PKTHDR; | ||||
m_freem(buf->m_head); | m_freem(buf->m_head); | ||||
} | } | ||||
if (buf->m_pack != NULL) { | if (buf->m_pack != NULL) { | ||||
bus_dmamap_sync(rxr->ptag, buf->pmap, | bus_dmamap_sync(rxr->ptag, buf->pmap, | ||||
BUS_DMASYNC_POSTREAD); | BUS_DMASYNC_POSTREAD); | ||||
bus_dmamap_unload(rxr->ptag, buf->pmap); | bus_dmamap_unload(rxr->ptag, buf->pmap); | ||||
buf->m_pack->m_flags |= M_PKTHDR; | buf->m_pack->m_flags |= M_PKTHDR; | ||||
m_freem(buf->m_pack); | m_freem(buf->m_pack); | ||||
} | } | ||||
buf->m_head = NULL; | buf->m_head = NULL; | ||||
buf->m_pack = NULL; | buf->m_pack = NULL; | ||||
} | } | ||||
/* header split is off */ | /* header split is off */ | ||||
rxr->hdr_split = FALSE; | rxr->hdr_split = FALSE; | ||||
/* Now replenish the mbufs */ | /* Now replenish the mbufs */ | ||||
for (int j = 0; j != que->num_desc; ++j) { | for (int j = 0; j != que->num_rx_desc; ++j) { | ||||
struct mbuf *mh, *mp; | struct mbuf *mh, *mp; | ||||
buf = &rxr->buffers[j]; | buf = &rxr->buffers[j]; | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
/* | /* | ||||
* In netmap mode, fill the map and set the buffer | * In netmap mode, fill the map and set the buffer | ||||
* address in the NIC ring, considering the offset | * address in the NIC ring, considering the offset | ||||
* between the netmap and NIC rings (see comment in | * between the netmap and NIC rings (see comment in | ||||
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | skip_head: | ||||
/* Setup our descriptor indices */ | /* Setup our descriptor indices */ | ||||
rxr->next_check = 0; | rxr->next_check = 0; | ||||
rxr->next_refresh = 0; | rxr->next_refresh = 0; | ||||
rxr->lro_enabled = FALSE; | rxr->lro_enabled = FALSE; | ||||
rxr->split = 0; | rxr->split = 0; | ||||
rxr->bytes = 0; | rxr->bytes = 0; | ||||
rxr->discard = FALSE; | rxr->discard = FALSE; | ||||
wr32(vsi->hw, rxr->tail, que->num_desc - 1); | wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1); | ||||
ixl_flush(vsi->hw); | ixl_flush(vsi->hw); | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
/* | /* | ||||
** Now set up the LRO interface: | ** Now set up the LRO interface: | ||||
*/ | */ | ||||
if (ifp->if_capenable & IFCAP_LRO) { | if (ifp->if_capenable & IFCAP_LRO) { | ||||
int err = tcp_lro_init(lro); | int err = tcp_lro_init(lro); | ||||
Show All 22 Lines | |||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | void | ||||
ixl_free_que_rx(struct ixl_queue *que) | ixl_free_que_rx(struct ixl_queue *que) | ||||
{ | { | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
struct ixl_rx_buf *buf; | struct ixl_rx_buf *buf; | ||||
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); | |||||
/* Cleanup any existing buffers */ | /* Cleanup any existing buffers */ | ||||
if (rxr->buffers != NULL) { | if (rxr->buffers != NULL) { | ||||
for (int i = 0; i < que->num_desc; i++) { | for (int i = 0; i < que->num_rx_desc; i++) { | ||||
buf = &rxr->buffers[i]; | buf = &rxr->buffers[i]; | ||||
if (buf->m_head != NULL) { | |||||
bus_dmamap_sync(rxr->htag, buf->hmap, | /* Free buffers and unload dma maps */ | ||||
BUS_DMASYNC_POSTREAD); | ixl_rx_discard(rxr, i); | ||||
bus_dmamap_unload(rxr->htag, buf->hmap); | |||||
buf->m_head->m_flags |= M_PKTHDR; | |||||
m_freem(buf->m_head); | |||||
} | |||||
if (buf->m_pack != NULL) { | |||||
bus_dmamap_sync(rxr->ptag, buf->pmap, | |||||
BUS_DMASYNC_POSTREAD); | |||||
bus_dmamap_unload(rxr->ptag, buf->pmap); | |||||
buf->m_pack->m_flags |= M_PKTHDR; | |||||
m_freem(buf->m_pack); | |||||
} | |||||
buf->m_head = NULL; | |||||
buf->m_pack = NULL; | |||||
if (buf->hmap != NULL) { | |||||
bus_dmamap_destroy(rxr->htag, buf->hmap); | bus_dmamap_destroy(rxr->htag, buf->hmap); | ||||
buf->hmap = NULL; | |||||
} | |||||
if (buf->pmap != NULL) { | |||||
bus_dmamap_destroy(rxr->ptag, buf->pmap); | bus_dmamap_destroy(rxr->ptag, buf->pmap); | ||||
buf->pmap = NULL; | |||||
} | } | ||||
} | |||||
if (rxr->buffers != NULL) { | |||||
free(rxr->buffers, M_DEVBUF); | free(rxr->buffers, M_DEVBUF); | ||||
rxr->buffers = NULL; | rxr->buffers = NULL; | ||||
} | } | ||||
} | |||||
if (rxr->htag != NULL) { | if (rxr->htag != NULL) { | ||||
bus_dma_tag_destroy(rxr->htag); | bus_dma_tag_destroy(rxr->htag); | ||||
rxr->htag = NULL; | rxr->htag = NULL; | ||||
} | } | ||||
if (rxr->ptag != NULL) { | if (rxr->ptag != NULL) { | ||||
bus_dma_tag_destroy(rxr->ptag); | bus_dma_tag_destroy(rxr->ptag); | ||||
rxr->ptag = NULL; | rxr->ptag = NULL; | ||||
} | } | ||||
INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); | |||||
return; | |||||
} | } | ||||
static inline void | static inline void | ||||
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) | ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) | ||||
{ | { | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
/* | /* | ||||
Show All 20 Lines | |||||
} | } | ||||
static inline void | static inline void | ||||
ixl_rx_discard(struct rx_ring *rxr, int i) | ixl_rx_discard(struct rx_ring *rxr, int i) | ||||
{ | { | ||||
struct ixl_rx_buf *rbuf; | struct ixl_rx_buf *rbuf; | ||||
KASSERT(rxr != NULL, ("Receive ring pointer cannot be null")); | |||||
KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_desc")); | |||||
rbuf = &rxr->buffers[i]; | rbuf = &rxr->buffers[i]; | ||||
if (rbuf->fmp != NULL) {/* Partial chain ? */ | /* Free the mbufs in the current chain for the packet */ | ||||
rbuf->fmp->m_flags |= M_PKTHDR; | if (rbuf->fmp != NULL) { | ||||
bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); | |||||
m_freem(rbuf->fmp); | m_freem(rbuf->fmp); | ||||
rbuf->fmp = NULL; | rbuf->fmp = NULL; | ||||
} | } | ||||
/* | /* | ||||
** With advanced descriptors the writeback | * Free the mbufs for the current descriptor; and let ixl_refresh_mbufs() | ||||
** clobbers the buffer addrs, so its easier | * assign new mbufs to these. | ||||
** to just free the existing mbufs and take | |||||
** the normal refresh path to get new buffers | |||||
** and mapping. | |||||
*/ | */ | ||||
if (rbuf->m_head) { | if (rbuf->m_head) { | ||||
bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD); | |||||
bus_dmamap_unload(rxr->htag, rbuf->hmap); | |||||
m_free(rbuf->m_head); | m_free(rbuf->m_head); | ||||
rbuf->m_head = NULL; | rbuf->m_head = NULL; | ||||
} | } | ||||
if (rbuf->m_pack) { | if (rbuf->m_pack) { | ||||
bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); | |||||
bus_dmamap_unload(rxr->ptag, rbuf->pmap); | |||||
m_free(rbuf->m_pack); | m_free(rbuf->m_pack); | ||||
rbuf->m_pack = NULL; | rbuf->m_pack = NULL; | ||||
} | } | ||||
return; | |||||
} | } | ||||
#ifdef RSS | #ifdef RSS | ||||
/* | /* | ||||
** i40e_ptype_to_hash: parse the packet type | ** i40e_ptype_to_hash: parse the packet type | ||||
** to determine the appropriate hash. | ** to determine the appropriate hash. | ||||
*/ | */ | ||||
static inline int | static inline int | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | ixl_rxeof(struct ixl_queue *que, int count) | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
struct lro_ctrl *lro = &rxr->lro; | struct lro_ctrl *lro = &rxr->lro; | ||||
#endif | #endif | ||||
int i, nextp, processed = 0; | int i, nextp, processed = 0; | ||||
union i40e_rx_desc *cur; | union i40e_rx_desc *cur; | ||||
struct ixl_rx_buf *rbuf, *nbuf; | struct ixl_rx_buf *rbuf, *nbuf; | ||||
IXL_RX_LOCK(rxr); | IXL_RX_LOCK(rxr); | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
if (netmap_rx_irq(ifp, que->me, &count)) { | if (netmap_rx_irq(ifp, que->me, &count)) { | ||||
IXL_RX_UNLOCK(rxr); | IXL_RX_UNLOCK(rxr); | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | */ | ||||
rxr->desc_errs++; | rxr->desc_errs++; | ||||
ixl_rx_discard(rxr, i); | ixl_rx_discard(rxr, i); | ||||
goto next_desc; | goto next_desc; | ||||
} | } | ||||
/* Prefetch the next buffer */ | /* Prefetch the next buffer */ | ||||
if (!eop) { | if (!eop) { | ||||
nextp = i + 1; | nextp = i + 1; | ||||
if (nextp == que->num_desc) | if (nextp == que->num_rx_desc) | ||||
nextp = 0; | nextp = 0; | ||||
nbuf = &rxr->buffers[nextp]; | nbuf = &rxr->buffers[nextp]; | ||||
prefetch(nbuf); | prefetch(nbuf); | ||||
} | } | ||||
/* | /* | ||||
** The header mbuf is ONLY used when header | ** The header mbuf is ONLY used when header | ||||
** split is enabled, otherwise we get normal | ** split is enabled, otherwise we get normal | ||||
▲ Show 20 Lines • Show All 105 Lines • ▼ Show 20 Lines | #else | ||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | ||||
#endif | #endif | ||||
} | } | ||||
next_desc: | next_desc: | ||||
bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, | bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
/* Advance our pointers to the next descriptor. */ | /* Advance our pointers to the next descriptor. */ | ||||
if (++i == que->num_desc) | if (++i == que->num_rx_desc) | ||||
i = 0; | i = 0; | ||||
/* Now send to the stack or do LRO */ | /* Now send to the stack or do LRO */ | ||||
if (sendmp != NULL) { | if (sendmp != NULL) { | ||||
rxr->next_check = i; | rxr->next_check = i; | ||||
IXL_RX_UNLOCK(rxr); | IXL_RX_UNLOCK(rxr); | ||||
ixl_rx_input(rxr, ifp, sendmp, ptype); | ixl_rx_input(rxr, ifp, sendmp, ptype); | ||||
IXL_RX_LOCK(rxr); | IXL_RX_LOCK(rxr); | ||||
/* | |||||
* Update index used in loop in case another | |||||
* ixl_rxeof() call executes when lock is released | |||||
*/ | |||||
i = rxr->next_check; | i = rxr->next_check; | ||||
} | } | ||||
/* Every 8 descriptors we go to refresh mbufs */ | /* Every 8 descriptors we go to refresh mbufs */ | ||||
if (processed == 8) { | if (processed == 8) { | ||||
ixl_refresh_mbufs(que, i); | ixl_refresh_mbufs(que, i); | ||||
processed = 0; | processed = 0; | ||||
} | } | ||||
} | } | ||||
/* Refresh any remaining buf structs */ | /* Refresh any remaining buf structs */ | ||||
if (ixl_rx_unrefreshed(que)) | if (ixl_rx_unrefreshed(que)) | ||||
▲ Show 20 Lines • Show All 100 Lines • ▼ Show 20 Lines | ixl_get_counter(if_t ifp, ift_counter cnt) | ||||
case IFCOUNTER_NOPROTO: | case IFCOUNTER_NOPROTO: | ||||
return (vsi->noproto); | return (vsi->noproto); | ||||
default: | default: | ||||
return (if_get_counter_default(ifp, cnt)); | return (if_get_counter_default(ifp, cnt)); | ||||
} | } | ||||
} | } | ||||
#endif | #endif | ||||
/* | |||||
* Set TX and RX ring size adjusting value to supported range | |||||
*/ | |||||
void | |||||
ixl_vsi_setup_rings_size(struct ixl_vsi * vsi, int tx_ring_size, int rx_ring_size) | |||||
{ | |||||
struct device * dev = vsi->dev; | |||||
if (tx_ring_size < IXL_MIN_RING | |||||
|| tx_ring_size > IXL_MAX_RING | |||||
|| tx_ring_size % IXL_RING_INCREMENT != 0) { | |||||
device_printf(dev, "Invalid tx_ring_size value of %d set!\n", | |||||
tx_ring_size); | |||||
device_printf(dev, "tx_ring_size must be between %d and %d, " | |||||
"inclusive, and must be a multiple of %d\n", | |||||
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_DEFAULT_RING); | |||||
vsi->num_tx_desc = IXL_DEFAULT_RING; | |||||
} else | |||||
vsi->num_tx_desc = tx_ring_size; | |||||
if (rx_ring_size < IXL_MIN_RING | |||||
|| rx_ring_size > IXL_MAX_RING | |||||
|| rx_ring_size % IXL_RING_INCREMENT != 0) { | |||||
device_printf(dev, "Invalid rx_ring_size value of %d set!\n", | |||||
rx_ring_size); | |||||
device_printf(dev, "rx_ring_size must be between %d and %d, " | |||||
"inclusive, and must be a multiple of %d\n", | |||||
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_DEFAULT_RING); | |||||
vsi->num_rx_desc = IXL_DEFAULT_RING; | |||||
} else | |||||
vsi->num_rx_desc = rx_ring_size; | |||||
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", | |||||
vsi->num_tx_desc, vsi->num_rx_desc); | |||||
} |