Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ena/ena.c
Show First 20 Lines • Show All 221 Lines • ▼ Show 20 Lines | ena_dma_alloc(device_t dmadev, bus_size_t size, | ||||
struct ena_adapter* adapter = device_get_softc(dmadev); | struct ena_adapter* adapter = device_get_softc(dmadev); | ||||
uint32_t maxsize; | uint32_t maxsize; | ||||
uint64_t dma_space_addr; | uint64_t dma_space_addr; | ||||
int error; | int error; | ||||
maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; | maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; | ||||
dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); | dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); | ||||
if (dma_space_addr == 0) | if (unlikely(dma_space_addr == 0)) | ||||
dma_space_addr = BUS_SPACE_MAXADDR; | dma_space_addr = BUS_SPACE_MAXADDR; | ||||
error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ | error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ | ||||
8, 0, /* alignment, bounds */ | 8, 0, /* alignment, bounds */ | ||||
dma_space_addr, /* lowaddr of exclusion window */ | dma_space_addr, /* lowaddr of exclusion window */ | ||||
BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ | BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
maxsize, /* maxsize */ | maxsize, /* maxsize */ | ||||
1, /* nsegments */ | 1, /* nsegments */ | ||||
maxsize, /* maxsegsize */ | maxsize, /* maxsegsize */ | ||||
BUS_DMA_ALLOCNOW, /* flags */ | BUS_DMA_ALLOCNOW, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockarg */ | NULL, /* lockarg */ | ||||
&dma->tag); | &dma->tag); | ||||
if (error != 0) { | if (unlikely(error != 0)) { | ||||
device_printf(dmadev, "%s: bus_dma_tag_create failed: %d\n", | device_printf(dmadev, "%s: bus_dma_tag_create failed: %d\n", | ||||
__func__, error); | __func__, error); | ||||
goto fail_tag; | goto fail_tag; | ||||
} | } | ||||
error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, | error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, | ||||
BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); | ||||
if (error != 0) { | if (unlikely(error != 0)) { | ||||
device_printf(dmadev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", | device_printf(dmadev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", | ||||
__func__, (uintmax_t)size, error); | __func__, (uintmax_t)size, error); | ||||
goto fail_map_create; | goto fail_map_create; | ||||
} | } | ||||
dma->paddr = 0; | dma->paddr = 0; | ||||
error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, | error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, | ||||
size, ena_dmamap_callback, &dma->paddr, mapflags); | size, ena_dmamap_callback, &dma->paddr, mapflags); | ||||
if ((error != 0) || (dma->paddr == 0)) { | if (unlikely((error != 0) || (dma->paddr == 0))) { | ||||
device_printf(dmadev, "%s: bus_dmamap_load failed: %d\n", | device_printf(dmadev, "%s: bus_dmamap_load failed: %d\n", | ||||
__func__, error); | __func__, error); | ||||
goto fail_map_load; | goto fail_map_load; | ||||
} | } | ||||
return (0); | return (0); | ||||
fail_map_load: | fail_map_load: | ||||
Show All 11 Lines | |||||
{ | { | ||||
device_t pdev = adapter->pdev; | device_t pdev = adapter->pdev; | ||||
int rid; | int rid; | ||||
rid = PCIR_BAR(ENA_REG_BAR); | rid = PCIR_BAR(ENA_REG_BAR); | ||||
adapter->memory = NULL; | adapter->memory = NULL; | ||||
adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, | adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, | ||||
&rid, RF_ACTIVE); | &rid, RF_ACTIVE); | ||||
if (adapter->registers == NULL) { | if (unlikely(adapter->registers == NULL)) { | ||||
device_printf(pdev, "Unable to allocate bus resource: " | device_printf(pdev, "Unable to allocate bus resource: " | ||||
"registers\n"); | "registers\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
ena_change_mtu(if_t ifp, int new_mtu) | ena_change_mtu(if_t ifp, int new_mtu) | ||||
{ | { | ||||
struct ena_adapter *adapter = if_getsoftc(ifp); | struct ena_adapter *adapter = if_getsoftc(ifp); | ||||
struct ena_com_dev_get_features_ctx get_feat_ctx; | struct ena_com_dev_get_features_ctx get_feat_ctx; | ||||
int rc, old_mtu, max_frame; | int rc, old_mtu, max_frame; | ||||
rc = ena_com_get_dev_attr_feat(adapter->ena_dev, &get_feat_ctx); | rc = ena_com_get_dev_attr_feat(adapter->ena_dev, &get_feat_ctx); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Cannot get attribute for ena device\n"); | "Cannot get attribute for ena device\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
/* Save old MTU in case of fail */ | /* Save old MTU in case of fail */ | ||||
old_mtu = if_getmtu(ifp); | old_mtu = if_getmtu(ifp); | ||||
/* Change MTU and calculate max frame */ | /* Change MTU and calculate max frame */ | ||||
if_setmtu(ifp, new_mtu); | if_setmtu(ifp, new_mtu); | ||||
max_frame = ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1); | max_frame = ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1); | ||||
if ((new_mtu < ENA_MIN_FRAME_LEN) || | if (unlikely((new_mtu < ENA_MIN_FRAME_LEN) || | ||||
(new_mtu > get_feat_ctx.dev_attr.max_mtu) || | (new_mtu > get_feat_ctx.dev_attr.max_mtu) || | ||||
(max_frame > ENA_MAX_FRAME_LEN)) { | (max_frame > ENA_MAX_FRAME_LEN))) { | ||||
device_printf(adapter->pdev, "Invalid MTU setting. " | device_printf(adapter->pdev, "Invalid MTU setting. " | ||||
"new_mtu: %d\n", new_mtu); | "new_mtu: %d\n", new_mtu); | ||||
goto error; | goto error; | ||||
} | } | ||||
rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); | rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); | ||||
if (rc != 0) | if (rc != 0) | ||||
goto error; | goto error; | ||||
▲ Show 20 Lines • Show All 65 Lines • ▼ Show 20 Lines | for (i = 0; i < adapter->num_queues; i++) { | ||||
txr->tx_max_header_size = ena_dev->tx_max_header_size; | txr->tx_max_header_size = ena_dev->tx_max_header_size; | ||||
txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; | txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; | ||||
txr->smoothed_interval = | txr->smoothed_interval = | ||||
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); | ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); | ||||
/* Allocate a buf ring */ | /* Allocate a buf ring */ | ||||
txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF, | txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF, | ||||
M_WAITOK, &txr->ring_mtx); | M_WAITOK, &txr->ring_mtx); | ||||
if (txr->br == NULL) { | if (unlikely(txr->br == NULL)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Error while setting up bufring\n"); | "Error while setting up bufring\n"); | ||||
rc = ENOMEM; | rc = ENOMEM; | ||||
goto err_que_free; | goto err_que_free; | ||||
} | } | ||||
/* Alloc TX statistics. */ | /* Alloc TX statistics. */ | ||||
ena_alloc_counters((counter_u64_t *)&txr->tx_stats, | ena_alloc_counters((counter_u64_t *)&txr->tx_stats, | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), | ||||
ENA_TSO_MAXSIZE, /* maxsize */ | ENA_TSO_MAXSIZE, /* maxsize */ | ||||
adapter->max_tx_sgl_size - 1, /* nsegments */ | adapter->max_tx_sgl_size - 1, /* nsegments */ | ||||
ENA_TSO_MAXSIZE, /* maxsegsize */ | ENA_TSO_MAXSIZE, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&adapter->tx_buf_tag); | &adapter->tx_buf_tag); | ||||
if (ret != 0) | if (unlikely(ret != 0)) | ||||
device_printf(adapter->pdev, "Unable to create Tx DMA tag\n"); | device_printf(adapter->pdev, "Unable to create Tx DMA tag\n"); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
static int | static int | ||||
ena_free_tx_dma_tag(struct ena_adapter *adapter) | ena_free_tx_dma_tag(struct ena_adapter *adapter) | ||||
{ | { | ||||
int ret; | int ret; | ||||
ret = bus_dma_tag_destroy(adapter->tx_buf_tag); | ret = bus_dma_tag_destroy(adapter->tx_buf_tag); | ||||
if (ret == 0) | if (likely(ret == 0)) | ||||
adapter->tx_buf_tag = NULL; | adapter->tx_buf_tag = NULL; | ||||
return (ret); | return (ret); | ||||
} | } | ||||
static int | static int | ||||
ena_setup_rx_dma_tag(struct ena_adapter *adapter) | ena_setup_rx_dma_tag(struct ena_adapter *adapter) | ||||
{ | { | ||||
int ret; | int ret; | ||||
/* Create DMA tag for Rx buffers*/ | /* Create DMA tag for Rx buffers*/ | ||||
ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ | ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ | ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ | ||||
BUS_SPACE_MAXADDR, /* highaddr of excl window */ | BUS_SPACE_MAXADDR, /* highaddr of excl window */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
MJUM16BYTES, /* maxsize */ | MJUM16BYTES, /* maxsize */ | ||||
1, /* nsegments */ | 1, /* nsegments */ | ||||
MJUM16BYTES, /* maxsegsize */ | MJUM16BYTES, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockarg */ | NULL, /* lockarg */ | ||||
&adapter->rx_buf_tag); | &adapter->rx_buf_tag); | ||||
if (ret != 0) | if (unlikely(ret != 0)) | ||||
device_printf(adapter->pdev, "Unable to create Rx DMA tag\n"); | device_printf(adapter->pdev, "Unable to create Rx DMA tag\n"); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
static int | static int | ||||
ena_free_rx_dma_tag(struct ena_adapter *adapter) | ena_free_rx_dma_tag(struct ena_adapter *adapter) | ||||
{ | { | ||||
int ret; | int ret; | ||||
ret = bus_dma_tag_destroy(adapter->rx_buf_tag); | ret = bus_dma_tag_destroy(adapter->rx_buf_tag); | ||||
if (ret == 0) | if (likely(ret == 0)) | ||||
adapter->rx_buf_tag = NULL; | adapter->rx_buf_tag = NULL; | ||||
return (ret); | return (ret); | ||||
} | } | ||||
/** | /** | ||||
* ena_setup_tx_resources - allocate Tx resources (Descriptors) | * ena_setup_tx_resources - allocate Tx resources (Descriptors) | ||||
* @adapter: network interface device structure | * @adapter: network interface device structure | ||||
Show All 9 Lines | ena_setup_tx_resources(struct ena_adapter *adapter, int qid) | ||||
int size, i, err; | int size, i, err; | ||||
#ifdef RSS | #ifdef RSS | ||||
cpuset_t cpu_mask; | cpuset_t cpu_mask; | ||||
#endif | #endif | ||||
size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; | size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; | ||||
tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); | tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (tx_ring->tx_buffer_info == NULL) | if (unlikely(tx_ring->tx_buffer_info == NULL)) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
size = sizeof(uint16_t) * tx_ring->ring_size; | size = sizeof(uint16_t) * tx_ring->ring_size; | ||||
tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); | tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (tx_ring->free_tx_ids == NULL) | if (unlikely(tx_ring->free_tx_ids == NULL)) | ||||
goto err_buf_info_free; | goto err_buf_info_free; | ||||
/* Req id stack for TX OOO completions */ | /* Req id stack for TX OOO completions */ | ||||
for (i = 0; i < tx_ring->ring_size; i++) | for (i = 0; i < tx_ring->ring_size; i++) | ||||
tx_ring->free_tx_ids[i] = i; | tx_ring->free_tx_ids[i] = i; | ||||
/* Reset TX statistics. */ | /* Reset TX statistics. */ | ||||
ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, | ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, | ||||
sizeof(tx_ring->tx_stats)); | sizeof(tx_ring->tx_stats)); | ||||
tx_ring->next_to_use = 0; | tx_ring->next_to_use = 0; | ||||
tx_ring->next_to_clean = 0; | tx_ring->next_to_clean = 0; | ||||
/* Make sure that drbr is empty */ | /* Make sure that drbr is empty */ | ||||
ENA_RING_MTX_LOCK(tx_ring); | ENA_RING_MTX_LOCK(tx_ring); | ||||
drbr_flush(adapter->ifp, tx_ring->br); | drbr_flush(adapter->ifp, tx_ring->br); | ||||
ENA_RING_MTX_UNLOCK(tx_ring); | ENA_RING_MTX_UNLOCK(tx_ring); | ||||
/* ... and create the buffer DMA maps */ | /* ... and create the buffer DMA maps */ | ||||
for (i = 0; i < tx_ring->ring_size; i++) { | for (i = 0; i < tx_ring->ring_size; i++) { | ||||
err = bus_dmamap_create(adapter->tx_buf_tag, 0, | err = bus_dmamap_create(adapter->tx_buf_tag, 0, | ||||
&tx_ring->tx_buffer_info[i].map); | &tx_ring->tx_buffer_info[i].map); | ||||
if (err != 0) { | if (unlikely(err != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unable to create Tx DMA map for buffer %d\n", i); | "Unable to create Tx DMA map for buffer %d\n", i); | ||||
goto err_buf_info_unmap; | goto err_buf_info_unmap; | ||||
} | } | ||||
} | } | ||||
/* Allocate taskqueues */ | /* Allocate taskqueues */ | ||||
TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); | TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); | ||||
tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, | tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, | ||||
taskqueue_thread_enqueue, &tx_ring->enqueue_tq); | taskqueue_thread_enqueue, &tx_ring->enqueue_tq); | ||||
if (tx_ring->enqueue_tq == NULL) { | if (unlikely(tx_ring->enqueue_tq == NULL)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unable to create taskqueue for enqueue task\n"); | "Unable to create taskqueue for enqueue task\n"); | ||||
i = tx_ring->ring_size; | i = tx_ring->ring_size; | ||||
goto err_buf_info_unmap; | goto err_buf_info_unmap; | ||||
} | } | ||||
/* RSS set cpu for thread */ | /* RSS set cpu for thread */ | ||||
#ifdef RSS | #ifdef RSS | ||||
▲ Show 20 Lines • Show All 192 Lines • ▼ Show 20 Lines | if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) { | ||||
} | } | ||||
} | } | ||||
/* Allocate taskqueues */ | /* Allocate taskqueues */ | ||||
TASK_INIT(&rx_ring->cmpl_task, 0, ena_deferred_rx_cleanup, rx_ring); | TASK_INIT(&rx_ring->cmpl_task, 0, ena_deferred_rx_cleanup, rx_ring); | ||||
rx_ring->cmpl_tq = taskqueue_create_fast("ena RX completion", M_NOWAIT, | rx_ring->cmpl_tq = taskqueue_create_fast("ena RX completion", M_NOWAIT, | ||||
taskqueue_thread_enqueue, &rx_ring->cmpl_tq); | taskqueue_thread_enqueue, &rx_ring->cmpl_tq); | ||||
if (rx_ring->cmpl_tq == NULL) { | if (unlikely(rx_ring->cmpl_tq == NULL)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unable to create taskqueue for cmpl task\n"); | "Unable to create taskqueue for cmpl task\n"); | ||||
i = rx_ring->ring_size; | i = rx_ring->ring_size; | ||||
goto err_lro_free; | goto err_lro_free; | ||||
} | } | ||||
/* RSS set cpu for thread */ | /* RSS set cpu for thread */ | ||||
#ifdef RSS | #ifdef RSS | ||||
▲ Show 20 Lines • Show All 113 Lines • ▼ Show 20 Lines | |||||
ena_alloc_rx_mbuf(struct ena_adapter *adapter, | ena_alloc_rx_mbuf(struct ena_adapter *adapter, | ||||
struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) | struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) | ||||
{ | { | ||||
struct ena_com_buf *ena_buf; | struct ena_com_buf *ena_buf; | ||||
bus_dma_segment_t segs[1]; | bus_dma_segment_t segs[1]; | ||||
int nsegs, error; | int nsegs, error; | ||||
/* if previous allocated frag is not used */ | /* if previous allocated frag is not used */ | ||||
if (rx_info->mbuf != NULL) | if (unlikely(rx_info->mbuf != NULL)) | ||||
return (0); | return (0); | ||||
/* Get mbuf using UMA allocator */ | /* Get mbuf using UMA allocator */ | ||||
rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES); | rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES); | ||||
if (rx_info->mbuf == NULL) { | if (unlikely(rx_info->mbuf == NULL)) { | ||||
counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); | counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* Set mbuf length*/ | /* Set mbuf length*/ | ||||
rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = MJUM16BYTES; | rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = MJUM16BYTES; | ||||
/* Map packets for DMA */ | /* Map packets for DMA */ | ||||
ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, | ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, | ||||
"Using tag %p for buffers' DMA mapping, mbuf %p len: %d", | "Using tag %p for buffers' DMA mapping, mbuf %p len: %d", | ||||
adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); | adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); | ||||
error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, | error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, | ||||
rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); | rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); | ||||
if ((error != 0) || (nsegs != 1)) { | if (unlikely((error != 0) || (nsegs != 1))) { | ||||
device_printf(adapter->pdev, "failed to map mbuf, error: %d, " | device_printf(adapter->pdev, "failed to map mbuf, error: %d, " | ||||
"nsegs: %d\n", error, nsegs); | "nsegs: %d\n", error, nsegs); | ||||
counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); | counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); | ||||
goto exit; | goto exit; | ||||
} | } | ||||
bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); | bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | for (i = 0; i < num; i++) { | ||||
req_id = rx_ring->free_rx_ids[next_to_use]; | req_id = rx_ring->free_rx_ids[next_to_use]; | ||||
rc = validate_rx_req_id(rx_ring, req_id); | rc = validate_rx_req_id(rx_ring, req_id); | ||||
if (unlikely(rc != 0)) | if (unlikely(rc != 0)) | ||||
break; | break; | ||||
rx_info = &rx_ring->rx_buffer_info[req_id]; | rx_info = &rx_ring->rx_buffer_info[req_id]; | ||||
rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); | rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"failed to alloc buffer for rx queue\n"); | "failed to alloc buffer for rx queue\n"); | ||||
break; | break; | ||||
} | } | ||||
rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, | rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, | ||||
&rx_info->ena_buf, req_id); | &rx_info->ena_buf, req_id); | ||||
if (unlikely(rc != 0)) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"failed to add buffer for rx queue %d\n", | "failed to add buffer for rx queue %d\n", | ||||
rx_ring->qid); | rx_ring->qid); | ||||
break; | break; | ||||
} | } | ||||
next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, | next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, | ||||
rx_ring->ring_size); | rx_ring->ring_size); | ||||
} | } | ||||
if (i < num) { | if (unlikely(i < num)) { | ||||
counter_u64_add(rx_ring->rx_stats.refil_partial, 1); | counter_u64_add(rx_ring->rx_stats.refil_partial, 1); | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"refilled rx queue %d with %d pages only\n", | "refilled rx queue %d with %d pages only\n", | ||||
rx_ring->qid, i); | rx_ring->qid, i); | ||||
} | } | ||||
if (i != 0) { | if (likely(i != 0)) { | ||||
wmb(); | wmb(); | ||||
ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); | ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); | ||||
} | } | ||||
rx_ring->next_to_use = next_to_use; | rx_ring->next_to_use = next_to_use; | ||||
return (i); | return (i); | ||||
} | } | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 167 Lines • ▼ Show 20 Lines | for (i = 0; i < adapter->num_queues; i++) { | ||||
msix_vector = ENA_IO_IRQ_IDX(i); | msix_vector = ENA_IO_IRQ_IDX(i); | ||||
ena_qid = ENA_IO_RXQ_IDX(i); | ena_qid = ENA_IO_RXQ_IDX(i); | ||||
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | ||||
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; | ||||
ctx.queue_size = adapter->rx_ring_size; | ctx.queue_size = adapter->rx_ring_size; | ||||
ctx.msix_vector = msix_vector; | ctx.msix_vector = msix_vector; | ||||
ctx.qid = ena_qid; | ctx.qid = ena_qid; | ||||
rc = ena_com_create_io_queue(ena_dev, &ctx); | rc = ena_com_create_io_queue(ena_dev, &ctx); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Failed to create io RX queue[%d] rc: %d\n", i, rc); | "Failed to create io RX queue[%d] rc: %d\n", i, rc); | ||||
goto err_rx; | goto err_rx; | ||||
} | } | ||||
ring = &adapter->rx_ring[i]; | ring = &adapter->rx_ring[i]; | ||||
rc = ena_com_get_io_handlers(ena_dev, ena_qid, | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | ||||
&ring->ena_com_io_sq, | &ring->ena_com_io_sq, | ||||
&ring->ena_com_io_cq); | &ring->ena_com_io_cq); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Failed to get RX queue handlers. RX queue num" | "Failed to get RX queue handlers. RX queue num" | ||||
" %d rc: %d\n", i, rc); | " %d rc: %d\n", i, rc); | ||||
ena_com_destroy_io_queue(ena_dev, ena_qid); | ena_com_destroy_io_queue(ena_dev, ena_qid); | ||||
goto err_rx; | goto err_rx; | ||||
} | } | ||||
} | } | ||||
Show All 40 Lines | ena_tx_cleanup(struct ena_ring *tx_ring) | ||||
io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; | io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; | ||||
next_to_clean = tx_ring->next_to_clean; | next_to_clean = tx_ring->next_to_clean; | ||||
do { | do { | ||||
struct ena_tx_buffer *tx_info; | struct ena_tx_buffer *tx_info; | ||||
struct mbuf *mbuf; | struct mbuf *mbuf; | ||||
rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); | rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
break; | break; | ||||
rc = validate_tx_req_id(tx_ring, req_id); | rc = validate_tx_req_id(tx_ring, req_id); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
break; | break; | ||||
tx_info = &tx_ring->tx_buffer_info[req_id]; | tx_info = &tx_ring->tx_buffer_info[req_id]; | ||||
mbuf = tx_info->mbuf; | mbuf = tx_info->mbuf; | ||||
tx_info->mbuf = NULL; | tx_info->mbuf = NULL; | ||||
bintime_clear(&tx_info->timestamp); | bintime_clear(&tx_info->timestamp); | ||||
if (tx_info->num_of_bufs != 0) { | if (likely(tx_info->num_of_bufs != 0)) { | ||||
/* Map is no longer required */ | /* Map is no longer required */ | ||||
bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map); | bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map); | ||||
} | } | ||||
rlibby: Is this one right? | |||||
mk_semihalf.com (Author, Unsubmitted) — Not Done, Inline Action: Yes, if it is true, it means that DMA mapping of the mbuf was successful (we can predict that it will almost always be true).
m_freem(mbuf); | m_freem(mbuf); | ||||
total_done += tx_info->tx_descs; | total_done += tx_info->tx_descs; | ||||
tx_ring->free_tx_ids[next_to_clean] = req_id; | tx_ring->free_tx_ids[next_to_clean] = req_id; | ||||
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, | next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, | ||||
tx_ring->ring_size); | tx_ring->ring_size); | ||||
if (--commit == 0) { | if (unlikely(--commit == 0)) { | ||||
commit = TX_COMMIT; | commit = TX_COMMIT; | ||||
/* update ring state every TX_COMMIT descriptor */ | /* update ring state every TX_COMMIT descriptor */ | ||||
tx_ring->next_to_clean = next_to_clean; | tx_ring->next_to_clean = next_to_clean; | ||||
ena_com_comp_ack( | ena_com_comp_ack( | ||||
&adapter->ena_dev->io_sq_queues[ena_qid], | &adapter->ena_dev->io_sq_queues[ena_qid], | ||||
total_done); | total_done); | ||||
ena_com_update_dev_comp_head(io_cq); | ena_com_update_dev_comp_head(io_cq); | ||||
total_done = 0; | total_done = 0; | ||||
} | } | ||||
} while (--budget); | } while (likely(--budget)); | ||||
rlibbyUnsubmitted Not Done Inline ActionsThis seems just to be a loop counter... Is there any point to predicting its value? rlibby: This seems just to be a loop counter... Is there any point to predicting its value? | |||||
mk_semihalf.com (Author, Unsubmitted) — Not Done, Inline Action: This code is in the hot path, and the counter in the loop was added only to reduce latency on the RX side. If TX cleanup ran in a separate thread, this would be while(1). When traffic is not very high, we are more likely to break out of the loop than to hit budget == 0.
work_done = TX_BUDGET - budget; | work_done = TX_BUDGET - budget; | ||||
/* If there is still something to commit update ring state */ | /* If there is still something to commit update ring state */ | ||||
if (commit != TX_COMMIT) { | if (likely(commit != TX_COMMIT)) { | ||||
tx_ring->next_to_clean = next_to_clean; | tx_ring->next_to_clean = next_to_clean; | ||||
ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], | ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], | ||||
total_done); | total_done); | ||||
ena_com_update_dev_comp_head(io_cq); | ena_com_update_dev_comp_head(io_cq); | ||||
} | } | ||||
taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); | taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); | ||||
return (work_done); | return (work_done); | ||||
} | } | ||||
static void | static void | ||||
ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, | ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, | ||||
struct mbuf *mbuf) | struct mbuf *mbuf) | ||||
{ | { | ||||
struct ena_adapter *adapter = rx_ring->adapter; | struct ena_adapter *adapter = rx_ring->adapter; | ||||
if (adapter->rss_support == true) { | if (likely(adapter->rss_support == true)) { | ||||
mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; | mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; | ||||
if ((ena_rx_ctx->frag == true) && | if ((ena_rx_ctx->frag == true) && | ||||
(ena_rx_ctx->l3_proto != ENA_ETH_IO_L4_PROTO_UNKNOWN)) { | (ena_rx_ctx->l3_proto != ENA_ETH_IO_L4_PROTO_UNKNOWN)) { | ||||
M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); | M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); | ||||
return; | return; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 107 Lines • ▼ Show 20 Lines | if (unlikely(rx_info->mbuf == NULL)) { | ||||
* The next_to_clean pointer will not be updated in case | * The next_to_clean pointer will not be updated in case | ||||
* of an error, so caller should advance it manually | * of an error, so caller should advance it manually | ||||
* in error handling routine to keep it up to date | * in error handling routine to keep it up to date | ||||
* with hw ring. | * with hw ring. | ||||
*/ | */ | ||||
goto err_mbuf_null; | goto err_mbuf_null; | ||||
} | } | ||||
if (m_append(mbuf, len, rx_info->mbuf->m_data) == 0) { | if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { | ||||
counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); | counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); | ||||
ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p", | ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p", | ||||
mbuf); | mbuf); | ||||
} | } | ||||
/* Free already appended mbuf, it won't be useful anymore */ | /* Free already appended mbuf, it won't be useful anymore */ | ||||
bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); | bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); | ||||
m_freem(rx_info->mbuf); | m_freem(rx_info->mbuf); | ||||
rx_info->mbuf = NULL; | rx_info->mbuf = NULL; | ||||
Show All 16 Lines | |||||
* ena_rx_checksum - indicate in mbuf if hw indicated a good cksum | * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum | ||||
**/ | **/ | ||||
static inline void | static inline void | ||||
ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, | ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, | ||||
struct mbuf *mbuf) | struct mbuf *mbuf) | ||||
{ | { | ||||
/* if IP and error */ | /* if IP and error */ | ||||
if ((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && | if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && | ||||
(ena_rx_ctx->l3_csum_err == true)) { | (ena_rx_ctx->l3_csum_err == true))) { | ||||
/* ipv4 checksum error */ | /* ipv4 checksum error */ | ||||
mbuf->m_pkthdr.csum_flags = 0; | mbuf->m_pkthdr.csum_flags = 0; | ||||
counter_u64_add(rx_ring->rx_stats.bad_csum, 1); | counter_u64_add(rx_ring->rx_stats.bad_csum, 1); | ||||
return; | return; | ||||
} | } | ||||
/* if TCP/UDP */ | /* if TCP/UDP */ | ||||
if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || | if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || | ||||
Show All 15 Lines | ena_deferred_rx_cleanup(void *arg, int pending) | ||||
struct ena_ring *rx_ring = arg; | struct ena_ring *rx_ring = arg; | ||||
int budget = CLEAN_BUDGET; | int budget = CLEAN_BUDGET; | ||||
ENA_RING_MTX_LOCK(rx_ring); | ENA_RING_MTX_LOCK(rx_ring); | ||||
/* | /* | ||||
* If deferred task was executed, perform cleanup of all awaiting | * If deferred task was executed, perform cleanup of all awaiting | ||||
* descs (or until given budget is depleted to avoid infinite loop). | * descs (or until given budget is depleted to avoid infinite loop). | ||||
*/ | */ | ||||
while (budget--) { | while (likely(budget--)) { | ||||
if (ena_rx_cleanup(rx_ring) == 0) | if (ena_rx_cleanup(rx_ring) == 0) | ||||
break; | break; | ||||
} | } | ||||
ENA_RING_MTX_UNLOCK(rx_ring); | ENA_RING_MTX_UNLOCK(rx_ring); | ||||
} | } | ||||
/** | /** | ||||
* ena_rx_cleanup - handle rx irq | * ena_rx_cleanup - handle rx irq | ||||
▲ Show 20 Lines • Show All 148 Lines • ▼ Show 20 Lines | ena_handle_msix(void *arg) | ||||
if_t ifp = adapter->ifp; | if_t ifp = adapter->ifp; | ||||
struct ena_ring *tx_ring; | struct ena_ring *tx_ring; | ||||
struct ena_ring *rx_ring; | struct ena_ring *rx_ring; | ||||
struct ena_com_io_cq* io_cq; | struct ena_com_io_cq* io_cq; | ||||
struct ena_eth_io_intr_reg intr_reg; | struct ena_eth_io_intr_reg intr_reg; | ||||
int qid, ena_qid; | int qid, ena_qid; | ||||
int txc, rxc, i; | int txc, rxc, i; | ||||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) | ||||
return; | return; | ||||
ena_trace(ENA_DBG, "MSI-X TX/RX routine"); | ena_trace(ENA_DBG, "MSI-X TX/RX routine"); | ||||
tx_ring = que->tx_ring; | tx_ring = que->tx_ring; | ||||
rx_ring = que->rx_ring; | rx_ring = que->rx_ring; | ||||
qid = que->id; | qid = que->id; | ||||
ena_qid = ENA_IO_TXQ_IDX(qid); | ena_qid = ENA_IO_TXQ_IDX(qid); | ||||
io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; | io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; | ||||
for (i = 0; i < CLEAN_BUDGET; ++i) { | for (i = 0; i < CLEAN_BUDGET; ++i) { | ||||
/* | /* | ||||
* If lock cannot be acquired, then deferred cleanup task was | * If lock cannot be acquired, then deferred cleanup task was | ||||
* being executed and rx ring is being cleaned up in | * being executed and rx ring is being cleaned up in | ||||
* another thread. | * another thread. | ||||
*/ | */ | ||||
if (ENA_RING_MTX_TRYLOCK(rx_ring) != 0) { | if (likely(ENA_RING_MTX_TRYLOCK(rx_ring) != 0)) { | ||||
rxc = ena_rx_cleanup(rx_ring); | rxc = ena_rx_cleanup(rx_ring); | ||||
ENA_RING_MTX_UNLOCK(rx_ring); | ENA_RING_MTX_UNLOCK(rx_ring); | ||||
} else { | } else { | ||||
rxc = 0; | rxc = 0; | ||||
} | } | ||||
/* Protection from calling ena_tx_cleanup from ena_start_xmit */ | /* Protection from calling ena_tx_cleanup from ena_start_xmit */ | ||||
ENA_RING_MTX_LOCK(tx_ring); | ENA_RING_MTX_LOCK(tx_ring); | ||||
txc = ena_tx_cleanup(tx_ring); | txc = ena_tx_cleanup(tx_ring); | ||||
ENA_RING_MTX_UNLOCK(tx_ring); | ENA_RING_MTX_UNLOCK(tx_ring); | ||||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) | ||||
return; | return; | ||||
if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) | if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) | ||||
break; | break; | ||||
} | } | ||||
/* Signal that work is done and unmask interrupt */ | /* Signal that work is done and unmask interrupt */ | ||||
ena_com_update_intr_reg(&intr_reg, | ena_com_update_intr_reg(&intr_reg, | ||||
Show All 9 Lines | ena_enable_msix(struct ena_adapter *adapter) | ||||
device_t dev = adapter->pdev; | device_t dev = adapter->pdev; | ||||
int i, msix_vecs, rc = 0; | int i, msix_vecs, rc = 0; | ||||
/* Reserved the max msix vectors we might need */ | /* Reserved the max msix vectors we might need */ | ||||
msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); | msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); | ||||
adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), | adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), | ||||
M_DEVBUF, M_WAITOK | M_ZERO); | M_DEVBUF, M_WAITOK | M_ZERO); | ||||
if (adapter->msix_entries == NULL) { | if (unlikely(adapter->msix_entries == NULL)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Failed to allocate msix_entries, vectors %d\n", msix_vecs); | "Failed to allocate msix_entries, vectors %d\n", msix_vecs); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
device_printf(dev, "Allocated msix_entries, vectors (cnt: %d)\n", | device_printf(dev, "Allocated msix_entries, vectors (cnt: %d)\n", | ||||
msix_vecs); | msix_vecs); | ||||
for (i = 0; i < msix_vecs; i++) { | for (i = 0; i < msix_vecs; i++) { | ||||
adapter->msix_entries[i].entry = i; | adapter->msix_entries[i].entry = i; | ||||
/* Vectors must start from 1 */ | /* Vectors must start from 1 */ | ||||
adapter->msix_entries[i].vector = i + 1; | adapter->msix_entries[i].vector = i + 1; | ||||
} | } | ||||
rc = pci_alloc_msix(dev, &msix_vecs); | rc = pci_alloc_msix(dev, &msix_vecs); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); | "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); | ||||
rc = ENOSPC; | rc = ENOSPC; | ||||
goto err_msix_free; | goto err_msix_free; | ||||
} | } | ||||
adapter->msix_vecs = msix_vecs; | adapter->msix_vecs = msix_vecs; | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
#ifdef RSS | #ifdef RSS | ||||
adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = | adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = | ||||
rss_getcpu(i % rss_getnumbuckets()); | rss_getcpu(i % rss_getnumbuckets()); | ||||
#else | #else | ||||
/* | /* | ||||
* We still want to bind rings to the corresponding cpu | * We still want to bind rings to the corresponding cpu | ||||
* using something similar to the RSS round-robin technique. | * using something similar to the RSS round-robin technique. | ||||
*/ | */ | ||||
if (last_bind_cpu < 0) | if (unlikely(last_bind_cpu < 0)) | ||||
last_bind_cpu = CPU_FIRST(); | last_bind_cpu = CPU_FIRST(); | ||||
adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = | adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = | ||||
last_bind_cpu; | last_bind_cpu; | ||||
last_bind_cpu = CPU_NEXT(last_bind_cpu); | last_bind_cpu = CPU_NEXT(last_bind_cpu); | ||||
#endif | #endif | ||||
} | } | ||||
} | } | ||||
static int | static int | ||||
ena_request_mgmnt_irq(struct ena_adapter *adapter) | ena_request_mgmnt_irq(struct ena_adapter *adapter) | ||||
{ | { | ||||
struct ena_irq *irq; | struct ena_irq *irq; | ||||
unsigned long flags; | unsigned long flags; | ||||
int rc, rcc; | int rc, rcc; | ||||
flags = RF_ACTIVE | RF_SHAREABLE; | flags = RF_ACTIVE | RF_SHAREABLE; | ||||
irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | ||||
irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, | irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, | ||||
&irq->vector, flags); | &irq->vector, flags); | ||||
if (irq->res == NULL) { | if (unlikely(irq->res == NULL)) { | ||||
device_printf(adapter->pdev, "could not allocate " | device_printf(adapter->pdev, "could not allocate " | ||||
"irq vector: %d\n", irq->vector); | "irq vector: %d\n", irq->vector); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
rc = bus_activate_resource(adapter->pdev, SYS_RES_IRQ, | rc = bus_activate_resource(adapter->pdev, SYS_RES_IRQ, | ||||
irq->vector, irq->res); | irq->vector, irq->res); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "could not activate " | device_printf(adapter->pdev, "could not activate " | ||||
"irq vector: %d\n", irq->vector); | "irq vector: %d\n", irq->vector); | ||||
goto err_res_free; | goto err_res_free; | ||||
} | } | ||||
rc = bus_setup_intr(adapter->pdev, irq->res, | rc = bus_setup_intr(adapter->pdev, irq->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, | INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, | ||||
irq->data, &irq->cookie); | irq->data, &irq->cookie); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "failed to register " | device_printf(adapter->pdev, "failed to register " | ||||
"interrupt handler for irq %ju: %d\n", | "interrupt handler for irq %ju: %d\n", | ||||
rman_get_start(irq->res), rc); | rman_get_start(irq->res), rc); | ||||
goto err_res_free; | goto err_res_free; | ||||
} | } | ||||
irq->requested = true; | irq->requested = true; | ||||
return (rc); | return (rc); | ||||
err_res_free: | err_res_free: | ||||
device_printf(adapter->pdev, "releasing resource for irq %d\n", | device_printf(adapter->pdev, "releasing resource for irq %d\n", | ||||
irq->vector); | irq->vector); | ||||
rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | ||||
irq->vector, irq->res); | irq->vector, irq->res); | ||||
if (rcc != 0) | if (unlikely(rcc != 0)) | ||||
device_printf(adapter->pdev, "dev has no parent while " | device_printf(adapter->pdev, "dev has no parent while " | ||||
"releasing res for irq: %d\n", irq->vector); | "releasing res for irq: %d\n", irq->vector); | ||||
irq->res = NULL; | irq->res = NULL; | ||||
return (rc); | return (rc); | ||||
} | } | ||||
static int | static int | ||||
ena_request_io_irq(struct ena_adapter *adapter) | ena_request_io_irq(struct ena_adapter *adapter) | ||||
{ | { | ||||
struct ena_irq *irq; | struct ena_irq *irq; | ||||
unsigned long flags = 0; | unsigned long flags = 0; | ||||
int rc = 0, i, rcc; | int rc = 0, i, rcc; | ||||
if (adapter->msix_enabled == 0) { | if (unlikely(adapter->msix_enabled == 0)) { | ||||
device_printf(adapter->pdev, "failed to request irq\n"); | device_printf(adapter->pdev, "failed to request irq\n"); | ||||
return (EINVAL); | return (EINVAL); | ||||
} else { | } else { | ||||
flags = RF_ACTIVE | RF_SHAREABLE; | flags = RF_ACTIVE | RF_SHAREABLE; | ||||
} | } | ||||
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { | for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { | ||||
irq = &adapter->irq_tbl[i]; | irq = &adapter->irq_tbl[i]; | ||||
if (irq->requested == true) | if (unlikely(irq->requested == true)) | ||||
continue; | continue; | ||||
irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, | irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, | ||||
&irq->vector, flags); | &irq->vector, flags); | ||||
if (irq->res == NULL) { | if (unlikely(irq->res == NULL)) { | ||||
device_printf(adapter->pdev, "could not allocate " | device_printf(adapter->pdev, "could not allocate " | ||||
"irq vector: %d\n", irq->vector); | "irq vector: %d\n", irq->vector); | ||||
goto err; | goto err; | ||||
} | } | ||||
rc = bus_setup_intr(adapter->pdev, irq->res, | rc = bus_setup_intr(adapter->pdev, irq->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | INTR_TYPE_NET | INTR_MPSAFE, NULL, | ||||
irq->handler, irq->data, &irq->cookie); | irq->handler, irq->data, &irq->cookie); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "failed to register " | device_printf(adapter->pdev, "failed to register " | ||||
"interrupt handler for irq %ju: %d\n", | "interrupt handler for irq %ju: %d\n", | ||||
rman_get_start(irq->res), rc); | rman_get_start(irq->res), rc); | ||||
goto err; | goto err; | ||||
} | } | ||||
irq->requested = true; | irq->requested = true; | ||||
#ifdef RSS | #ifdef RSS | ||||
Show All 12 Lines | err: | ||||
for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { | for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { | ||||
irq = &adapter->irq_tbl[i]; | irq = &adapter->irq_tbl[i]; | ||||
rcc = 0; | rcc = 0; | ||||
/* Once we entered err: section and irq->requested is true we | /* Once we entered err: section and irq->requested is true we | ||||
free both intr and resources */ | free both intr and resources */ | ||||
if (irq->requested == true) | if (irq->requested == true) | ||||
rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); | rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); | ||||
if (rcc != 0) | if (unlikely(rcc != 0)) | ||||
device_printf(adapter->pdev, "could not release" | device_printf(adapter->pdev, "could not release" | ||||
" irq: %d, error: %d\n", irq->vector, rcc); | " irq: %d, error: %d\n", irq->vector, rcc); | ||||
/* If we entred err: section without irq->requested set we know | /* If we entred err: section without irq->requested set we know | ||||
it was bus_alloc_resource_any() that needs cleanup, provided | it was bus_alloc_resource_any() that needs cleanup, provided | ||||
res is not NULL. In case res is NULL no work in needed in | res is not NULL. In case res is NULL no work in needed in | ||||
this iteration */ | this iteration */ | ||||
rcc = 0; | rcc = 0; | ||||
if (irq->res != NULL) { | if (irq->res != NULL) { | ||||
rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | ||||
irq->vector, irq->res); | irq->vector, irq->res); | ||||
} | } | ||||
if (rcc != 0) | if (unlikely(rcc != 0)) | ||||
device_printf(adapter->pdev, "dev has no parent while " | device_printf(adapter->pdev, "dev has no parent while " | ||||
"releasing res for irq: %d\n", irq->vector); | "releasing res for irq: %d\n", irq->vector); | ||||
irq->requested = false; | irq->requested = false; | ||||
irq->res = NULL; | irq->res = NULL; | ||||
} | } | ||||
return (rc); | return (rc); | ||||
} | } | ||||
static void | static void | ||||
ena_free_mgmnt_irq(struct ena_adapter *adapter) | ena_free_mgmnt_irq(struct ena_adapter *adapter) | ||||
{ | { | ||||
struct ena_irq *irq; | struct ena_irq *irq; | ||||
int rc; | int rc; | ||||
irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | ||||
if (irq->requested == true) { | if (irq->requested == true) { | ||||
ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", | ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", | ||||
irq->vector); | irq->vector); | ||||
rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); | rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
device_printf(adapter->pdev, "failed to tear " | device_printf(adapter->pdev, "failed to tear " | ||||
"down irq: %d\n", irq->vector); | "down irq: %d\n", irq->vector); | ||||
irq->requested = 0; | irq->requested = 0; | ||||
} | } | ||||
if (irq->res != NULL) { | if (irq->res != NULL) { | ||||
ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", | ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", | ||||
irq->vector); | irq->vector); | ||||
rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | ||||
irq->vector, irq->res); | irq->vector, irq->res); | ||||
irq->res = NULL; | irq->res = NULL; | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
device_printf(adapter->pdev, "dev has no parent while " | device_printf(adapter->pdev, "dev has no parent while " | ||||
"releasing res for irq: %d\n", irq->vector); | "releasing res for irq: %d\n", irq->vector); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
ena_free_io_irq(struct ena_adapter *adapter) | ena_free_io_irq(struct ena_adapter *adapter) | ||||
{ | { | ||||
struct ena_irq *irq; | struct ena_irq *irq; | ||||
int rc; | int rc; | ||||
for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { | for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { | ||||
irq = &adapter->irq_tbl[i]; | irq = &adapter->irq_tbl[i]; | ||||
if (irq->requested == true) { | if (irq->requested == true) { | ||||
ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", | ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", | ||||
irq->vector); | irq->vector); | ||||
rc = bus_teardown_intr(adapter->pdev, irq->res, | rc = bus_teardown_intr(adapter->pdev, irq->res, | ||||
irq->cookie); | irq->cookie); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "failed to tear " | device_printf(adapter->pdev, "failed to tear " | ||||
"down irq: %d\n", irq->vector); | "down irq: %d\n", irq->vector); | ||||
} | } | ||||
irq->requested = 0; | irq->requested = 0; | ||||
} | } | ||||
if (irq->res != NULL) { | if (irq->res != NULL) { | ||||
ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", | ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", | ||||
irq->vector); | irq->vector); | ||||
rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, | ||||
irq->vector, irq->res); | irq->vector, irq->res); | ||||
irq->res = NULL; | irq->res = NULL; | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "dev has no parent" | device_printf(adapter->pdev, "dev has no parent" | ||||
" while releasing res for irq: %d\n", | " while releasing res for irq: %d\n", | ||||
irq->vector); | irq->vector); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
} | } | ||||
▲ Show 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | ena_rss_configure(struct ena_adapter *adapter) | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
ena_up_complete(struct ena_adapter *adapter) | ena_up_complete(struct ena_adapter *adapter) | ||||
{ | { | ||||
int rc; | int rc; | ||||
if (adapter->rss_support == true) { | if (likely(adapter->rss_support == true)) { | ||||
rc = ena_rss_configure(adapter); | rc = ena_rss_configure(adapter); | ||||
if (rc != 0) | if (rc != 0) | ||||
return (rc); | return (rc); | ||||
} | } | ||||
rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); | rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
return (rc); | return (rc); | ||||
ena_refill_all_rx_bufs(adapter); | ena_refill_all_rx_bufs(adapter); | ||||
ena_reset_counters((counter_u64_t *)&adapter->hw_stats, | ena_reset_counters((counter_u64_t *)&adapter->hw_stats, | ||||
sizeof(adapter->hw_stats)); | sizeof(adapter->hw_stats)); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
ena_up(struct ena_adapter *adapter) | ena_up(struct ena_adapter *adapter) | ||||
{ | { | ||||
int rc = 0; | int rc = 0; | ||||
if (device_is_attached(adapter->pdev) == 0) { | if (unlikely(device_is_attached(adapter->pdev) == 0)) { | ||||
device_printf(adapter->pdev, "device is not attached!\n"); | device_printf(adapter->pdev, "device is not attached!\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
if (adapter->running == false) { | if (unlikely(adapter->running == false)) { | ||||
device_printf(adapter->pdev, "device is not running!\n"); | device_printf(adapter->pdev, "device is not running!\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
if (adapter->up == false) { | if (adapter->up == false) { | ||||
device_printf(adapter->pdev, "device is going UP\n"); | device_printf(adapter->pdev, "device is going UP\n"); | ||||
/* setup interrupts for IO queues */ | /* setup interrupts for IO queues */ | ||||
ena_setup_io_intr(adapter); | ena_setup_io_intr(adapter); | ||||
rc = ena_request_io_irq(adapter); | rc = ena_request_io_irq(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_ALERT, "err_req_irq"); | ena_trace(ENA_ALERT, "err_req_irq"); | ||||
goto err_req_irq; | goto err_req_irq; | ||||
} | } | ||||
/* allocate transmit descriptors */ | /* allocate transmit descriptors */ | ||||
rc = ena_setup_all_tx_resources(adapter); | rc = ena_setup_all_tx_resources(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_ALERT, "err_setup_tx"); | ena_trace(ENA_ALERT, "err_setup_tx"); | ||||
goto err_setup_tx; | goto err_setup_tx; | ||||
} | } | ||||
/* allocate receive descriptors */ | /* allocate receive descriptors */ | ||||
rc = ena_setup_all_rx_resources(adapter); | rc = ena_setup_all_rx_resources(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_ALERT, "err_setup_rx"); | ena_trace(ENA_ALERT, "err_setup_rx"); | ||||
goto err_setup_rx; | goto err_setup_rx; | ||||
} | } | ||||
/* create IO queues for Rx & Tx */ | /* create IO queues for Rx & Tx */ | ||||
rc = ena_create_io_queues(adapter); | rc = ena_create_io_queues(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_ALERT, | ena_trace(ENA_ALERT, | ||||
"create IO queues failed"); | "create IO queues failed"); | ||||
goto err_io_que; | goto err_io_que; | ||||
} | } | ||||
if (adapter->link_status == true) | if (unlikely(adapter->link_status == true)) | ||||
if_link_state_change(adapter->ifp, LINK_STATE_UP); | if_link_state_change(adapter->ifp, LINK_STATE_UP); | ||||
rc = ena_up_complete(adapter); | rc = ena_up_complete(adapter); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
goto err_up_complete; | goto err_up_complete; | ||||
counter_u64_add(adapter->dev_stats.interface_up, 1); | counter_u64_add(adapter->dev_stats.interface_up, 1); | ||||
ena_update_hwassist(adapter); | ena_update_hwassist(adapter); | ||||
if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, | if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, | ||||
IFF_DRV_OACTIVE); | IFF_DRV_OACTIVE); | ||||
▲ Show 20 Lines • Show All 253 Lines • ▼ Show 20 Lines | ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, | ||||
struct ena_com_dev_get_features_ctx *feat) | struct ena_com_dev_get_features_ctx *feat) | ||||
{ | { | ||||
if_t ifp; | if_t ifp; | ||||
int caps = 0; | int caps = 0; | ||||
ena_trace(ENA_DBG, "enter"); | ena_trace(ENA_DBG, "enter"); | ||||
ifp = adapter->ifp = if_gethandle(IFT_ETHER); | ifp = adapter->ifp = if_gethandle(IFT_ETHER); | ||||
if (ifp == NULL) { | if (unlikely(ifp == NULL)) { | ||||
device_printf(pdev, "can not allocate ifnet structure\n"); | device_printf(pdev, "can not allocate ifnet structure\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); | if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); | ||||
if_setdev(ifp, pdev); | if_setdev(ifp, pdev); | ||||
if_setsoftc(ifp, adapter); | if_setsoftc(ifp, adapter); | ||||
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); | if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | if (adapter->up == true) { | ||||
if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, | if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, | ||||
IFF_DRV_RUNNING); | IFF_DRV_RUNNING); | ||||
ena_free_io_irq(adapter); | ena_free_io_irq(adapter); | ||||
if (adapter->trigger_reset == true) { | if (adapter->trigger_reset == true) { | ||||
rc = ena_com_dev_reset(adapter->ena_dev, | rc = ena_com_dev_reset(adapter->ena_dev, | ||||
adapter->reset_reason); | adapter->reset_reason); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Device reset failed\n"); | "Device reset failed\n"); | ||||
} | } | ||||
ena_destroy_all_io_queues(adapter); | ena_destroy_all_io_queues(adapter); | ||||
ena_free_all_tx_bufs(adapter); | ena_free_all_tx_bufs(adapter); | ||||
ena_free_all_rx_bufs(adapter); | ena_free_all_rx_bufs(adapter); | ||||
▲ Show 20 Lines • Show All 107 Lines • ▼ Show 20 Lines | ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) | ||||
/* One segment must be reserved for configuration descriptor. */ | /* One segment must be reserved for configuration descriptor. */ | ||||
if (num_frags < adapter->max_tx_sgl_size) | if (num_frags < adapter->max_tx_sgl_size) | ||||
return (0); | return (0); | ||||
counter_u64_add(tx_ring->tx_stats.collapse, 1); | counter_u64_add(tx_ring->tx_stats.collapse, 1); | ||||
collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, | collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, | ||||
adapter->max_tx_sgl_size - 1); | adapter->max_tx_sgl_size - 1); | ||||
if (collapsed_mbuf == NULL) { | if (unlikely(collapsed_mbuf == NULL)) { | ||||
counter_u64_add(tx_ring->tx_stats.collapse_err, 1); | counter_u64_add(tx_ring->tx_stats.collapse_err, 1); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* If mbuf was collapsed succesfully, original mbuf is released. */ | /* If mbuf was collapsed succesfully, original mbuf is released. */ | ||||
*mbuf = collapsed_mbuf; | *mbuf = collapsed_mbuf; | ||||
return (0); | return (0); | ||||
Show All 19 Lines | ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) | ||||
int nb_hw_desc; | int nb_hw_desc; | ||||
ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); | ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); | ||||
adapter = tx_ring->que->adapter; | adapter = tx_ring->que->adapter; | ||||
ena_dev = adapter->ena_dev; | ena_dev = adapter->ena_dev; | ||||
io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; | io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; | ||||
rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); | rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_WARNING, | ena_trace(ENA_WARNING, | ||||
"Failed to collapse mbuf! err: %d", rc); | "Failed to collapse mbuf! err: %d", rc); | ||||
return (rc); | return (rc); | ||||
} | } | ||||
next_to_use = tx_ring->next_to_use; | next_to_use = tx_ring->next_to_use; | ||||
req_id = tx_ring->free_tx_ids[next_to_use]; | req_id = tx_ring->free_tx_ids[next_to_use]; | ||||
tx_info = &tx_ring->tx_buffer_info[req_id]; | tx_info = &tx_ring->tx_buffer_info[req_id]; | ||||
tx_info->mbuf = *mbuf; | tx_info->mbuf = *mbuf; | ||||
tx_info->num_of_bufs = 0; | tx_info->num_of_bufs = 0; | ||||
ena_buf = tx_info->bufs; | ena_buf = tx_info->bufs; | ||||
len = (*mbuf)->m_len; | len = (*mbuf)->m_len; | ||||
ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len); | ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len); | ||||
push_len = 0; | push_len = 0; | ||||
header_len = min_t(uint32_t, len, tx_ring->tx_max_header_size); | header_len = min_t(uint32_t, len, tx_ring->tx_max_header_size); | ||||
push_hdr = NULL; | push_hdr = NULL; | ||||
rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map, | rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map, | ||||
*mbuf, segs, &nsegs, BUS_DMA_NOWAIT); | *mbuf, segs, &nsegs, BUS_DMA_NOWAIT); | ||||
if ((rc != 0) || (nsegs == 0)) { | if (unlikely((rc != 0) || (nsegs == 0))) { | ||||
ena_trace(ENA_WARNING, | ena_trace(ENA_WARNING, | ||||
"dmamap load failed! err: %d nsegs: %d", rc, nsegs); | "dmamap load failed! err: %d nsegs: %d", rc, nsegs); | ||||
counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); | counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); | ||||
tx_info->mbuf = NULL; | tx_info->mbuf = NULL; | ||||
if (rc == ENOMEM) | if (rc == ENOMEM) | ||||
return (ENA_COM_NO_MEM); | return (ENA_COM_NO_MEM); | ||||
else | else | ||||
return (ENA_COM_INVAL); | return (ENA_COM_INVAL); | ||||
Show All 12 Lines | ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) | ||||
ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | ||||
ena_tx_ctx.req_id = req_id; | ena_tx_ctx.req_id = req_id; | ||||
ena_tx_ctx.header_len = header_len; | ena_tx_ctx.header_len = header_len; | ||||
/* Set flags and meta data */ | /* Set flags and meta data */ | ||||
ena_tx_csum(&ena_tx_ctx, *mbuf); | ena_tx_csum(&ena_tx_ctx, *mbuf); | ||||
/* Prepare the packet's descriptors and send them to device */ | /* Prepare the packet's descriptors and send them to device */ | ||||
rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); | rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_WARNING, "failed to prepare tx bufs\n"); | ena_trace(ENA_WARNING, "failed to prepare tx bufs\n"); | ||||
counter_enter(); | counter_enter(); | ||||
counter_u64_add_protected(tx_ring->tx_stats.queue_stop, 1); | counter_u64_add_protected(tx_ring->tx_stats.queue_stop, 1); | ||||
counter_u64_add_protected(tx_ring->tx_stats.prepare_ctx_err, 1); | counter_u64_add_protected(tx_ring->tx_stats.prepare_ctx_err, 1); | ||||
counter_exit(); | counter_exit(); | ||||
goto dma_error; | goto dma_error; | ||||
} | } | ||||
Show All 31 Lines | |||||
{ | { | ||||
struct mbuf *mbuf; | struct mbuf *mbuf; | ||||
struct ena_adapter *adapter = tx_ring->adapter; | struct ena_adapter *adapter = tx_ring->adapter; | ||||
struct ena_com_io_sq* io_sq; | struct ena_com_io_sq* io_sq; | ||||
int ena_qid; | int ena_qid; | ||||
int acum_pkts = 0; | int acum_pkts = 0; | ||||
int ret = 0; | int ret = 0; | ||||
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0) | if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) | ||||
return; | return; | ||||
if (adapter->link_status == false) | if (unlikely(adapter->link_status == false)) | ||||
return; | return; | ||||
ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); | ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); | ||||
io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; | io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; | ||||
while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { | while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { | ||||
ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" | ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" | ||||
" header csum flags %#jx", | " header csum flags %#jx", | ||||
mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags); | mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags); | ||||
if (ena_com_sq_have_enough_space(io_sq, | if (unlikely(ena_com_sq_have_enough_space(io_sq, | ||||
ENA_TX_CLEANUP_THRESHOLD) == false) | ENA_TX_CLEANUP_THRESHOLD) == false)) | ||||
ena_tx_cleanup(tx_ring); | ena_tx_cleanup(tx_ring); | ||||
if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) { | if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { | ||||
if (ret == ENA_COM_NO_MEM) { | if (ret == ENA_COM_NO_MEM) { | ||||
drbr_putback(adapter->ifp, tx_ring->br, mbuf); | drbr_putback(adapter->ifp, tx_ring->br, mbuf); | ||||
} else if (ret == ENA_COM_NO_SPACE) { | } else if (ret == ENA_COM_NO_SPACE) { | ||||
drbr_putback(adapter->ifp, tx_ring->br, mbuf); | drbr_putback(adapter->ifp, tx_ring->br, mbuf); | ||||
} else { | } else { | ||||
m_freem(mbuf); | m_freem(mbuf); | ||||
drbr_advance(adapter->ifp, tx_ring->br); | drbr_advance(adapter->ifp, tx_ring->br); | ||||
} | } | ||||
break; | break; | ||||
} | } | ||||
drbr_advance(adapter->ifp, tx_ring->br); | drbr_advance(adapter->ifp, tx_ring->br); | ||||
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0) | if (unlikely((if_getdrvflags(adapter->ifp) & | ||||
IFF_DRV_RUNNING) == 0)) | |||||
return; | return; | ||||
acum_pkts++; | acum_pkts++; | ||||
BPF_MTAP(adapter->ifp, mbuf); | BPF_MTAP(adapter->ifp, mbuf); | ||||
if (acum_pkts == DB_THRESHOLD) { | if (unlikely(acum_pkts == DB_THRESHOLD)) { | ||||
acum_pkts = 0; | acum_pkts = 0; | ||||
wmb(); | wmb(); | ||||
/* Trigger the dma engine */ | /* Trigger the dma engine */ | ||||
ena_com_write_sq_doorbell(io_sq); | ena_com_write_sq_doorbell(io_sq); | ||||
counter_u64_add(tx_ring->tx_stats.doorbells, 1); | counter_u64_add(tx_ring->tx_stats.doorbells, 1); | ||||
} | } | ||||
} | } | ||||
if (acum_pkts != 0) { | if (likely(acum_pkts != 0)) { | ||||
wmb(); | wmb(); | ||||
/* Trigger the dma engine */ | /* Trigger the dma engine */ | ||||
ena_com_write_sq_doorbell(io_sq); | ena_com_write_sq_doorbell(io_sq); | ||||
counter_u64_add(tx_ring->tx_stats.doorbells, 1); | counter_u64_add(tx_ring->tx_stats.doorbells, 1); | ||||
} | } | ||||
if (ena_com_sq_have_enough_space(io_sq, | if (ena_com_sq_have_enough_space(io_sq, | ||||
ENA_TX_CLEANUP_THRESHOLD) == false) | ENA_TX_CLEANUP_THRESHOLD) == false) | ||||
Show All 17 Lines | |||||
static int | static int | ||||
ena_mq_start(if_t ifp, struct mbuf *m) | ena_mq_start(if_t ifp, struct mbuf *m) | ||||
{ | { | ||||
struct ena_adapter *adapter = ifp->if_softc; | struct ena_adapter *adapter = ifp->if_softc; | ||||
struct ena_ring *tx_ring; | struct ena_ring *tx_ring; | ||||
int ret, is_drbr_empty; | int ret, is_drbr_empty; | ||||
uint32_t i; | uint32_t i; | ||||
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0) | if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) | ||||
return (ENODEV); | return (ENODEV); | ||||
/* Which queue to use */ | /* Which queue to use */ | ||||
/* | /* | ||||
* If everything is setup correctly, it should be the | * If everything is setup correctly, it should be the | ||||
* same bucket that the current CPU we're on is. | * same bucket that the current CPU we're on is. | ||||
* It should improve performance. | * It should improve performance. | ||||
*/ | */ | ||||
Show All 11 Lines | #endif | ||||
} else { | } else { | ||||
i = curcpu % adapter->num_queues; | i = curcpu % adapter->num_queues; | ||||
} | } | ||||
tx_ring = &adapter->tx_ring[i]; | tx_ring = &adapter->tx_ring[i]; | ||||
/* Check if drbr is empty before putting packet */ | /* Check if drbr is empty before putting packet */ | ||||
is_drbr_empty = drbr_empty(ifp, tx_ring->br); | is_drbr_empty = drbr_empty(ifp, tx_ring->br); | ||||
ret = drbr_enqueue(ifp, tx_ring->br, m); | ret = drbr_enqueue(ifp, tx_ring->br, m); | ||||
if (ret != 0) { | if (unlikely(ret != 0)) { | ||||
taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); | taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
if ((is_drbr_empty != 0) && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { | if ((is_drbr_empty != 0) && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { | ||||
ena_start_xmit(tx_ring); | ena_start_xmit(tx_ring); | ||||
ENA_RING_MTX_UNLOCK(tx_ring); | ENA_RING_MTX_UNLOCK(tx_ring); | ||||
} else { | } else { | ||||
▲ Show 20 Lines • Show All 133 Lines • ▼ Show 20 Lines | |||||
ena_rss_init_default_deferred(void *arg) | ena_rss_init_default_deferred(void *arg) | ||||
{ | { | ||||
struct ena_adapter *adapter; | struct ena_adapter *adapter; | ||||
devclass_t dc; | devclass_t dc; | ||||
int max; | int max; | ||||
int rc; | int rc; | ||||
dc = devclass_find("ena"); | dc = devclass_find("ena"); | ||||
if (dc == NULL) { | if (unlikely(dc == NULL)) { | ||||
ena_trace(ENA_DBG, "No devclass ena\n"); | ena_trace(ENA_DBG, "No devclass ena\n"); | ||||
return; | return; | ||||
} | } | ||||
max = devclass_get_maxunit(dc); | max = devclass_get_maxunit(dc); | ||||
while (max-- >= 0) { | while (max-- >= 0) { | ||||
adapter = devclass_get_softc(dc, max); | adapter = devclass_get_softc(dc, max); | ||||
if (adapter != NULL) { | if (adapter != NULL) { | ||||
rc = ena_rss_init_default(adapter); | rc = ena_rss_init_default(adapter); | ||||
adapter->rss_support = true; | adapter->rss_support = true; | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"WARNING: RSS was not properly initialized," | "WARNING: RSS was not properly initialized," | ||||
" it will affect bandwidth\n"); | " it will affect bandwidth\n"); | ||||
adapter->rss_support = false; | adapter->rss_support = false; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
} | } | ||||
SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); | SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); | ||||
static void | static void | ||||
ena_config_host_info(struct ena_com_dev *ena_dev) | ena_config_host_info(struct ena_com_dev *ena_dev) | ||||
{ | { | ||||
struct ena_admin_host_info *host_info; | struct ena_admin_host_info *host_info; | ||||
int rc; | int rc; | ||||
/* Allocate only the host info */ | /* Allocate only the host info */ | ||||
rc = ena_com_allocate_host_info(ena_dev); | rc = ena_com_allocate_host_info(ena_dev); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
ena_trace(ENA_ALERT, "Cannot allocate host info\n"); | ena_trace(ENA_ALERT, "Cannot allocate host info\n"); | ||||
return; | return; | ||||
} | } | ||||
host_info = ena_dev->host_attr.host_info; | host_info = ena_dev->host_attr.host_info; | ||||
host_info->os_type = ENA_ADMIN_OS_FREEBSD; | host_info->os_type = ENA_ADMIN_OS_FREEBSD; | ||||
host_info->kernel_ver = osreldate; | host_info->kernel_ver = osreldate; | ||||
sprintf(host_info->kernel_ver_str, "%d", osreldate); | sprintf(host_info->kernel_ver_str, "%d", osreldate); | ||||
host_info->os_dist = 0; | host_info->os_dist = 0; | ||||
strncpy(host_info->os_dist_str, osrelease, | strncpy(host_info->os_dist_str, osrelease, | ||||
sizeof(host_info->os_dist_str) - 1); | sizeof(host_info->os_dist_str) - 1); | ||||
host_info->driver_version = | host_info->driver_version = | ||||
(DRV_MODULE_VER_MAJOR) | | (DRV_MODULE_VER_MAJOR) | | ||||
(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | ||||
(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); | (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); | ||||
rc = ena_com_set_host_attributes(ena_dev); | rc = ena_com_set_host_attributes(ena_dev); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
if (rc == EOPNOTSUPP) | if (rc == EOPNOTSUPP) | ||||
ena_trace(ENA_WARNING, "Cannot set host attributes\n"); | ena_trace(ENA_WARNING, "Cannot set host attributes\n"); | ||||
else | else | ||||
ena_trace(ENA_ALERT, "Cannot set host attributes\n"); | ena_trace(ENA_ALERT, "Cannot set host attributes\n"); | ||||
goto err; | goto err; | ||||
} | } | ||||
Show All 9 Lines | |||||
/*
 * Perform the low-level bring-up of the ENA device: MMIO read support,
 * device reset, version/DMA-width validation, admin queue creation, host
 * info registration, feature discovery, and AENQ group configuration.
 *
 * On success returns 0, fills *get_feat_ctx with the device's advertised
 * features, and sets *wd_active to whether the keep-alive watchdog AENQ
 * group is supported.  On failure returns the error code after unwinding
 * exactly the resources acquired so far (goto-cleanup pattern).
 *
 * NOTE(review): the signature below is reconstructed from the visible call
 * sites (ena_attach/ena_reset_task pass adapter, pdev, &get_feat_ctx,
 * &wd_active) — verify against the full file.
 */
static int
ena_device_init(struct ena_adapter *adapter, device_t pdev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool readless_supported;
	uint32_t aenq_groups;
	int dma_width;
	int rc;

	/* Must come first: everything below may issue MMIO register reads. */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "failed to init mmio read less\n");
		return (rc);
	}

	/*
	 * The PCIe configuration space revision id indicate if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	/*
	 * Negative return doubles as the error code; cached in the adapter
	 * for later DMA tag creation (see ena_dma_alloc()).
	 */
	dma_width = ena_com_get_dma_width(ena_dev);
	if (unlikely(dma_width < 0)) {
		device_printf(pdev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}
	adapter->dma_width = dma_width;

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
	if (unlikely(rc != 0)) {
		device_printf(pdev,
		    "Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/*
	 * To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	/* Best-effort: ena_config_host_info() handles its own failures. */
	ena_config_host_info(ena_dev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (unlikely(rc != 0)) {
		device_printf(pdev,
		    "Cannot get attribute for ena device rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Request every AENQ event group the driver knows how to handle... */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
	    BIT(ENA_ADMIN_FATAL_ERROR) |
	    BIT(ENA_ADMIN_WARNING) |
	    BIT(ENA_ADMIN_NOTIFICATION) |
	    BIT(ENA_ADMIN_KEEP_ALIVE);

	/* ...but only those the device actually supports. */
	aenq_groups &= get_feat_ctx->aenq.supported_groups;
	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Watchdog is armed only if the device supports keep-alive events. */
	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return (0);

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return (rc);
}
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, | static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, | ||||
int io_vectors) | int io_vectors) | ||||
{ | { | ||||
struct ena_com_dev *ena_dev = adapter->ena_dev; | struct ena_com_dev *ena_dev = adapter->ena_dev; | ||||
int rc; | int rc; | ||||
rc = ena_enable_msix(adapter); | rc = ena_enable_msix(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "Error with MSI-X enablement\n"); | device_printf(adapter->pdev, "Error with MSI-X enablement\n"); | ||||
return (rc); | return (rc); | ||||
} | } | ||||
ena_setup_mgmnt_intr(adapter); | ena_setup_mgmnt_intr(adapter); | ||||
rc = ena_request_mgmnt_irq(adapter); | rc = ena_request_mgmnt_irq(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); | device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); | ||||
goto err_disable_msix; | goto err_disable_msix; | ||||
} | } | ||||
ena_com_set_admin_polling_mode(ena_dev, false); | ena_com_set_admin_polling_mode(ena_dev, false); | ||||
ena_com_admin_aenq_enable(ena_dev); | ena_com_admin_aenq_enable(ena_dev); | ||||
Show All 27 Lines | |||||
/* Check for keep alive expiration */ | /* Check for keep alive expiration */ | ||||
static void check_for_missing_keep_alive(struct ena_adapter *adapter) | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | ||||
{ | { | ||||
sbintime_t timestamp, time; | sbintime_t timestamp, time; | ||||
if (adapter->wd_active == 0) | if (adapter->wd_active == 0) | ||||
return; | return; | ||||
if (adapter->keep_alive_timeout == 0) | if (likely(adapter->keep_alive_timeout == 0)) | ||||
return; | return; | ||||
timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); | timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); | ||||
time = getsbinuptime() - timestamp; | time = getsbinuptime() - timestamp; | ||||
if (unlikely(time > adapter->keep_alive_timeout)) { | if (unlikely(time > adapter->keep_alive_timeout)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Keep alive watchdog timeout.\n"); | "Keep alive watchdog timeout.\n"); | ||||
counter_u64_add(adapter->dev_stats.wd_expired, 1); | counter_u64_add(adapter->dev_stats.wd_expired, 1); | ||||
▲ Show 20 Lines • Show All 217 Lines • ▼ Show 20 Lines | ena_reset_task(void *arg, int pending) | ||||
ena_com_mmio_reg_read_request_destroy(ena_dev); | ena_com_mmio_reg_read_request_destroy(ena_dev); | ||||
adapter->reset_reason = ENA_REGS_RESET_NORMAL; | adapter->reset_reason = ENA_REGS_RESET_NORMAL; | ||||
adapter->trigger_reset = false; | adapter->trigger_reset = false; | ||||
/* Finished destroy part. Restart the device */ | /* Finished destroy part. Restart the device */ | ||||
rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, | rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, | ||||
&adapter->wd_active); | &adapter->wd_active); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"ENA device init failed! (err: %d)\n", rc); | "ENA device init failed! (err: %d)\n", rc); | ||||
goto err_dev_free; | goto err_dev_free; | ||||
} | } | ||||
rc = ena_enable_msix_and_set_admin_interrupts(adapter, | rc = ena_enable_msix_and_set_admin_interrupts(adapter, | ||||
adapter->num_queues); | adapter->num_queues); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, "Enable MSI-X failed\n"); | device_printf(adapter->pdev, "Enable MSI-X failed\n"); | ||||
goto err_com_free; | goto err_com_free; | ||||
} | } | ||||
/* If the interface was up before the reset bring it up */ | /* If the interface was up before the reset bring it up */ | ||||
if (dev_up == true) { | if (dev_up == true) { | ||||
rc = ena_up(adapter); | rc = ena_up(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Failed to create I/O queues\n"); | "Failed to create I/O queues\n"); | ||||
goto err_msix_free; | goto err_msix_free; | ||||
} | } | ||||
} | } | ||||
callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, | callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, | ||||
ena_timer_service, (void *)adapter, 0); | ena_timer_service, (void *)adapter, 0); | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | ena_attach(device_t pdev) | ||||
adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; | adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; | ||||
adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; | adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; | ||||
adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; | adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; | ||||
if (version_printed++ == 0) | if (version_printed++ == 0) | ||||
device_printf(pdev, "%s\n", ena_version); | device_printf(pdev, "%s\n", ena_version); | ||||
rc = ena_allocate_pci_resources(adapter); | rc = ena_allocate_pci_resources(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(pdev, "PCI resource allocation failed!\n"); | device_printf(pdev, "PCI resource allocation failed!\n"); | ||||
ena_free_pci_resources(adapter); | ena_free_pci_resources(adapter); | ||||
return (rc); | return (rc); | ||||
} | } | ||||
/* Allocate memory for ena_dev structure */ | /* Allocate memory for ena_dev structure */ | ||||
ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, | ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, | ||||
M_WAITOK | M_ZERO); | M_WAITOK | M_ZERO); | ||||
if (ena_dev == NULL) { | if (unlikely(ena_dev == NULL)) { | ||||
device_printf(pdev, "allocating ena_dev failed\n"); | device_printf(pdev, "allocating ena_dev failed\n"); | ||||
rc = ENOMEM; | rc = ENOMEM; | ||||
goto err_pci_free; | goto err_pci_free; | ||||
} | } | ||||
adapter->ena_dev = ena_dev; | adapter->ena_dev = ena_dev; | ||||
ena_dev->dmadev = pdev; | ena_dev->dmadev = pdev; | ||||
ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, | ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, | ||||
M_WAITOK | M_ZERO); | M_WAITOK | M_ZERO); | ||||
if (ena_dev->bus == NULL) { | if (unlikely(ena_dev->bus == NULL)) { | ||||
device_printf(pdev, "allocating bus resources failed\n"); | device_printf(pdev, "allocating bus resources failed\n"); | ||||
rc = ENOMEM; | rc = ENOMEM; | ||||
goto err_dev_free; | goto err_dev_free; | ||||
} | } | ||||
/* Store register resources */ | /* Store register resources */ | ||||
((struct ena_bus*)(ena_dev->bus))->reg_bar_t = | ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = | ||||
rman_get_bustag(adapter->registers); | rman_get_bustag(adapter->registers); | ||||
((struct ena_bus*)(ena_dev->bus))->reg_bar_h = | ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = | ||||
rman_get_bushandle(adapter->registers); | rman_get_bushandle(adapter->registers); | ||||
if (((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0) { | if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { | ||||
device_printf(pdev, "failed to pmap registers bar\n"); | device_printf(pdev, "failed to pmap registers bar\n"); | ||||
rc = ENXIO; | rc = ENXIO; | ||||
goto err_bus_free; | goto err_bus_free; | ||||
} | } | ||||
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | ||||
/* Device initialization */ | /* Device initialization */ | ||||
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); | rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(pdev, "ENA device init failed! (err: %d)\n", rc); | device_printf(pdev, "ENA device init failed! (err: %d)\n", rc); | ||||
rc = ENXIO; | rc = ENXIO; | ||||
goto err_bus_free; | goto err_bus_free; | ||||
} | } | ||||
adapter->keep_alive_timestamp = getsbinuptime(); | adapter->keep_alive_timestamp = getsbinuptime(); | ||||
adapter->tx_offload_cap = get_feat_ctx.offload.tx; | adapter->tx_offload_cap = get_feat_ctx.offload.tx; | ||||
Show All 12 Lines | ena_attach(device_t pdev) | ||||
ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", | ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", | ||||
io_queue_num); | io_queue_num); | ||||
adapter->num_queues = io_queue_num; | adapter->num_queues = io_queue_num; | ||||
/* calculatre ring sizes */ | /* calculatre ring sizes */ | ||||
queue_size = ena_calc_queue_size(adapter,&tx_sgl_size, | queue_size = ena_calc_queue_size(adapter,&tx_sgl_size, | ||||
&rx_sgl_size, &get_feat_ctx); | &rx_sgl_size, &get_feat_ctx); | ||||
if ((queue_size <= 0) || (io_queue_num <= 0)) { | if (unlikely((queue_size <= 0) || (io_queue_num <= 0))) { | ||||
rc = ENA_COM_FAULT; | rc = ENA_COM_FAULT; | ||||
goto err_com_free; | goto err_com_free; | ||||
} | } | ||||
adapter->reset_reason = ENA_REGS_RESET_NORMAL; | adapter->reset_reason = ENA_REGS_RESET_NORMAL; | ||||
adapter->tx_ring_size = queue_size; | adapter->tx_ring_size = queue_size; | ||||
adapter->rx_ring_size = queue_size; | adapter->rx_ring_size = queue_size; | ||||
adapter->max_tx_sgl_size = tx_sgl_size; | adapter->max_tx_sgl_size = tx_sgl_size; | ||||
adapter->max_rx_sgl_size = rx_sgl_size; | adapter->max_rx_sgl_size = rx_sgl_size; | ||||
/* set up dma tags for rx and tx buffers */ | /* set up dma tags for rx and tx buffers */ | ||||
rc = ena_setup_tx_dma_tag(adapter); | rc = ena_setup_tx_dma_tag(adapter); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
goto err_com_free; | goto err_com_free; | ||||
rc = ena_setup_rx_dma_tag(adapter); | rc = ena_setup_rx_dma_tag(adapter); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
goto err_tx_tag_free; | goto err_tx_tag_free; | ||||
/* initialize rings basic information */ | /* initialize rings basic information */ | ||||
device_printf(pdev, "initalize %d io queues\n", io_queue_num); | device_printf(pdev, "initalize %d io queues\n", io_queue_num); | ||||
rc = ena_init_io_rings(adapter); | rc = ena_init_io_rings(adapter); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(pdev,"Error with initialization of IO rings\n"); | device_printf(pdev,"Error with initialization of IO rings\n"); | ||||
goto err_rx_tag_free; | goto err_rx_tag_free; | ||||
} | } | ||||
/* setup network interface */ | /* setup network interface */ | ||||
rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); | rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(pdev,"Error with network interface setup\n"); | device_printf(pdev,"Error with network interface setup\n"); | ||||
goto err_io_free; | goto err_io_free; | ||||
} | } | ||||
rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); | rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); | ||||
if (rc != 0) { | if (unlikely(rc != 0)) { | ||||
device_printf(pdev, | device_printf(pdev, | ||||
"Failed to enable and set the admin interrupts\n"); | "Failed to enable and set the admin interrupts\n"); | ||||
goto err_ifp_free; | goto err_ifp_free; | ||||
} | } | ||||
/* Initialize reset task queue */ | /* Initialize reset task queue */ | ||||
TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); | TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); | ||||
adapter->reset_tq = taskqueue_create("ena_reset_enqueue", | adapter->reset_tq = taskqueue_create("ena_reset_enqueue", | ||||
M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); | M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); | ||||
if (adapter->reset_tq == NULL) { | if (unlikely(adapter->reset_tq == NULL)) { | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unable to create reset task queue\n"); | "Unable to create reset task queue\n"); | ||||
goto err_msix_free; | goto err_msix_free; | ||||
} | } | ||||
taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, | taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, | ||||
"%s rstq", device_get_nameunit(adapter->pdev)); | "%s rstq", device_get_nameunit(adapter->pdev)); | ||||
/* Initialize statistics */ | /* Initialize statistics */ | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | ena_detach(device_t pdev) | ||||
ena_free_all_io_rings_resources(adapter); | ena_free_all_io_rings_resources(adapter); | ||||
ena_free_counters((counter_u64_t *)&adapter->hw_stats, | ena_free_counters((counter_u64_t *)&adapter->hw_stats, | ||||
sizeof(struct ena_hw_stats)); | sizeof(struct ena_hw_stats)); | ||||
ena_free_counters((counter_u64_t *)&adapter->dev_stats, | ena_free_counters((counter_u64_t *)&adapter->dev_stats, | ||||
sizeof(struct ena_stats_dev)); | sizeof(struct ena_stats_dev)); | ||||
if (adapter->rss_support == true) | if (likely(adapter->rss_support == true)) | ||||
ena_com_rss_destroy(ena_dev); | ena_com_rss_destroy(ena_dev); | ||||
rc = ena_free_rx_dma_tag(adapter); | rc = ena_free_rx_dma_tag(adapter); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unmapped RX DMA tag associations\n"); | "Unmapped RX DMA tag associations\n"); | ||||
rc = ena_free_tx_dma_tag(adapter); | rc = ena_free_tx_dma_tag(adapter); | ||||
if (rc != 0) | if (unlikely(rc != 0)) | ||||
device_printf(adapter->pdev, | device_printf(adapter->pdev, | ||||
"Unmapped TX DMA tag associations\n"); | "Unmapped TX DMA tag associations\n"); | ||||
/* Reset the device only if the device is running. */ | /* Reset the device only if the device is running. */ | ||||
if (adapter->running == true) | if (adapter->running == true) | ||||
ena_com_dev_reset(ena_dev, adapter->reset_reason); | ena_com_dev_reset(ena_dev, adapter->reset_reason); | ||||
ena_com_delete_host_info(ena_dev); | ena_com_delete_host_info(ena_dev); | ||||
▲ Show 20 Lines • Show All 100 Lines • Show Last 20 Lines |
Is `likely()` the right hint here — is `rss_support` expected to be true in the common case? (Compare also the `likely(adapter->keep_alive_timeout == 0)` hunk in check_for_missing_keep_alive, where a zero/disabled timeout looks like the *uncommon* case and `unlikely()` seems intended.)