Index: stable/11/sys/contrib/ena-com/ena_plat.h =================================================================== --- stable/11/sys/contrib/ena-com/ena_plat.h (revision 361467) +++ stable/11/sys/contrib/ena-com/ena_plat.h (revision 361468) @@ -1,388 +1,389 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef ENA_PLAT_H_ #define ENA_PLAT_H_ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern struct ena_bus_space ebs; /* Levels */ #define ENA_ALERT (1 << 0) /* Alerts are providing more error info. */ #define ENA_WARNING (1 << 1) /* Driver output is more error sensitive. */ #define ENA_INFO (1 << 2) /* Provides additional driver info. */ #define ENA_DBG (1 << 3) /* Driver output for debugging. */ /* Detailed info that will be printed with ENA_INFO or ENA_DEBUG flag. */ #define ENA_TXPTH (1 << 4) /* Allows TX path tracing. */ #define ENA_RXPTH (1 << 5) /* Allows RX path tracing. */ #define ENA_RSC (1 << 6) /* Goes with TXPTH or RXPTH, free/alloc res. */ #define ENA_IOQ (1 << 7) /* Detailed info about IO queues. */ #define ENA_ADMQ (1 << 8) /* Detailed info about admin queue. */ +#define ENA_NETMAP (1 << 9) /* Detailed info about netmap. */ extern int ena_log_level; #define ena_trace_raw(level, fmt, args...) \ do { \ if (((level) & ena_log_level) != (level)) \ break; \ printf(fmt, ##args); \ } while (0) #define ena_trace(level, fmt, args...) \ ena_trace_raw(level, "%s() [TID:%d]: " \ fmt, __func__, curthread->td_tid, ##args) #define ena_trc_dbg(format, arg...) ena_trace(ENA_DBG, format, ##arg) #define ena_trc_info(format, arg...) 
ena_trace(ENA_INFO, format, ##arg) #define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg) #define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg) #define unlikely(x) __predict_false(x) #define likely(x) __predict_true(x) #define __iomem #define ____cacheline_aligned __aligned(CACHE_LINE_SIZE) #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) unlikely((x) <= (unsigned long)MAX_ERRNO) #define ENA_ASSERT(cond, format, arg...) \ do { \ if (unlikely(!(cond))) { \ ena_trc_err( \ "Assert failed on %s:%s:%d:" format, \ __FILE__, __func__, __LINE__, ##arg); \ } \ } while (0) #define ENA_WARN(cond, format, arg...) \ do { \ if (unlikely((cond))) { \ ena_trc_warn(format, ##arg); \ } \ } while (0) static inline long IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline void *ERR_PTR(long error) { return (void *)error; } static inline long PTR_ERR(const void *ptr) { return (long) ptr; } #define GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h)))) #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (64 - 1 - (h)))) #define BIT(x) (1UL << (x)) #define ENA_ABORT() BUG() #define BUG() panic("ENA BUG") #define SZ_256 (256) #define SZ_4K (4096) #define ENA_COM_OK 0 #define ENA_COM_FAULT EFAULT #define ENA_COM_INVAL EINVAL #define ENA_COM_NO_MEM ENOMEM #define ENA_COM_NO_SPACE ENOSPC #define ENA_COM_TRY_AGAIN -1 #define ENA_COM_UNSUPPORTED EOPNOTSUPP #define ENA_COM_NO_DEVICE ENODEV #define ENA_COM_PERMISSION EPERM #define ENA_COM_TIMER_EXPIRED ETIMEDOUT #define ENA_MSLEEP(x) pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0) #define ENA_UDELAY(x) DELAY(x) #define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \ ((long)cputick2usec(cpu_ticks()) + (timeout_us)) #define ENA_TIME_EXPIRE(timeout) ((timeout) < (long)cputick2usec(cpu_ticks())) #define ENA_MIGHT_SLEEP() #define min_t(type, _x, _y) ((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y)) #define max_t(type, _x, _y) ((type)(_x) > (type)(_y) ? 
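/*
 * A minimal userspace sketch (not driver code) of how the bit-flag log
 * levels and the GENMASK() macro defined in ena_plat.h above behave.
 * demo_log_level and the demo_* names are illustrative stand-ins for
 * ena_log_level and the driver macros.
 */
#include <stdio.h>

#define DEMO_GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h))))

static unsigned int demo_log_level = (1 << 2) | (1 << 7); /* ENA_INFO | ENA_IOQ */

static void
demo_trace(unsigned int level, const char *msg)
{
	/* Mirrors ena_trace_raw(): print only if all requested bits are enabled. */
	if ((level & demo_log_level) != level)
		return;
	printf("%s\n", msg);
}

int
main(void)
{
	demo_trace(1 << 2, "printed: the ENA_INFO bit is enabled");
	demo_trace((1 << 2) | (1 << 9), "suppressed: the new ENA_NETMAP bit is clear");
	printf("GENMASK(7, 4) = 0x%x\n", DEMO_GENMASK(7, 4)); /* prints 0xf0 */
	return (0);
}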
(type)(_x) : (type)(_y)) #define ENA_MIN32(x,y) MIN(x, y) #define ENA_MIN16(x,y) MIN(x, y) #define ENA_MIN8(x,y) MIN(x, y) #define ENA_MAX32(x,y) MAX(x, y) #define ENA_MAX16(x,y) MAX(x, y) #define ENA_MAX8(x,y) MAX(x, y) /* Spinlock related methods */ #define ena_spinlock_t struct mtx #define ENA_SPINLOCK_INIT(spinlock) \ mtx_init(&(spinlock), "ena_spin", NULL, MTX_SPIN) #define ENA_SPINLOCK_DESTROY(spinlock) \ do { \ if (mtx_initialized(&(spinlock))) \ mtx_destroy(&(spinlock)); \ } while (0) #define ENA_SPINLOCK_LOCK(spinlock, flags) \ do { \ (void)(flags); \ mtx_lock_spin(&(spinlock)); \ } while (0) #define ENA_SPINLOCK_UNLOCK(spinlock, flags) \ do { \ (void)(flags); \ mtx_unlock_spin(&(spinlock)); \ } while (0) /* Wait queue related methods */ #define ena_wait_event_t struct { struct cv wq; struct mtx mtx; } #define ENA_WAIT_EVENT_INIT(waitqueue) \ do { \ cv_init(&((waitqueue).wq), "cv"); \ mtx_init(&((waitqueue).mtx), "wq", NULL, MTX_DEF); \ } while (0) #define ENA_WAIT_EVENT_DESTROY(waitqueue) \ do { \ cv_destroy(&((waitqueue).wq)); \ mtx_destroy(&((waitqueue).mtx)); \ } while (0) #define ENA_WAIT_EVENT_CLEAR(waitqueue) \ cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description) #define ENA_WAIT_EVENT_WAIT(waitqueue, timeout_us) \ do { \ mtx_lock(&((waitqueue).mtx)); \ cv_timedwait(&((waitqueue).wq), &((waitqueue).mtx), \ timeout_us * hz / 1000 / 1000 ); \ mtx_unlock(&((waitqueue).mtx)); \ } while (0) #define ENA_WAIT_EVENT_SIGNAL(waitqueue) \ do { \ mtx_lock(&((waitqueue).mtx)); \ cv_broadcast(&((waitqueue).wq)); \ mtx_unlock(&((waitqueue).mtx)); \ } while (0) #define dma_addr_t bus_addr_t #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define u64 uint64_t typedef struct { bus_addr_t paddr; caddr_t vaddr; bus_dma_tag_t tag; bus_dmamap_t map; bus_dma_segment_t seg; int nseg; } ena_mem_handle_t; struct ena_bus { bus_space_handle_t reg_bar_h; bus_space_tag_t reg_bar_t; bus_space_handle_t mem_bar_h; bus_space_tag_t mem_bar_t; }; typedef uint32_t ena_atomic32_t; void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error); int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma, int mapflags); #define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \ do { \ int count, i; \ volatile uint64_t *to = (volatile uint64_t *)(dst); \ const uint64_t *from = (const uint64_t *)(src); \ count = (size) / 8; \ \ for (i = 0; i < count; i++, from++, to++) \ *to = *from; \ } while (0) #define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO) #define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) (virt = NULL) #define ENA_MEM_FREE(dmadev, ptr) free(ptr, M_DEVBUF) #define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle, node, \ dev_node) \ do { \ ((virt) = NULL); \ (void)(dev_node); \ } while (0) #define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, dma) \ do { \ ena_dma_alloc((dmadev), (size), &(dma), 0); \ (virt) = (void *)(dma).vaddr; \ (phys) = (dma).paddr; \ } while (0) #define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, dma) \ do { \ (void)size; \ bus_dmamap_unload((dma).tag, (dma).map); \ bus_dmamem_free((dma).tag, (virt), (dma).map); \ bus_dma_tag_destroy((dma).tag); \ (dma).tag = NULL; \ (virt) = NULL; \ } while (0) /* Register R/W methods */ #define ENA_REG_WRITE32(bus, value, offset) \ bus_space_write_4( \ ((struct ena_bus*)bus)->reg_bar_t, \ ((struct ena_bus*)bus)->reg_bar_h, \ (bus_size_t)(offset), (value)) #define ENA_REG_WRITE32_RELAXED(bus, value, offset) \ ENA_REG_WRITE32(bus, value, offset) #define 
ENA_REG_READ32(bus, offset) \ bus_space_read_4( \ ((struct ena_bus*)bus)->reg_bar_t, \ ((struct ena_bus*)bus)->reg_bar_h, \ (bus_size_t)(offset)) #define ENA_DB_SYNC_WRITE(mem_handle) bus_dmamap_sync( \ (mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_PREWRITE) #define ENA_DB_SYNC_PREREAD(mem_handle) bus_dmamap_sync( \ (mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_PREREAD) #define ENA_DB_SYNC_POSTREAD(mem_handle) bus_dmamap_sync( \ (mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_POSTREAD) #define ENA_DB_SYNC(mem_handle) ENA_DB_SYNC_WRITE(mem_handle) #define time_after(a,b) ((long)((unsigned long)(b) - (unsigned long)(a)) < 0) #define VLAN_HLEN sizeof(struct ether_vlan_header) #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #define prefetch(x) (void)(x) #define prefetchw(x) (void)(x) /* DMA buffers access */ #define dma_unmap_addr(p, name) ((p)->dma->name) #define dma_unmap_addr_set(p, name, v) (((p)->dma->name) = (v)) #define dma_unmap_len(p, name) ((p)->name) #define dma_unmap_len_set(p, name, v) (((p)->name) = (v)) #define memcpy_toio memcpy #define ATOMIC32_INC(I32_PTR) atomic_add_int(I32_PTR, 1) #define ATOMIC32_DEC(I32_PTR) atomic_add_int(I32_PTR, -1) #define ATOMIC32_READ(I32_PTR) atomic_load_acq_int(I32_PTR) #define ATOMIC32_SET(I32_PTR, VAL) atomic_store_rel_int(I32_PTR, VAL) #define barrier() __asm__ __volatile__("": : :"memory") #define dma_rmb() barrier() #define mmiowb() barrier() #define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x)) #define READ_ONCE(x) ({ \ __typeof(x) __var; \ barrier(); \ __var = ACCESS_ONCE(x); \ barrier(); \ __var; \ }) #define READ_ONCE8(x) READ_ONCE(x) #define READ_ONCE16(x) READ_ONCE(x) #define READ_ONCE32(x) READ_ONCE(x) #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) #define lower_32_bits(n) ((uint32_t)(n)) #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #include "ena_defs/ena_includes.h" #endif /* ENA_PLAT_H_ */ Index: stable/11/sys/dev/ena/ena.c =================================================================== --- stable/11/sys/dev/ena/ena.c (revision 361467) +++ stable/11/sys/dev/ena/ena.c (revision 361468) @@ -1,4665 +1,3604 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
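/*
 * A hedged userspace sketch of the upper_32_bits()/lower_32_bits() and
 * DIV_ROUND_UP() helpers from ena_plat.h above. The double 16-bit shift in
 * upper_32_bits() avoids the undefined behaviour a single ">> 32" would
 * have if the argument happened to be a 32-bit type. demo_* names are
 * illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define demo_upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define demo_lower_32_bits(n) ((uint32_t)(n))
#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int
main(void)
{
	uint64_t paddr = 0x0000001234abcd00ULL;	/* example DMA address */

	/* Typical use: program a 64-bit address into two 32-bit registers. */
	printf("low  = 0x%08x\n", (unsigned)demo_lower_32_bits(paddr)); /* 0x34abcd00 */
	printf("high = 0x%08x\n", (unsigned)demo_upper_32_bits(paddr)); /* 0x00000012 */
	printf("pages for 10000 bytes = %llu\n",
	    (unsigned long long)DEMO_DIV_ROUND_UP(10000ULL, 4096ULL));  /* 3 */
	return (0);
}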
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include "ena_datapath.h" #include "ena.h" #include "ena_sysctl.h" +#ifdef DEV_NETMAP +#include "ena_netmap.h" +#endif /* DEV_NETMAP */ + /********************************************************* * Function prototypes *********************************************************/ static int ena_probe(device_t); static void ena_intr_msix_mgmnt(void *); static void ena_free_pci_resources(struct ena_adapter *); static int ena_change_mtu(if_t, int); static inline void ena_alloc_counters(counter_u64_t *, int); static inline void ena_free_counters(counter_u64_t *, int); static inline void ena_reset_counters(counter_u64_t *, int); static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *, uint16_t); static void ena_init_io_rings(struct ena_adapter *); static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); static void ena_free_all_io_rings_resources(struct ena_adapter *); static int ena_setup_tx_dma_tag(struct ena_adapter *); static int ena_free_tx_dma_tag(struct ena_adapter *); static int ena_setup_rx_dma_tag(struct ena_adapter *); static int ena_free_rx_dma_tag(struct ena_adapter *); +static void ena_release_all_tx_dmamap(struct ena_ring *); static int ena_setup_tx_resources(struct ena_adapter *, int); static void ena_free_tx_resources(struct ena_adapter *, int); static int ena_setup_all_tx_resources(struct ena_adapter *); static void ena_free_all_tx_resources(struct ena_adapter *); -static inline int validate_rx_req_id(struct ena_ring *, uint16_t); static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); static void ena_free_rx_resources(struct ena_adapter *, unsigned int); static int ena_setup_all_rx_resources(struct ena_adapter *); static void ena_free_all_rx_resources(struct ena_adapter *); static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); -static int ena_refill_rx_bufs(struct ena_ring *, uint32_t); static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); static void ena_refill_all_rx_bufs(struct ena_adapter *); static void ena_free_all_rx_bufs(struct ena_adapter *); static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); static void ena_free_all_tx_bufs(struct ena_adapter *); static void ena_destroy_all_tx_queues(struct ena_adapter *); static void ena_destroy_all_rx_queues(struct ena_adapter *); static void ena_destroy_all_io_queues(struct ena_adapter *); static int ena_create_io_queues(struct ena_adapter *); -static int ena_tx_cleanup(struct ena_ring *); -static int ena_rx_cleanup(struct ena_ring *); -static inline int validate_tx_req_id(struct ena_ring *, uint16_t); -static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *, - struct mbuf *); -static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *, - struct ena_com_rx_ctx *, uint16_t *); -static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *, - struct mbuf *); -static void ena_cleanup(void *arg, int pending); static int ena_handle_msix(void *); static int 
ena_enable_msix(struct ena_adapter *); static void ena_setup_mgmnt_intr(struct ena_adapter *); static int ena_setup_io_intr(struct ena_adapter *); static int ena_request_mgmnt_irq(struct ena_adapter *); static int ena_request_io_irq(struct ena_adapter *); static void ena_free_mgmnt_irq(struct ena_adapter *); static void ena_free_io_irq(struct ena_adapter *); static void ena_free_irqs(struct ena_adapter*); static void ena_disable_msix(struct ena_adapter *); static void ena_unmask_all_io_irqs(struct ena_adapter *); static int ena_rss_configure(struct ena_adapter *); static int ena_up_complete(struct ena_adapter *); -static int ena_up(struct ena_adapter *); -static void ena_down(struct ena_adapter *); static uint64_t ena_get_counter(if_t, ift_counter); static int ena_media_change(if_t); static void ena_media_status(if_t, struct ifmediareq *); static void ena_init(void *); static int ena_ioctl(if_t, u_long, caddr_t); static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); static void ena_update_host_info(struct ena_admin_host_info *, if_t); static void ena_update_hwassist(struct ena_adapter *); static int ena_setup_ifnet(device_t, struct ena_adapter *, struct ena_com_dev_get_features_ctx *); -static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *); -static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, - struct mbuf **mbuf); -static void ena_dmamap_llq(void *, bus_dma_segment_t *, int, int); -static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **); -static void ena_start_xmit(struct ena_ring *); -static int ena_mq_start(if_t, struct mbuf *); -static void ena_deferred_mq_start(void *, int); -static void ena_qflush(if_t); static int ena_enable_wc(struct resource *); static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); static int ena_calc_io_queue_num(struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_calc_queue_size(struct ena_adapter *, struct ena_calc_queue_size_ctx *); static int ena_handle_updated_queues(struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_rss_init_default(struct ena_adapter *); static void ena_rss_init_default_deferred(void *); static void ena_config_host_info(struct ena_com_dev *, device_t); static int ena_attach(device_t); static int ena_detach(device_t); static int ena_device_init(struct ena_adapter *, device_t, struct ena_com_dev_get_features_ctx *, int *); static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *, int); static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *); static void ena_timer_service(void *); static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; static ena_vendor_info_t ena_vendor_info_array[] = { { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0}, /* Last entry */ { 0, 0, 0 } }; /* * Contains pointers to event handlers, e.g. link state chage. 
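/*
 * ena_probe() above walks ena_vendor_info_array until it hits the all-zero
 * sentinel entry. A minimal standalone sketch of that idiom follows; the
 * struct layout mirrors ena_vendor_info_t, but the ID values shown are
 * illustrative, not authoritative.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
	uint16_t vendor_id;
	uint16_t device_id;
	unsigned int index;
} demo_vendor_info_t;

static demo_vendor_info_t demo_table[] = {
	{ 0x1d0f, 0x0ec2, 0 },	/* e.g. PCI_VENDOR_ID_AMAZON / an ENA PF ID */
	{ 0x1d0f, 0xec20, 0 },	/* e.g. an ENA VF ID */
	{ 0, 0, 0 }		/* sentinel terminates the scan */
};

static int
demo_probe(uint16_t vendor, uint16_t device)
{
	demo_vendor_info_t *ent;

	for (ent = demo_table; ent->vendor_id != 0; ent++)
		if (ent->vendor_id == vendor && ent->device_id == device)
			return (0);	/* match found */
	return (-1);			/* the driver returns ENXIO here */
}

int
main(void)
{
	printf("probe 0x1d0f/0x0ec2: %d\n", demo_probe(0x1d0f, 0x0ec2));
	printf("probe 0xffff/0xffff: %d\n", demo_probe(0xffff, 0xffff));
	return (0);
}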
*/ static struct ena_aenq_handlers aenq_handlers; void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma , int mapflags) { struct ena_adapter* adapter = device_get_softc(dmadev); uint32_t maxsize; uint64_t dma_space_addr; int error; maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); if (unlikely(dma_space_addr == 0)) dma_space_addr = BUS_SPACE_MAXADDR; error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ 8, 0, /* alignment, bounds */ dma_space_addr, /* lowaddr of exclusion window */ BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->tag); if (unlikely(error != 0)) { ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); goto fail_tag; } error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); if (unlikely(error != 0)) { ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", (uintmax_t)size, error); goto fail_map_create; } dma->paddr = 0; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, ena_dmamap_callback, &dma->paddr, mapflags); if (unlikely((error != 0) || (dma->paddr == 0))) { ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); goto fail_map_load; } bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); fail_map_load: bus_dmamem_free(dma->tag, dma->vaddr, dma->map); fail_map_create: bus_dma_tag_destroy(dma->tag); fail_tag: dma->tag = NULL; dma->vaddr = NULL; dma->paddr = 0; return (error); } static void ena_free_pci_resources(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; if (adapter->memory != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_MEM_BAR), adapter->memory); } if (adapter->registers != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_REG_BAR), adapter->registers); } } static int ena_probe(device_t dev) { ena_vendor_info_t *ent; char adapter_name[60]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); ent = ena_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id)) { ena_trace(ENA_DBG, "vendor=%x device=%x\n", pci_vendor_id, pci_device_id); sprintf(adapter_name, DEVICE_DESC); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } static int ena_change_mtu(if_t ifp, int new_mtu) { struct ena_adapter *adapter = if_getsoftc(ifp); int rc; if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { device_printf(adapter->pdev, "Invalid MTU setting. 
" "new_mtu: %d max mtu: %d min mtu: %d\n", new_mtu, adapter->max_mtu, ENA_MIN_MTU); return (EINVAL); } rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); if (likely(rc == 0)) { ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu); if_setmtu(ifp, new_mtu); } else { device_printf(adapter->pdev, "Failed to set MTU to %d\n", new_mtu); } return (rc); } static inline void ena_alloc_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) *begin = counter_u64_alloc(M_WAITOK); } static inline void ena_free_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_free(*begin); } static inline void ena_reset_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_zero(*begin); } static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, uint16_t qid) { ring->qid = qid; ring->adapter = adapter; ring->ena_dev = adapter->ena_dev; ring->first_interrupt = false; ring->no_interrupt_event_cnt = 0; + ring->rx_mbuf_sz = ena_mbuf_sz; } static void ena_init_io_rings(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev; struct ena_ring *txr, *rxr; struct ena_que *que; int i; ena_dev = adapter->ena_dev; for (i = 0; i < adapter->num_queues; i++) { txr = &adapter->tx_ring[i]; rxr = &adapter->rx_ring[i]; /* TX/RX common ring state */ ena_init_io_rings_common(adapter, txr, i); ena_init_io_rings_common(adapter, rxr, i); /* TX specific ring state */ txr->ring_size = adapter->tx_ring_size; txr->tx_max_header_size = ena_dev->tx_max_header_size; txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); /* Allocate a buf ring */ txr->buf_ring_size = adapter->buf_ring_size; txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK, &txr->ring_mtx); /* Alloc TX statistics. */ ena_alloc_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); /* RX specific ring state */ rxr->ring_size = adapter->rx_ring_size; rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); /* Alloc RX statistics. 
*/ ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); /* Initialize locks */ snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(adapter->pdev), i); snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(adapter->pdev), i); mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF); que = &adapter->que[i]; que->adapter = adapter; que->id = i; que->tx_ring = txr; que->rx_ring = rxr; txr->que = que; rxr->que = que; rxr->empty_rx_queue = 0; } } static void ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *txr = &adapter->tx_ring[qid]; struct ena_ring *rxr = &adapter->rx_ring[qid]; ena_free_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); ena_free_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); ENA_RING_MTX_LOCK(txr); drbr_free(txr->br, M_DEVBUF); ENA_RING_MTX_UNLOCK(txr); mtx_destroy(&txr->ring_mtx); } static void ena_free_all_io_rings_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_io_ring_resources(adapter, i); } static int ena_setup_tx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Tx buffers */ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ ENA_TSO_MAXSIZE, /* maxsize */ adapter->max_tx_sgl_size - 1, /* nsegments */ ENA_TSO_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &adapter->tx_buf_tag); return (ret); } static int ena_free_tx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->tx_buf_tag); if (likely(ret == 0)) adapter->tx_buf_tag = NULL; return (ret); } static int ena_setup_rx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Rx buffers*/ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ - MJUM16BYTES, /* maxsize */ + ena_mbuf_sz, /* maxsize */ adapter->max_rx_sgl_size, /* nsegments */ - MJUM16BYTES, /* maxsegsize */ + ena_mbuf_sz, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->rx_buf_tag); return (ret); } static int ena_free_rx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->rx_buf_tag); if (likely(ret == 0)) adapter->rx_buf_tag = NULL; return (ret); } +static void +ena_release_all_tx_dmamap(struct ena_ring *tx_ring) +{ + struct ena_adapter *adapter = tx_ring->adapter; + struct ena_tx_buffer *tx_info; + bus_dma_tag_t tx_tag = adapter->tx_buf_tag;; + int i; +#ifdef DEV_NETMAP + struct ena_netmap_tx_info *nm_info; + int j; +#endif /* DEV_NETMAP */ + + for (i = 0; i < tx_ring->ring_size; ++i) { + tx_info = &tx_ring->tx_buffer_info[i]; +#ifdef DEV_NETMAP + if (adapter->ifp->if_capenable & IFCAP_NETMAP) { + nm_info = &tx_info->nm_info; + for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) { + if (nm_info->map_seg[j] != NULL) { + bus_dmamap_destroy(tx_tag, + nm_info->map_seg[j]); + nm_info->map_seg[j] = NULL; + } + } + } +#endif /* DEV_NETMAP */ + if (tx_info->dmamap != NULL) { + bus_dmamap_destroy(tx_tag, tx_info->dmamap); + tx_info->dmamap = NULL; + } + } +} + /** * ena_setup_tx_resources - allocate Tx resources (Descriptors) * 
@adapter: network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_que *que = &adapter->que[qid]; struct ena_ring *tx_ring = que->tx_ring; int size, i, err; +#ifdef DEV_NETMAP + bus_dmamap_t *map; + int j; + ena_netmap_reset_tx_ring(adapter, qid); +#endif /* DEV_NETMAP */ + size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->tx_buffer_info == NULL)) return (ENOMEM); size = sizeof(uint16_t) * tx_ring->ring_size; tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->free_tx_ids == NULL)) goto err_buf_info_free; size = tx_ring->tx_max_header_size; tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) goto err_tx_ids_free; /* Req id stack for TX OOO completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; /* Reset TX statistics. */ ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, sizeof(tx_ring->tx_stats)); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->acum_pkts = 0; /* Make sure that drbr is empty */ ENA_RING_MTX_LOCK(tx_ring); drbr_flush(adapter->ifp, tx_ring->br); ENA_RING_MTX_UNLOCK(tx_ring); /* ... and create the buffer DMA maps */ for (i = 0; i < tx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->tx_buf_tag, 0, - &tx_ring->tx_buffer_info[i].map_head); + &tx_ring->tx_buffer_info[i].dmamap); if (unlikely(err != 0)) { ena_trace(ENA_ALERT, - "Unable to create Tx DMA map_head for buffer %d\n", + "Unable to create Tx DMA map for buffer %d\n", i); - goto err_buf_info_unmap; + goto err_map_release; } - tx_ring->tx_buffer_info[i].seg_mapped = false; - err = bus_dmamap_create(adapter->tx_buf_tag, 0, - &tx_ring->tx_buffer_info[i].map_seg); - if (unlikely(err != 0)) { - ena_trace(ENA_ALERT, - "Unable to create Tx DMA map_seg for buffer %d\n", - i); - goto err_buf_info_head_unmap; +#ifdef DEV_NETMAP + if (adapter->ifp->if_capenable & IFCAP_NETMAP) { + map = tx_ring->tx_buffer_info[i].nm_info.map_seg; + for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { + err = bus_dmamap_create(adapter->tx_buf_tag, 0, + &map[j]); + if (unlikely(err != 0)) { + ena_trace(ENA_ALERT, "Unable to create " + "Tx DMA for buffer %d %d\n", i, j); + goto err_map_release; + } + } } - tx_ring->tx_buffer_info[i].head_mapped = false; +#endif /* DEV_NETMAP */ } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); if (unlikely(tx_ring->enqueue_tq == NULL)) { ena_trace(ENA_ALERT, "Unable to create taskqueue for enqueue task\n"); i = tx_ring->ring_size; - goto err_buf_info_unmap; + goto err_map_release; } tx_ring->running = true; taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu); return (0); -err_buf_info_head_unmap: - bus_dmamap_destroy(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_head); -err_buf_info_unmap: - while (i--) { - bus_dmamap_destroy(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_head); - bus_dmamap_destroy(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_seg); - } - free(tx_ring->push_buf_intermediate_buf, M_DEVBUF); +err_map_release: + ena_release_all_tx_dmamap(tx_ring); 
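/*
 * The error path being reworked here (err_map_release / err_tx_ids_free /
 * err_buf_info_free) is the classic layered goto unwind: each failure jumps
 * to the label that releases everything acquired so far, and the labels
 * fall through from most- to least-recently acquired resource. A compact
 * standalone sketch of the idiom, with hypothetical demo_* names:
 */
#include <stdlib.h>
#include <errno.h>

struct demo_ring {
	void *buffer_info;
	void *free_ids;
	void *push_buf;
};

static int
demo_setup(struct demo_ring *r)
{
	r->buffer_info = malloc(64);
	if (r->buffer_info == NULL)
		return (ENOMEM);
	r->free_ids = malloc(64);
	if (r->free_ids == NULL)
		goto err_buf_info_free;
	r->push_buf = malloc(64);
	if (r->push_buf == NULL)
		goto err_ids_free;
	return (0);

err_ids_free:		/* falling through frees the earlier allocations */
	free(r->free_ids);
	r->free_ids = NULL;
err_buf_info_free:
	free(r->buffer_info);
	r->buffer_info = NULL;
	return (ENOMEM);
}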
err_tx_ids_free: free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; err_buf_info_free: free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources **/ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_ring *tx_ring = &adapter->tx_ring[qid]; +#ifdef DEV_NETMAP + struct ena_netmap_tx_info *nm_info; + int j; +#endif /* DEV_NETMAP */ while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); taskqueue_free(tx_ring->enqueue_tq); ENA_RING_MTX_LOCK(tx_ring); /* Flush buffer ring, */ drbr_flush(adapter->ifp, tx_ring->br); /* Free buffer DMA maps, */ for (int i = 0; i < tx_ring->ring_size; i++) { - if (tx_ring->tx_buffer_info[i].head_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_head, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_head); - tx_ring->tx_buffer_info[i].head_mapped = false; - } + bus_dmamap_sync(adapter->tx_buf_tag, + tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(adapter->tx_buf_tag, + tx_ring->tx_buffer_info[i].dmamap); bus_dmamap_destroy(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_head); + tx_ring->tx_buffer_info[i].dmamap); - if (tx_ring->tx_buffer_info[i].seg_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_seg, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_seg); - tx_ring->tx_buffer_info[i].seg_mapped = false; +#ifdef DEV_NETMAP + if (adapter->ifp->if_capenable & IFCAP_NETMAP) { + nm_info = &tx_ring->tx_buffer_info[i].nm_info; + for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { + if (nm_info->socket_buf_idx[j] != 0) { + bus_dmamap_sync(adapter->tx_buf_tag, + nm_info->map_seg[j], + BUS_DMASYNC_POSTWRITE); + ena_netmap_unload(adapter, + nm_info->map_seg[j]); + } + bus_dmamap_destroy(adapter->tx_buf_tag, + nm_info->map_seg[j]); + nm_info->socket_buf_idx[j] = 0; + } } - bus_dmamap_destroy(adapter->tx_buf_tag, - tx_ring->tx_buffer_info[i].map_seg); +#endif /* DEV_NETMAP */ m_freem(tx_ring->tx_buffer_info[i].mbuf); tx_ring->tx_buffer_info[i].mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); /* And free allocated memory. */ free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->push_buf_intermediate_buf); tx_ring->push_buf_intermediate_buf = NULL; } /** * ena_setup_all_tx_resources - allocate all queues Tx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. 
**/ static int ena_setup_all_tx_resources(struct ena_adapter *adapter) { int i, rc; for (i = 0; i < adapter->num_queues; i++) { rc = ena_setup_tx_resources(adapter, i); if (rc != 0) { device_printf(adapter->pdev, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } } return (0); err_setup_tx: /* Rewind the index freeing the rings as we go */ while (i--) ena_free_tx_resources(adapter, i); return (rc); } /** * ena_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: network interface device structure * * Free all transmit software resources **/ static void ena_free_all_tx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_tx_resources(adapter, i); } -static inline int -validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) -{ - if (likely(req_id < rx_ring->ring_size)) - return (0); - - device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n", - req_id); - counter_u64_add(rx_ring->rx_stats.bad_req_id, 1); - - /* Trigger device reset */ - if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter))) { - rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; - ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter); - } - - return (EFAULT); -} - /** * ena_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_que *que = &adapter->que[qid]; struct ena_ring *rx_ring = que->rx_ring; int size, err, i; size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size; +#ifdef DEV_NETMAP + ena_netmap_reset_rx_ring(adapter, qid); + rx_ring->initialized = false; +#endif /* DEV_NETMAP */ + /* * Alloc extra element so in rx path * we can always prefetch rx_info + 1 */ size += sizeof(struct ena_rx_buffer); rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); size = sizeof(uint16_t) * rx_ring->ring_size; rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK); for (i = 0; i < rx_ring->ring_size; i++) rx_ring->free_rx_ids[i] = i; /* Reset RX statistics. */ ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats, sizeof(rx_ring->rx_stats)); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; /* ... 
and create the buffer DMA maps */ for (i = 0; i < rx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->rx_buf_tag, 0, &(rx_ring->rx_buffer_info[i].map)); if (err != 0) { ena_trace(ENA_ALERT, "Unable to create Rx DMA map for buffer %d\n", i); goto err_buf_info_unmap; } } /* Create LRO for the ring */ if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { device_printf(adapter->pdev, "LRO[%d] Initialization failed!\n", qid); } else { ena_trace(ENA_INFO, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro.ifp = adapter->ifp; } } return (0); err_buf_info_unmap: while (i--) { bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources **/ static void ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; /* Free buffer DMA maps, */ for (int i = 0; i < rx_ring->ring_size; i++) { bus_dmamap_sync(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD); m_freem(rx_ring->rx_buffer_info[i].mbuf); rx_ring->rx_buffer_info[i].mbuf = NULL; bus_dmamap_unload(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } /* free LRO resources, */ tcp_lro_free(&rx_ring->lro); /* free allocated memory */ free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; } /** * ena_setup_all_rx_resources - allocate all queues Rx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. 
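/*
 * Both the DMA-map creation loop above and ena_setup_all_rx_resources()
 * below use the "while (i--)" rewind pattern: when setup fails at index i,
 * indices 0..i-1 were fully initialized and are torn down in reverse. A
 * minimal sketch under that assumption, with hypothetical demo_* names:
 */
#include <stdio.h>

#define DEMO_N 8

static int demo_created[DEMO_N];

static int
demo_create(int i)
{
	if (i == 5)		/* simulate a failure part-way through */
		return (-1);
	demo_created[i] = 1;
	return (0);
}

static void
demo_destroy(int i)
{
	demo_created[i] = 0;
}

static int
demo_setup_all(void)
{
	int i;

	for (i = 0; i < DEMO_N; i++)
		if (demo_create(i) != 0)
			goto err_rewind;
	return (0);

err_rewind:
	while (i--)		/* tear down 0..i-1 in reverse order */
		demo_destroy(i);
	return (-1);
}

int
main(void)
{
	int rc = demo_setup_all();
	int leaked = 0, i;

	for (i = 0; i < DEMO_N; i++)
		leaked += demo_created[i];
	printf("setup_all = %d, leaked = %d\n", rc, leaked); /* -1, 0 */
	return (0);
}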
**/ static int ena_setup_all_rx_resources(struct ena_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_queues; i++) { rc = ena_setup_rx_resources(adapter, i); if (rc != 0) { device_printf(adapter->pdev, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } } return (0); err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) ena_free_rx_resources(adapter, i); return (rc); } /** * ena_free_all_rx_resources - Free Rx resources for all queues * @adapter: network interface device structure * * Free all receive software resources **/ static void ena_free_all_rx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_rx_resources(adapter, i); } static inline int ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { struct ena_com_buf *ena_buf; bus_dma_segment_t segs[1]; int nsegs, error; int mlen; /* if previous allocated frag is not used */ if (unlikely(rx_info->mbuf != NULL)) return (0); /* Get mbuf using UMA allocator */ - rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES); + rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, + rx_ring->rx_mbuf_sz); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1); rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); return (ENOMEM); } mlen = MCLBYTES; } else { - mlen = MJUM16BYTES; + mlen = rx_ring->rx_mbuf_sz; } /* Set mbuf length*/ rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; /* Map packets for DMA */ ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (unlikely((error != 0) || (nsegs != 1))) { ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, " "nsegs: %d\n", error, nsegs); counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); goto exit; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); ena_buf = &rx_info->ena_buf; ena_buf->paddr = segs[0].ds_addr; ena_buf->len = mlen; ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); return (0); exit: m_freem(rx_info->mbuf); rx_info->mbuf = NULL; return (EFAULT); } static void ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { if (rx_info->mbuf == NULL) { ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n"); return; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map); m_freem(rx_info->mbuf); rx_info->mbuf = NULL; } /** * ena_refill_rx_bufs - Refills ring with descriptors * @rx_ring: the ring which we want to feed with free descriptors * @num: number of descriptors to refill * Refills the ring with newly allocated DMA-mapped mbufs for receiving **/ -static int +int ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) { struct ena_adapter *adapter = rx_ring->adapter; uint16_t next_to_use, req_id; uint32_t i; int rc; ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n", rx_ring->qid); next_to_use = rx_ring->next_to_use; for (i = 0; i < num; i++) { struct ena_rx_buffer *rx_info; ena_trace(ENA_DBG | ENA_RXPTH 
| ENA_RSC, "RX buffer - next to use: %d\n", next_to_use); req_id = rx_ring->free_rx_ids[next_to_use]; rx_info = &rx_ring->rx_buffer_info[req_id]; - - rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); +#ifdef DEV_NETMAP + if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) + rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); + else +#endif /* DEV_NETMAP */ + rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); if (unlikely(rc != 0)) { ena_trace(ENA_WARNING, "failed to alloc buffer for rx queue %d\n", rx_ring->qid); break; } rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, &rx_info->ena_buf, req_id); if (unlikely(rc != 0)) { ena_trace(ENA_WARNING, "failed to add buffer for rx queue %d\n", rx_ring->qid); break; } next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size); } if (unlikely(i < num)) { counter_u64_add(rx_ring->rx_stats.refil_partial, 1); ena_trace(ENA_WARNING, "refilled rx qid %d with only %d mbufs (from %d)\n", rx_ring->qid, i, num); } if (likely(i != 0)) { wmb(); ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); } rx_ring->next_to_use = next_to_use; return (i); } static void ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; unsigned int i; for (i = 0; i < rx_ring->ring_size; i++) { struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; if (rx_info->mbuf != NULL) ena_free_rx_mbuf(adapter, rx_ring, rx_info); +#ifdef DEV_NETMAP + if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && + (adapter->ifp->if_capenable & IFCAP_NETMAP)) { + if (rx_info->netmap_buf_idx != 0) + ena_netmap_free_rx_slot(adapter, rx_ring, + rx_info); + } +#endif /* DEV_NETMAP */ } } /** * ena_refill_all_rx_bufs - allocate all queues Rx buffers * @adapter: network interface device structure * */ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, rc, bufs_num; for (i = 0; i < adapter->num_queues; i++) { rx_ring = &adapter->rx_ring[i]; bufs_num = rx_ring->ring_size - 1; rc = ena_refill_rx_bufs(rx_ring, bufs_num); - if (unlikely(rc != bufs_num)) ena_trace(ENA_WARNING, "refilling Queue %d failed. 
" "Allocated %d buffers from: %d\n", i, rc, bufs_num); +#ifdef DEV_NETMAP + rx_ring->initialized = true; +#endif /* DEV_NETMAP */ } } static void ena_free_all_rx_bufs(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_rx_bufs(adapter, i); } /** * ena_free_tx_bufs - Free Tx Buffers per Queue * @adapter: network interface device structure * @qid: queue index **/ static void ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) { bool print_once = true; struct ena_ring *tx_ring = &adapter->tx_ring[qid]; ENA_RING_MTX_LOCK(tx_ring); for (int i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; if (tx_info->mbuf == NULL) continue; if (print_once) { device_printf(adapter->pdev, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); print_once = false; } else { ena_trace(ENA_DBG, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); } - if (tx_info->head_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_info->map_head); - tx_info->head_mapped = false; - } + bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); - if (tx_info->seg_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_info->map_seg); - tx_info->seg_mapped = false; - } - m_free(tx_info->mbuf); tx_info->mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); } static void ena_free_all_tx_bufs(struct ena_adapter *adapter) { for (int i = 0; i < adapter->num_queues; i++) ena_free_tx_bufs(adapter, i); } static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_io_queues(struct ena_adapter *adapter) { struct ena_que *queue; int i; for (i = 0; i < adapter->num_queues; i++) { queue = &adapter->que[i]; while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); taskqueue_free(queue->cleanup_tq); } ena_destroy_all_tx_queues(adapter); ena_destroy_all_rx_queues(adapter); } -static inline int -validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) -{ - struct ena_adapter *adapter = tx_ring->adapter; - struct ena_tx_buffer *tx_info = NULL; - - if (likely(req_id < tx_ring->ring_size)) { - tx_info = &tx_ring->tx_buffer_info[req_id]; - if (tx_info->mbuf != NULL) - return (0); - device_printf(adapter->pdev, - "tx_info doesn't have valid mbuf\n"); - } - - device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); - counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); - - /* Trigger device reset */ - adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; - ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); - - return (EFAULT); -} - static int ena_create_io_queues(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_com_create_io_ctx ctx; struct ena_ring *ring; struct ena_que *queue; uint16_t ena_qid; uint32_t msix_vector; int rc, i; /* Create TX queues */ for (i = 0; i < 
adapter->num_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_TXQ_IDX(i); ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.queue_size = adapter->tx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc != 0) { device_printf(adapter->pdev, "Failed to create io TX queue #%d rc: %d\n", i, rc); goto err_tx; } ring = &adapter->tx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (rc != 0) { device_printf(adapter->pdev, "Failed to get TX queue handlers. TX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_tx; } } /* Create RX queues */ for (i = 0; i < adapter->num_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_RXQ_IDX(i); ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.queue_size = adapter->rx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; rc = ena_com_create_io_queue(ena_dev, &ctx); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Failed to create io RX queue[%d] rc: %d\n", i, rc); goto err_rx; } ring = &adapter->rx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Failed to get RX queue handlers. RX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_rx; } } for (i = 0; i < adapter->num_queues; i++) { queue = &adapter->que[i]; TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); queue->cleanup_tq = taskqueue_create_fast("ena cleanup", M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, "%s queue %d cleanup", device_get_nameunit(adapter->pdev), i); } return (0); err_rx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); i = adapter->num_queues; err_tx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); return (ENXIO); } -/** - * ena_tx_cleanup - clear sent packets and corresponding descriptors - * @tx_ring: ring for which we want to clean packets - * - * Once packets are sent, we ask the device in a loop for no longer used - * descriptors. We find the related mbuf chain in a map (index in an array) - * and free it, then update ring state. - * This is performed in "endless" loop, updating ring pointers every - * TX_COMMIT. The first check of free descriptor is performed before the actual - * loop, then repeated at the loop end. 
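/*
 * ena_create_io_queues() above binds one cleanup task and one fast
 * taskqueue to each queue pair, and ena_destroy_all_io_queues() tears them
 * down with the cancel/drain/free sequence. A hedged kernel-style sketch of
 * that lifecycle (demo_* names are hypothetical and error handling is
 * elided; this mirrors the pattern, not the driver verbatim):
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct demo_que {
	struct task	 cleanup_task;
	struct taskqueue *cleanup_tq;
};

static void
demo_cleanup(void *arg, int pending)
{
	/* Per-queue TX/RX completion processing would run here. */
}

static void
demo_que_init(struct demo_que *q)
{
	TASK_INIT(&q->cleanup_task, 0, demo_cleanup, q);
	q->cleanup_tq = taskqueue_create_fast("demo cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &q->cleanup_tq);
	taskqueue_start_threads(&q->cleanup_tq, 1, PI_NET, "demo cleanup");
}

static void
demo_que_fini(struct demo_que *q)
{
	/* Keep draining while a task instance is still queued or running. */
	while (taskqueue_cancel(q->cleanup_tq, &q->cleanup_task, NULL) != 0)
		taskqueue_drain(q->cleanup_tq, &q->cleanup_task);
	taskqueue_free(q->cleanup_tq);
}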
- **/ -static int -ena_tx_cleanup(struct ena_ring *tx_ring) -{ - struct ena_adapter *adapter; - struct ena_com_io_cq* io_cq; - uint16_t next_to_clean; - uint16_t req_id; - uint16_t ena_qid; - unsigned int total_done = 0; - int rc; - int commit = TX_COMMIT; - int budget = TX_BUDGET; - int work_done; - bool above_thresh; - - adapter = tx_ring->que->adapter; - ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); - io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; - next_to_clean = tx_ring->next_to_clean; - - do { - struct ena_tx_buffer *tx_info; - struct mbuf *mbuf; - - rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); - if (unlikely(rc != 0)) - break; - - rc = validate_tx_req_id(tx_ring, req_id); - if (unlikely(rc != 0)) - break; - - tx_info = &tx_ring->tx_buffer_info[req_id]; - - mbuf = tx_info->mbuf; - - tx_info->mbuf = NULL; - bintime_clear(&tx_info->timestamp); - - /* Map is no longer required */ - if (tx_info->head_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_info->map_head); - tx_info->head_mapped = false; - } - if (tx_info->seg_mapped == true) { - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(adapter->tx_buf_tag, - tx_info->map_seg); - tx_info->seg_mapped = false; - } - - ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", - tx_ring->qid, mbuf); - - m_freem(mbuf); - - total_done += tx_info->tx_descs; - - tx_ring->free_tx_ids[next_to_clean] = req_id; - next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, - tx_ring->ring_size); - - if (unlikely(--commit == 0)) { - commit = TX_COMMIT; - /* update ring state every TX_COMMIT descriptor */ - tx_ring->next_to_clean = next_to_clean; - ena_com_comp_ack( - &adapter->ena_dev->io_sq_queues[ena_qid], - total_done); - ena_com_update_dev_comp_head(io_cq); - total_done = 0; - } - } while (likely(--budget)); - - work_done = TX_BUDGET - budget; - - ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n", - tx_ring->qid, work_done); - - /* If there is still something to commit update ring state */ - if (likely(commit != TX_COMMIT)) { - tx_ring->next_to_clean = next_to_clean; - ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], - total_done); - ena_com_update_dev_comp_head(io_cq); - } - - /* - * Need to make the rings circular update visible to - * ena_xmit_mbuf() before checking for tx_ring->running. 
- */ - mb(); - - above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, - ENA_TX_RESUME_THRESH); - if (unlikely(!tx_ring->running && above_thresh)) { - ENA_RING_MTX_LOCK(tx_ring); - above_thresh = - ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, - ENA_TX_RESUME_THRESH); - if (!tx_ring->running && above_thresh) { - tx_ring->running = true; - counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); - taskqueue_enqueue(tx_ring->enqueue_tq, - &tx_ring->enqueue_task); - } - ENA_RING_MTX_UNLOCK(tx_ring); - } - - return (work_done); -} - -static void -ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, - struct mbuf *mbuf) -{ - struct ena_adapter *adapter = rx_ring->adapter; - - if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { - mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; - - if (ena_rx_ctx->frag && - (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { - M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); - return; - } - - switch (ena_rx_ctx->l3_proto) { - case ENA_ETH_IO_L3_PROTO_IPV4: - switch (ena_rx_ctx->l4_proto) { - case ENA_ETH_IO_L4_PROTO_TCP: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); - break; - case ENA_ETH_IO_L4_PROTO_UDP: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); - break; - default: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); - } - break; - case ENA_ETH_IO_L3_PROTO_IPV6: - switch (ena_rx_ctx->l4_proto) { - case ENA_ETH_IO_L4_PROTO_TCP: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); - break; - case ENA_ETH_IO_L4_PROTO_UDP: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); - break; - default: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); - } - break; - case ENA_ETH_IO_L3_PROTO_UNKNOWN: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); - break; - default: - M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); - } - } else { - mbuf->m_pkthdr.flowid = rx_ring->qid; - M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); - } -} - -/** - * ena_rx_mbuf - assemble mbuf from descriptors - * @rx_ring: ring for which we want to clean packets - * @ena_bufs: buffer info - * @ena_rx_ctx: metadata for this packet(s) - * @next_to_clean: ring pointer, will be updated only upon success - * - **/ -static struct mbuf* -ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, - struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) -{ - struct mbuf *mbuf; - struct ena_rx_buffer *rx_info; - struct ena_adapter *adapter; - unsigned int descs = ena_rx_ctx->descs; - int rc; - uint16_t ntc, len, req_id, buf = 0; - - ntc = *next_to_clean; - adapter = rx_ring->adapter; - - len = ena_bufs[buf].len; - req_id = ena_bufs[buf].req_id; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc != 0)) - return (NULL); - - rx_info = &rx_ring->rx_buffer_info[req_id]; - if (unlikely(rx_info->mbuf == NULL)) { - device_printf(adapter->pdev, "NULL mbuf in rx_info"); - return (NULL); - } - - ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", - rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr); - - bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, - BUS_DMASYNC_POSTREAD); - mbuf = rx_info->mbuf; - mbuf->m_flags |= M_PKTHDR; - mbuf->m_pkthdr.len = len; - mbuf->m_len = len; - mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp; - - /* Fill mbuf with hash key and it's interpretation for optimization */ - ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); - - ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", - mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); - - /* DMA address is not needed anymore, unmap it */ - 
bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); - - rx_info->mbuf = NULL; - rx_ring->free_rx_ids[ntc] = req_id; - ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); - - /* - * While we have more than 1 descriptors for one rcvd packet, append - * other mbufs to the main one - */ - while (--descs) { - ++buf; - len = ena_bufs[buf].len; - req_id = ena_bufs[buf].req_id; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc != 0)) { - /* - * If the req_id is invalid, then the device will be - * reset. In that case we must free all mbufs that - * were already gathered. - */ - m_freem(mbuf); - return (NULL); - } - rx_info = &rx_ring->rx_buffer_info[req_id]; - - if (unlikely(rx_info->mbuf == NULL)) { - device_printf(adapter->pdev, "NULL mbuf in rx_info"); - /* - * If one of the required mbufs was not allocated yet, - * we can break there. - * All earlier used descriptors will be reallocated - * later and not used mbufs can be reused. - * The next_to_clean pointer will not be updated in case - * of an error, so caller should advance it manually - * in error handling routine to keep it up to date - * with hw ring. - */ - m_freem(mbuf); - return (NULL); - } - - bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, - BUS_DMASYNC_POSTREAD); - if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { - counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); - ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n", - mbuf); - } - - ena_trace(ENA_DBG | ENA_RXPTH, - "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len); - - /* Free already appended mbuf, it won't be useful anymore */ - bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); - m_freem(rx_info->mbuf); - rx_info->mbuf = NULL; - - rx_ring->free_rx_ids[ntc] = req_id; - ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); - } - - *next_to_clean = ntc; - - return (mbuf); -} - -/** - * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum - **/ -static inline void -ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, - struct mbuf *mbuf) -{ - - /* if IP and error */ - if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && - ena_rx_ctx->l3_csum_err)) { - /* ipv4 checksum error */ - mbuf->m_pkthdr.csum_flags = 0; - counter_u64_add(rx_ring->rx_stats.bad_csum, 1); - ena_trace(ENA_DBG, "RX IPv4 header checksum error\n"); - return; - } - - /* if TCP/UDP */ - if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || - (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { - if (ena_rx_ctx->l4_csum_err) { - /* TCP/UDP checksum error */ - mbuf->m_pkthdr.csum_flags = 0; - counter_u64_add(rx_ring->rx_stats.bad_csum, 1); - ena_trace(ENA_DBG, "RX L4 checksum error\n"); - } else { - mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; - mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; - } - } -} - -/** - * ena_rx_cleanup - handle rx irq - * @arg: ring for which irq is being handled - **/ -static int -ena_rx_cleanup(struct ena_ring *rx_ring) -{ - struct ena_adapter *adapter; - struct mbuf *mbuf; - struct ena_com_rx_ctx ena_rx_ctx; - struct ena_com_io_cq* io_cq; - struct ena_com_io_sq* io_sq; - if_t ifp; - uint16_t ena_qid; - uint16_t next_to_clean; - uint32_t refill_required; - uint32_t refill_threshold; - uint32_t do_if_input = 0; - unsigned int qid; - int rc, i; - int budget = RX_BUDGET; - - adapter = rx_ring->que->adapter; - ifp = adapter->ifp; - qid = rx_ring->que->id; - ena_qid = ENA_IO_RXQ_IDX(qid); - io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; - io_sq = 
&adapter->ena_dev->io_sq_queues[ena_qid]; - next_to_clean = rx_ring->next_to_clean; - - ena_trace(ENA_DBG, "rx: qid %d\n", qid); - - do { - ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; - ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; - ena_rx_ctx.descs = 0; - bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, - io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); - rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); - - if (unlikely(rc != 0)) - goto error; - - if (unlikely(ena_rx_ctx.descs == 0)) - break; - - ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " - "descs #: %d l3 proto %d l4 proto %d hash: %x\n", - rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, - ena_rx_ctx.l4_proto, ena_rx_ctx.hash); - - /* Receive mbuf from the ring */ - mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, - &ena_rx_ctx, &next_to_clean); - bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, - io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); - /* Exit if we failed to retrieve a buffer */ - if (unlikely(mbuf == NULL)) { - for (i = 0; i < ena_rx_ctx.descs; ++i) { - rx_ring->free_rx_ids[next_to_clean] = - rx_ring->ena_bufs[i].req_id; - next_to_clean = - ENA_RX_RING_IDX_NEXT(next_to_clean, - rx_ring->ring_size); - - } - break; - } - - if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || - ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { - ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); - } - - counter_enter(); - counter_u64_add_protected(rx_ring->rx_stats.bytes, - mbuf->m_pkthdr.len); - counter_u64_add_protected(adapter->hw_stats.rx_bytes, - mbuf->m_pkthdr.len); - counter_exit(); - /* - * LRO is only for IP/TCP packets and TCP checksum of the packet - * should be computed by hardware. - */ - do_if_input = 1; - if (((ifp->if_capenable & IFCAP_LRO) != 0) && - ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && - (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { - /* - * Send to the stack if: - * - LRO not enabled, or - * - no LRO resources, or - * - lro enqueue fails - */ - if ((rx_ring->lro.lro_cnt != 0) && - (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) - do_if_input = 0; - } - if (do_if_input != 0) { - ena_trace(ENA_DBG | ENA_RXPTH, - "calling if_input() with mbuf %p\n", mbuf); - (*ifp->if_input)(ifp, mbuf); - } - - counter_enter(); - counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); - counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); - counter_exit(); - } while (--budget); - - rx_ring->next_to_clean = next_to_clean; - - refill_required = ena_com_free_desc(io_sq); - refill_threshold = min_t(int, - rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, - ENA_RX_REFILL_THRESH_PACKET); - - if (refill_required > refill_threshold) { - ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); - ena_refill_rx_bufs(rx_ring, refill_required); - } - - tcp_lro_flush_all(&rx_ring->lro); - - return (RX_BUDGET - budget); - -error: - counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); - - /* Too many desc from the device. 
Trigger reset */ - if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { - adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; - ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); - } - - return (0); -} - /********************************************************************* * * MSIX & Interrupt Service routine * **********************************************************************/ /** * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue * @arg: interrupt number **/ static void ena_intr_msix_mgmnt(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; ena_com_admin_q_comp_intr_handler(adapter->ena_dev); if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) ena_com_aenq_intr_handler(adapter->ena_dev, arg); } -static void -ena_cleanup(void *arg, int pending) -{ - struct ena_que *que = arg; - struct ena_adapter *adapter = que->adapter; - if_t ifp = adapter->ifp; - struct ena_ring *tx_ring; - struct ena_ring *rx_ring; - struct ena_com_io_cq* io_cq; - struct ena_eth_io_intr_reg intr_reg; - int qid, ena_qid; - int txc, rxc, i; - - if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) - return; - - ena_trace(ENA_DBG, "MSI-X TX/RX routine\n"); - - tx_ring = que->tx_ring; - rx_ring = que->rx_ring; - qid = que->id; - ena_qid = ENA_IO_TXQ_IDX(qid); - io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; - - tx_ring->first_interrupt = true; - rx_ring->first_interrupt = true; - - for (i = 0; i < CLEAN_BUDGET; ++i) { - rxc = ena_rx_cleanup(rx_ring); - txc = ena_tx_cleanup(tx_ring); - - if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) - return; - - if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) - break; - } - - /* Signal that work is done and unmask interrupt */ - ena_com_update_intr_reg(&intr_reg, - RX_IRQ_INTERVAL, - TX_IRQ_INTERVAL, - true); - ena_com_unmask_intr(io_cq, &intr_reg); -} - /** * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx * @arg: queue **/ static int ena_handle_msix(void *arg) { struct ena_que *queue = arg; struct ena_adapter *adapter = queue->adapter; if_t ifp = adapter->ifp; if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) return (FILTER_STRAY); taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); return (FILTER_HANDLED); } static int ena_enable_msix(struct ena_adapter *adapter) { device_t dev = adapter->pdev; int msix_vecs, msix_req; int i, rc = 0; if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { device_printf(dev, "Error, MSI-X is already enabled\n"); return (EINVAL); } /* Reserve the max MSI-X vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), M_DEVBUF, M_WAITOK | M_ZERO); ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); for (i = 0; i < msix_vecs; i++) { adapter->msix_entries[i].entry = i; /* Vectors must start from 1 */ adapter->msix_entries[i].vector = i + 1; } msix_req = msix_vecs; rc = pci_alloc_msix(dev, &msix_vecs); if (unlikely(rc != 0)) { device_printf(dev, "Failed to enable MSI-X, vectors %d rc %d\n", msix_vecs, rc); rc = ENOSPC; goto err_msix_free; } if (msix_vecs != msix_req) { if (msix_vecs == ENA_ADMIN_MSIX_VEC) { device_printf(dev, "Not enough MSI-X vectors allocated: %d\n", msix_vecs); pci_release_msi(dev); rc = ENOSPC; goto err_msix_free; } device_printf(dev, "Enabled only %d MSI-X vectors (out of %d); reducing " "the number of queues\n", msix_vecs, msix_req); adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; } adapter->msix_vecs = msix_vecs;
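The vector accounting in ena_enable_msix() deserves to be spelled out: the driver asks for ENA_MAX_MSIX_VEC(num_queues) vectors, one per TX/RX queue pair plus the single management vector, and if the platform grants fewer it keeps whatever remains after the admin vector. A small sketch of the arithmetic with hypothetical numbers (the helper is illustrative, not part of the driver; it assumes ENA_ADMIN_MSIX_VEC is 1, matching the single management vector):

/* granted: result of pci_alloc_msix(); admin: ENA_ADMIN_MSIX_VEC */
static int
io_queues_from_granted_vectors(int granted, int admin)
{
	if (granted <= admin)
		return (0);	/* not even one TX/RX pair fits */
	return (granted - admin);
}

/*
 * Example: 8 queues -> 9 vectors requested; if only 5 are granted,
 * io_queues_from_granted_vectors(5, 1) == 4 queues remain usable.
 */

Bailing out with ENOSPC when only the admin vector was granted is deliberate: an adapter with zero IO queues would be useless, so releasing the MSI-X allocation is the only sensible outcome.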
ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); return (0); err_msix_free: free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; return (rc); } static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) { snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); /* * Handler is NULL on purpose, it will be set * when mgmnt interrupt is acquired */ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; } static int ena_setup_io_intr(struct ena_adapter *adapter) { static int last_bind_cpu = -1; int irq_idx; if (adapter->msix_entries == NULL) return (EINVAL); for (int i = 0; i < adapter->num_queues; i++) { irq_idx = ENA_IO_IRQ_IDX(i); snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); adapter->irq_tbl[irq_idx].handler = ena_handle_msix; adapter->irq_tbl[irq_idx].data = &adapter->que[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", adapter->msix_entries[irq_idx].vector); /* * We want to bind rings to the corresponding cpu * using something similar to the RSS round-robin technique. */ if (unlikely(last_bind_cpu < 0)) last_bind_cpu = CPU_FIRST(); adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = last_bind_cpu; last_bind_cpu = CPU_NEXT(last_bind_cpu); } return (0); } static int ena_request_mgmnt_irq(struct ena_adapter *adapter) { struct ena_irq *irq; unsigned long flags; int rc, rcc; flags = RF_ACTIVE | RF_SHAREABLE; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { device_printf(adapter->pdev, "could not allocate " "irq vector: %d\n", irq->vector); return (ENXIO); } rc = bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, &irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to register " "interrupt handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err_res_free; } irq->requested = true; return (rc); err_res_free: ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", irq->vector); rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); if (unlikely(rcc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->res = NULL; return (rc); } static int ena_request_io_irq(struct ena_adapter *adapter) { struct ena_irq *irq; unsigned long flags = 0; int rc = 0, i, rcc; if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { device_printf(adapter->pdev, "failed to request I/O IRQ: MSI-X is not enabled\n"); return (EINVAL); } else { flags = RF_ACTIVE | RF_SHAREABLE; } for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (unlikely(irq->requested)) continue; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { rc = ENOMEM; device_printf(adapter->pdev, "could not allocate " "irq vector: %d\n", irq->vector); goto err; } rc = bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, &irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to register " "interrupt 
handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err; } irq->requested = true; ena_trace(ENA_INFO, "queue %d - cpu %d\n", i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); } return (rc); err: for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { irq = &adapter->irq_tbl[i]; rcc = 0; /* Once we have entered the err: section and irq->requested is true, we free both the interrupt and its resources */ if (irq->requested) rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rcc != 0)) device_printf(adapter->pdev, "could not release" " irq: %d, error: %d\n", irq->vector, rcc); /* If we entered the err: section without irq->requested set, we know it was bus_alloc_resource_any() that needs cleanup, provided res is not NULL. In case res is NULL, no work is needed in this iteration */ rcc = 0; if (irq->res != NULL) { rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); } if (unlikely(rcc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->requested = false; irq->res = NULL; } return (rc); } static void ena_free_mgmnt_irq(struct ena_adapter *adapter) { struct ena_irq *irq; int rc; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; if (irq->requested) { ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) device_printf(adapter->pdev, "failed to tear " "down irq: %d\n", irq->vector); irq->requested = 0; } if (irq->res != NULL) { ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); } } static void ena_free_io_irq(struct ena_adapter *adapter) { struct ena_irq *irq; int rc; for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested) { ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; } if (irq->res != NULL) { ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) { device_printf(adapter->pdev, "dev has no parent" " while releasing res for irq: %d\n", irq->vector); } } } } static void ena_free_irqs(struct ena_adapter* adapter) { ena_free_io_irq(adapter); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); } static void ena_disable_msix(struct ena_adapter *adapter) { if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); pci_release_msi(adapter->pdev); } adapter->msix_vecs = 0; if (adapter->msix_entries != NULL) free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; } static void ena_unmask_all_io_irqs(struct ena_adapter *adapter) { struct ena_com_io_cq* io_cq; struct ena_eth_io_intr_reg intr_reg; uint16_t ena_qid; int i; /* Unmask interrupts for all queues */ for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; ena_com_update_intr_reg(&intr_reg, 0, 0, true); ena_com_unmask_intr(io_cq, &intr_reg); } } /* Configure the Rx forwarding */ static int ena_rss_configure(struct ena_adapter 
*adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* Set indirect table */ rc = ena_com_indirect_table_set(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); /* Configure hash function (if supported) */ rc = ena_com_set_hash_function(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); /* Configure hash inputs (if supported) */ rc = ena_com_set_hash_ctrl(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); return (0); } static int ena_up_complete(struct ena_adapter *adapter) { int rc; if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { rc = ena_rss_configure(adapter); if (rc != 0) return (rc); } rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); if (unlikely(rc != 0)) return (rc); ena_refill_all_rx_bufs(adapter); ena_reset_counters((counter_u64_t *)&adapter->hw_stats, sizeof(adapter->hw_stats)); return (0); } -static int +int ena_up(struct ena_adapter *adapter) { int rc = 0; if (unlikely(device_is_attached(adapter->pdev) == 0)) { device_printf(adapter->pdev, "device is not attached!\n"); return (ENXIO); } if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { device_printf(adapter->pdev, "device is going UP\n"); /* setup interrupts for IO queues */ rc = ena_setup_io_intr(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "error setting up IO interrupt\n"); goto error; } rc = ena_request_io_irq(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_req_irq\n"); goto error; } /* allocate transmit descriptors */ rc = ena_setup_all_tx_resources(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_setup_tx\n"); goto err_setup_tx; } /* allocate receive descriptors */ rc = ena_setup_all_rx_resources(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_setup_rx\n"); goto err_setup_rx; } /* create IO queues for Rx & Tx */ rc = ena_create_io_queues(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "create IO queues failed\n"); goto err_io_que; } if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(adapter->ifp, LINK_STATE_UP); rc = ena_up_complete(adapter); if (unlikely(rc != 0)) goto err_up_complete; counter_u64_add(adapter->dev_stats.interface_up, 1); ena_update_hwassist(adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); - callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, - ena_timer_service, (void *)adapter, 0); + /* Activate timer service only if the device is running. + * If this flag is not set, it means that the driver is being + * reset and timer service will be activated afterwards. 
+ */ + if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) { + callout_reset_sbt(&adapter->timer_service, SBT_1S, + SBT_1S, ena_timer_service, (void *)adapter, 0); + } ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); ena_unmask_all_io_irqs(adapter); } return (0); err_up_complete: ena_destroy_all_io_queues(adapter); err_io_que: ena_free_all_rx_resources(adapter); err_setup_rx: ena_free_all_tx_resources(adapter); err_setup_tx: ena_free_io_irq(adapter); error: return (rc); } static uint64_t ena_get_counter(if_t ifp, ift_counter cnt) { struct ena_adapter *adapter; struct ena_hw_stats *stats; adapter = if_getsoftc(ifp); stats = &adapter->hw_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (counter_u64_fetch(stats->rx_packets)); case IFCOUNTER_OPACKETS: return (counter_u64_fetch(stats->tx_packets)); case IFCOUNTER_IBYTES: return (counter_u64_fetch(stats->rx_bytes)); case IFCOUNTER_OBYTES: return (counter_u64_fetch(stats->tx_bytes)); case IFCOUNTER_IQDROPS: return (counter_u64_fetch(stats->rx_drops)); default: return (if_get_counter_default(ifp, cnt)); } } static int ena_media_change(if_t ifp) { /* Media Change is not supported by firmware */ return (0); } static void ena_media_status(if_t ifp, struct ifmediareq *ifmr) { struct ena_adapter *adapter = if_getsoftc(ifp); ena_trace(ENA_DBG, "enter\n"); mtx_lock(&adapter->global_mtx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { mtx_unlock(&adapter->global_mtx); ena_trace(ENA_INFO, "Link is down\n"); return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; mtx_unlock(&adapter->global_mtx); } static void ena_init(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { sx_xlock(&adapter->ioctl_sx); ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } static int ena_ioctl(if_t ifp, u_long command, caddr_t data) { struct ena_adapter *adapter; struct ifreq *ifr; int rc; adapter = ifp->if_softc; ifr = (struct ifreq *)data; /* * Acquiring lock to prevent from running up and down routines parallel. 
*/ rc = 0; switch (command) { case SIOCSIFMTU: if (ifp->if_mtu == ifr->ifr_mtu) break; sx_xlock(&adapter->ioctl_sx); ena_down(adapter); ena_change_mtu(ifp, ifr->ifr_mtu); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { device_printf(adapter->pdev, "ioctl promisc/allmulti\n"); } } else { sx_xlock(&adapter->ioctl_sx); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { sx_xlock(&adapter->ioctl_sx); ena_down(adapter); sx_unlock(&adapter->ioctl_sx); } } break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int reinit = 0; if (ifr->ifr_reqcap != ifp->if_capenable) { ifp->if_capenable = ifr->ifr_reqcap; reinit = 1; } if ((reinit != 0) && ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { sx_xlock(&adapter->ioctl_sx); ena_down(adapter); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } break; default: rc = ether_ioctl(ifp, command, data); break; } return (rc); } static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) { int caps = 0; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_TXCSUM; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) caps |= IFCAP_TXCSUM_IPV6; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) caps |= IFCAP_TSO4; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) caps |= IFCAP_TSO6; if ((feat->offload.rx_supported & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_RXCSUM; if ((feat->offload.rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) caps |= IFCAP_RXCSUM_IPV6; caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; return (caps); } static void ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) { host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); } static void ena_update_hwassist(struct ena_adapter *adapter) { if_t ifp = adapter->ifp; uint32_t feat = adapter->tx_offload_cap; int cap = if_getcapenable(ifp); int flags = 0; if_clearhwassist(ifp); if ((cap & IFCAP_TXCSUM) != 0) { if ((feat & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) flags |= CSUM_IP; if ((feat & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) flags |= CSUM_IP_UDP | CSUM_IP_TCP; } if ((cap & IFCAP_TXCSUM_IPV6) != 0) flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; if ((cap & IFCAP_TSO4) != 0) flags |= CSUM_IP_TSO; if ((cap & IFCAP_TSO6) != 0) flags |= CSUM_IP6_TSO; if_sethwassistbits(ifp, flags, 0); } static int ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *feat) { if_t ifp; int caps = 0; ifp = adapter->ifp = if_gethandle(IFT_ETHER); if (unlikely(ifp == NULL)) { ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); return (ENXIO); } if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); if_setdev(ifp, pdev); if_setsoftc(ifp, adapter); 
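ena_get_dev_offloads() and ena_update_hwassist() above form a two-stage pipeline: device feature bits become IFCAP_* capabilities at attach time, and the subset the user leaves enabled later becomes the per-packet CSUM_* hwassist flags. A condensed sketch of the checksum half of that mapping (the helper name is an assumption for illustration; the masks and flags are the ones used above):

static int
ena_tx_feat_to_hwassist(uint32_t feat, int capenable)
{
	int flags = 0;

	if ((capenable & IFCAP_TXCSUM) != 0) {
		if ((feat &
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
			flags |= CSUM_IP;
		if ((feat &
		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
	}
	if ((capenable & IFCAP_TXCSUM_IPV6) != 0)
		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;

	return (flags);
}

The IPv6 branch checks capenable alone because IFCAP_TXCSUM_IPV6 is only ever advertised when the corresponding feature bits were present while the capabilities were being built.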
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setinitfn(ifp, ena_init); if_settransmitfn(ifp, ena_mq_start); if_setqflushfn(ifp, ena_qflush); if_setioctlfn(ifp, ena_ioctl); if_setgetcounterfn(ifp, ena_get_counter); if_setsendqlen(ifp, adapter->tx_ring_size); if_setsendqready(ifp); if_setmtu(ifp, ETHERMTU); if_setbaudrate(ifp, 0); /* Zeroize capabilities... */ if_setcapabilities(ifp, 0); if_setcapenable(ifp, 0); /* check hardware support */ caps = ena_get_dev_offloads(feat); /* ... and set them */ if_setcapabilitiesbit(ifp, caps, 0); /* TSO parameters */ ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, ena_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, adapter->mac_addr); return (0); } -static void +void ena_down(struct ena_adapter *adapter) { int rc; if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { device_printf(adapter->pdev, "device is going DOWN\n"); callout_drain(&adapter->timer_service); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ena_free_io_irq(adapter); if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Device reset failed\n"); } ena_destroy_all_io_queues(adapter); ena_free_all_tx_bufs(adapter); ena_free_all_rx_bufs(adapter); ena_free_all_tx_resources(adapter); ena_free_all_rx_resources(adapter); counter_u64_add(adapter->dev_stats.interface_down, 1); } } -static void -ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf) -{ - struct ena_com_tx_meta *ena_meta; - struct ether_vlan_header *eh; - struct mbuf *mbuf_next; - u32 mss; - bool offload; - uint16_t etype; - int ehdrlen; - struct ip *ip; - int iphlen; - struct tcphdr *th; - int offset; - - offload = false; - ena_meta = &ena_tx_ctx->ena_meta; - mss = mbuf->m_pkthdr.tso_segsz; - - if (mss != 0) - offload = true; - - if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) - offload = true; - - if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) - offload = true; - - if (!offload) { - ena_tx_ctx->meta_valid = 0; - return; - } - - /* Determine where frame payload starts. 
*/ - eh = mtod(mbuf, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - etype = ntohs(eh->evl_proto); - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - } else { - etype = ntohs(eh->evl_encap_proto); - ehdrlen = ETHER_HDR_LEN; - } - - mbuf_next = m_getptr(mbuf, ehdrlen, &offset); - ip = (struct ip *)(mtodo(mbuf_next, offset)); - iphlen = ip->ip_hl << 2; - - mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset); - th = (struct tcphdr *)(mtodo(mbuf_next, offset)); - - if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) { - ena_tx_ctx->l3_csum_enable = 1; - } - if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) { - ena_tx_ctx->tso_enable = 1; - ena_meta->l4_hdr_len = (th->th_off); - } - - switch (etype) { - case ETHERTYPE_IP: - ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; - if ((ip->ip_off & htons(IP_DF)) != 0) - ena_tx_ctx->df = 1; - break; - case ETHERTYPE_IPV6: - ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; - - default: - break; - } - - if (ip->ip_p == IPPROTO_TCP) { - ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; - if ((mbuf->m_pkthdr.csum_flags & - (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0) - ena_tx_ctx->l4_csum_enable = 1; - else - ena_tx_ctx->l4_csum_enable = 0; - } else if (ip->ip_p == IPPROTO_UDP) { - ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; - if ((mbuf->m_pkthdr.csum_flags & - (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0) - ena_tx_ctx->l4_csum_enable = 1; - else - ena_tx_ctx->l4_csum_enable = 0; - } else { - ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; - ena_tx_ctx->l4_csum_enable = 0; - } - - ena_meta->mss = mss; - ena_meta->l3_hdr_len = iphlen; - ena_meta->l3_hdr_offset = ehdrlen; - ena_tx_ctx->meta_valid = 1; -} - static int -ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) -{ - struct ena_adapter *adapter; - struct mbuf *collapsed_mbuf; - int num_frags; - - adapter = tx_ring->adapter; - num_frags = ena_mbuf_count(*mbuf); - - /* One segment must be reserved for configuration descriptor. */ - if (num_frags < adapter->max_tx_sgl_size) - return (0); - counter_u64_add(tx_ring->tx_stats.collapse, 1); - - collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, - adapter->max_tx_sgl_size - 1); - if (unlikely(collapsed_mbuf == NULL)) { - counter_u64_add(tx_ring->tx_stats.collapse_err, 1); - return (ENOMEM); - } - - /* If mbuf was collapsed succesfully, original mbuf is released. */ - *mbuf = collapsed_mbuf; - - return (0); -} - -static void -ena_dmamap_llq(void *arg, bus_dma_segment_t *segs, int nseg, int error) -{ - struct ena_com_buf *ena_buf = arg; - - if (unlikely(error != 0)) { - ena_buf->paddr = 0; - return; - } - - KASSERT(nseg == 1, ("Invalid num of segments for LLQ dma")); - - ena_buf->paddr = segs->ds_addr; - ena_buf->len = segs->ds_len; -} - -static int -ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, - struct mbuf *mbuf, void **push_hdr, u16 *header_len) -{ - struct ena_adapter *adapter = tx_ring->adapter; - struct ena_com_buf *ena_buf; - bus_dma_segment_t segs[ENA_BUS_DMA_SEGS]; - uint32_t mbuf_head_len, frag_len; - uint16_t push_len = 0; - uint16_t delta = 0; - int i, rc, nsegs; - - mbuf_head_len = mbuf->m_len; - tx_info->mbuf = mbuf; - ena_buf = tx_info->bufs; - - if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - /* - * When the device is LLQ mode, the driver will copy - * the header into the device memory space. - * the ena_com layer assumes the header is in a linear - * memory space. 
- * This assumption might be wrong since part of the header - * can be in the fragmented buffers. - * First check if header fits in the mbuf. If not, copy it to - * separate buffer that will be holding linearized data. - */ - push_len = min_t(uint32_t, mbuf->m_pkthdr.len, - tx_ring->tx_max_header_size); - *header_len = push_len; - /* If header is in linear space, just point into mbuf's data. */ - if (likely(push_len <= mbuf_head_len)) { - *push_hdr = mbuf->m_data; - /* - * Otherwise, copy whole portion of header from multiple mbufs - * to intermediate buffer. - */ - } else { - m_copydata(mbuf, 0, push_len, - tx_ring->push_buf_intermediate_buf); - *push_hdr = tx_ring->push_buf_intermediate_buf; - - counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); - delta = push_len - mbuf_head_len; - } - - ena_trace(ENA_DBG | ENA_TXPTH, - "mbuf: %p header_buf->vaddr: %p push_len: %d\n", - mbuf, *push_hdr, push_len); - - /* - * If header was in linear memory space, map for the dma rest of the data - * in the first mbuf of the mbuf chain. - */ - if (mbuf_head_len > push_len) { - rc = bus_dmamap_load(adapter->tx_buf_tag, - tx_info->map_head, - mbuf->m_data + push_len, mbuf_head_len - push_len, - ena_dmamap_llq, ena_buf, BUS_DMA_NOWAIT); - if (unlikely((rc != 0) || (ena_buf->paddr == 0))) - goto single_dma_error; - - ena_buf++; - tx_info->num_of_bufs++; - - tx_info->head_mapped = true; - } - mbuf = mbuf->m_next; - } else { - *push_hdr = NULL; - /* - * header_len is just a hint for the device. Because FreeBSD is not - * giving us information about packet header length and it is not - * guaranteed that all packet headers will be in the 1st mbuf, setting - * header_len to 0 is making the device ignore this value and resolve - * header on it's own. - */ - *header_len = 0; - } - - /* - * If header is in non linear space (delta > 0), then skip mbufs - * containing header and map the last one containing both header and the - * packet data. - * The first segment is already counted in. - * If LLQ is not supported, the loop will be skipped. - */ - while (delta > 0) { - frag_len = mbuf->m_len; - - /* - * If whole segment contains header just move to the - * next one and reduce delta. - */ - if (unlikely(delta >= frag_len)) { - delta -= frag_len; - } else { - /* - * Map rest of the packet data that was contained in - * the mbuf. - */ - rc = bus_dmamap_load(adapter->tx_buf_tag, - tx_info->map_head, mbuf->m_data + delta, - frag_len - delta, ena_dmamap_llq, ena_buf, - BUS_DMA_NOWAIT); - if (unlikely((rc != 0) || (ena_buf->paddr == 0))) - goto single_dma_error; - - ena_buf++; - tx_info->num_of_bufs++; - tx_info->head_mapped = true; - - delta = 0; - } - - mbuf = mbuf->m_next; - } - - if (mbuf == NULL) { - return (0); - } - - /* Map rest of the mbufs */ - rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map_seg, mbuf, - segs, &nsegs, BUS_DMA_NOWAIT); - if (unlikely((rc != 0) || (nsegs == 0))) { - ena_trace(ENA_WARNING, - "dmamap load failed! 
err: %d nsegs: %d\n", rc, nsegs); - goto dma_error; - } - - for (i = 0; i < nsegs; i++) { - ena_buf->len = segs[i].ds_len; - ena_buf->paddr = segs[i].ds_addr; - ena_buf++; - } - tx_info->num_of_bufs += nsegs; - tx_info->seg_mapped = true; - - return (0); - -dma_error: - if (tx_info->head_mapped == true) - bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); -single_dma_error: - counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); - tx_info->mbuf = NULL; - return (rc); -} - -static int -ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) -{ - struct ena_adapter *adapter; - struct ena_tx_buffer *tx_info; - struct ena_com_tx_ctx ena_tx_ctx; - struct ena_com_dev *ena_dev; - struct ena_com_io_sq* io_sq; - void *push_hdr; - uint16_t next_to_use; - uint16_t req_id; - uint16_t ena_qid; - uint16_t header_len; - int rc; - int nb_hw_desc; - - ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); - adapter = tx_ring->que->adapter; - ena_dev = adapter->ena_dev; - io_sq = &ena_dev->io_sq_queues[ena_qid]; - - rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); - if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, - "Failed to collapse mbuf! err: %d\n", rc); - return (rc); - } - - ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len); - - next_to_use = tx_ring->next_to_use; - req_id = tx_ring->free_tx_ids[next_to_use]; - tx_info = &tx_ring->tx_buffer_info[req_id]; - tx_info->num_of_bufs = 0; - - rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); - if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, "Failed to map TX mbuf\n"); - return (rc); - } - memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); - ena_tx_ctx.ena_bufs = tx_info->bufs; - ena_tx_ctx.push_header = push_hdr; - ena_tx_ctx.num_bufs = tx_info->num_of_bufs; - ena_tx_ctx.req_id = req_id; - ena_tx_ctx.header_len = header_len; - - /* Set flags and meta data */ - ena_tx_csum(&ena_tx_ctx, *mbuf); - - if (tx_ring->acum_pkts == DB_THRESHOLD || - ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) { - ena_trace(ENA_DBG | ENA_TXPTH, - "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", - tx_ring->que->id); - wmb(); - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - counter_u64_add(tx_ring->tx_stats.doorbells, 1); - tx_ring->acum_pkts = 0; - } - - /* Prepare the packet's descriptors and send them to device */ - rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); - if (unlikely(rc != 0)) { - if (likely(rc == ENA_COM_NO_MEM)) { - ena_trace(ENA_DBG | ENA_TXPTH, - "tx ring[%d] if out of space\n", tx_ring->que->id); - } else { - device_printf(adapter->pdev, - "failed to prepare tx bufs\n"); - } - counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1); - goto dma_error; - } - - counter_enter(); - counter_u64_add_protected(tx_ring->tx_stats.cnt, 1); - counter_u64_add_protected(tx_ring->tx_stats.bytes, - (*mbuf)->m_pkthdr.len); - - counter_u64_add_protected(adapter->hw_stats.tx_packets, 1); - counter_u64_add_protected(adapter->hw_stats.tx_bytes, - (*mbuf)->m_pkthdr.len); - counter_exit(); - - tx_info->tx_descs = nb_hw_desc; - getbinuptime(&tx_info->timestamp); - tx_info->print_once = true; - - tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, - tx_ring->ring_size); - - /* stop the queue when no more space available, the packet can have up - * to sgl_size + 2. one for the meta descriptor and one for header - * (if the header is larger than tx_max_header_size). 
- */ - if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, - adapter->max_tx_sgl_size + 2))) { - ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", - tx_ring->que->id); - - tx_ring->running = false; - counter_u64_add(tx_ring->tx_stats.queue_stop, 1); - - /* There is a rare condition where this function decides to - * stop the queue but meanwhile tx_cleanup() updates - * next_to_completion and terminates. - * The queue will remain stopped forever. - * To solve this issue this function performs mb(), checks - * the wakeup condition and wakes up the queue if needed. - */ - mb(); - - if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, - ENA_TX_RESUME_THRESH)) { - tx_ring->running = true; - counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); - } - } - - if (tx_info->head_mapped == true) - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, - BUS_DMASYNC_PREWRITE); - if (tx_info->seg_mapped == true) - bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, - BUS_DMASYNC_PREWRITE); - - return (0); - -dma_error: - tx_info->mbuf = NULL; - if (tx_info->seg_mapped == true) { - bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg); - tx_info->seg_mapped = false; - } - if (tx_info->head_mapped == true) { - bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); - tx_info->head_mapped = false; - } - - return (rc); -} - -static void -ena_start_xmit(struct ena_ring *tx_ring) -{ - struct mbuf *mbuf; - struct ena_adapter *adapter = tx_ring->adapter; - struct ena_com_io_sq* io_sq; - int ena_qid; - int ret = 0; - - if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) - return; - - if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))) - return; - - ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); - io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; - - while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { - ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" - " header csum flags %#jx\n", - mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); - - if (unlikely(!tx_ring->running)) { - drbr_putback(adapter->ifp, tx_ring->br, mbuf); - break; - } - - if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { - if (ret == ENA_COM_NO_MEM) { - drbr_putback(adapter->ifp, tx_ring->br, mbuf); - } else if (ret == ENA_COM_NO_SPACE) { - drbr_putback(adapter->ifp, tx_ring->br, mbuf); - } else { - m_freem(mbuf); - drbr_advance(adapter->ifp, tx_ring->br); - } - - break; - } - - drbr_advance(adapter->ifp, tx_ring->br); - - if (unlikely((if_getdrvflags(adapter->ifp) & - IFF_DRV_RUNNING) == 0)) - return; - - tx_ring->acum_pkts++; - - BPF_MTAP(adapter->ifp, mbuf); - } - - if (likely(tx_ring->acum_pkts != 0)) { - wmb(); - /* Trigger the dma engine */ - ena_com_write_sq_doorbell(io_sq); - counter_u64_add(tx_ring->tx_stats.doorbells, 1); - tx_ring->acum_pkts = 0; - } - - if (unlikely(!tx_ring->running)) - taskqueue_enqueue(tx_ring->que->cleanup_tq, - &tx_ring->que->cleanup_task); -} - -static void -ena_deferred_mq_start(void *arg, int pending) -{ - struct ena_ring *tx_ring = (struct ena_ring *)arg; - struct ifnet *ifp = tx_ring->adapter->ifp; - - while (!drbr_empty(ifp, tx_ring->br) && - tx_ring->running && - (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { - ENA_RING_MTX_LOCK(tx_ring); - ena_start_xmit(tx_ring); - ENA_RING_MTX_UNLOCK(tx_ring); - } -} - -static int -ena_mq_start(if_t ifp, struct mbuf *m) -{ - struct ena_adapter *adapter = ifp->if_softc; - struct ena_ring *tx_ring; - int ret, is_drbr_empty; - uint32_t i; - - if 
(unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) - return (ENODEV); - - /* Which queue to use */ - /* - * If everything is set up correctly, it should be the - * same bucket as the CPU we are currently on. - * It should improve performance. - */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { - i = m->m_pkthdr.flowid % adapter->num_queues; - } else { - i = curcpu % adapter->num_queues; - } - tx_ring = &adapter->tx_ring[i]; - - /* Check if drbr is empty before putting packet */ - is_drbr_empty = drbr_empty(ifp, tx_ring->br); - ret = drbr_enqueue(ifp, tx_ring->br, m); - if (unlikely(ret != 0)) { - taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); - return (ret); - } - - if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { - ena_start_xmit(tx_ring); - ENA_RING_MTX_UNLOCK(tx_ring); - } else { - taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); - } - - return (0); -} - -static void -ena_qflush(if_t ifp) -{ - struct ena_adapter *adapter = ifp->if_softc; - struct ena_ring *tx_ring = adapter->tx_ring; - int i; - - for(i = 0; i < adapter->num_queues; ++i, ++tx_ring) - if (!drbr_empty(ifp, tx_ring->br)) { - ENA_RING_MTX_LOCK(tx_ring); - drbr_flush(ifp, tx_ring->br); - ENA_RING_MTX_UNLOCK(tx_ring); - } - - if_qflush(ifp); -} - -static int ena_calc_io_queue_num(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_com_dev *ena_dev = adapter->ena_dev; int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; /* Regular queues capabilities */ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &get_feat_ctx->max_queue_ext.max_queue_ext; io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, max_queue_ext->max_rx_cq_num); io_tx_sq_num = max_queue_ext->max_tx_sq_num; io_tx_cq_num = max_queue_ext->max_tx_cq_num; } else { struct ena_admin_queue_feature_desc *max_queues = &get_feat_ctx->max_queues; io_tx_sq_num = max_queues->max_sq_num; io_tx_cq_num = max_queues->max_cq_num; io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); } /* In case of LLQ use the llq fields for the tx SQ/CQ */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) io_tx_sq_num = get_feat_ctx->llq.max_llq_num; io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); io_queue_num = min_t(int, io_queue_num, io_rx_num); io_queue_num = min_t(int, io_queue_num, io_tx_sq_num); io_queue_num = min_t(int, io_queue_num, io_tx_cq_num); /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ io_queue_num = min_t(int, io_queue_num, pci_msix_count(adapter->pdev) - 1); return (io_queue_num); } static int ena_enable_wc(struct resource *res) { #if defined(__i386) || defined(__amd64) vm_offset_t va; vm_size_t len; int rc; va = (vm_offset_t)rman_get_virtual(res); len = rman_get_size(res); /* Enable write combining */ rc = pmap_change_attr(va, len, PAT_WRITE_COMBINING); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); return (rc); } return (0); #endif return (EOPNOTSUPP); } static int ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations) { struct ena_adapter *adapter = device_get_softc(pdev); int rc, rid; uint32_t llq_feature_mask; llq_feature_mask = 1 << ENA_ADMIN_LLQ; if (!(ena_dev->supported_features & llq_feature_mask)) { device_printf(pdev, "LLQ is not supported. 
Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to configure the device mode. " "Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } /* Nothing to config, exit */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return (0); /* Try to allocate resources for LLQ bar */ rid = PCIR_BAR(ENA_MEM_BAR); adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->memory == NULL)) { device_printf(pdev, "unable to allocate LLQ bar resource. " "Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } /* Enable write combining for better LLQ performance */ rc = ena_enable_wc(adapter->memory); if (unlikely(rc != 0)) { device_printf(pdev, "failed to enable write combining.\n"); return (rc); } /* * Save virtual address of the device's memory region * for the ena_com layer. */ ena_dev->mem_bar = rman_get_virtual(adapter->memory); return (0); } static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) { llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; llq_config->llq_ring_entry_size_value = 128; } static int ena_calc_queue_size(struct ena_adapter *adapter, struct ena_calc_queue_size_ctx *ctx) { struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; struct ena_com_dev *ena_dev = ctx->ena_dev; uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; uint32_t rx_queue_size = adapter->rx_ring_size; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; rx_queue_size = min_t(uint32_t, rx_queue_size, max_queue_ext->max_rx_cq_depth); rx_queue_size = min_t(uint32_t, rx_queue_size, max_queue_ext->max_rx_sq_depth); tx_queue_size = min_t(uint32_t, tx_queue_size, max_queue_ext->max_tx_cq_depth); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) tx_queue_size = min_t(uint32_t, tx_queue_size, llq->max_llq_depth); else tx_queue_size = min_t(uint32_t, tx_queue_size, max_queue_ext->max_tx_sq_depth); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_rx_descs); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_tx_descs); } else { struct ena_admin_queue_feature_desc *max_queues = &ctx->get_feat_ctx->max_queues; rx_queue_size = min_t(uint32_t, rx_queue_size, max_queues->max_cq_depth); rx_queue_size = min_t(uint32_t, rx_queue_size, max_queues->max_sq_depth); tx_queue_size = min_t(uint32_t, tx_queue_size, max_queues->max_cq_depth); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) tx_queue_size = min_t(uint32_t, tx_queue_size, llq->max_llq_depth); else tx_queue_size = min_t(uint32_t, tx_queue_size, max_queues->max_sq_depth); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_tx_descs); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_rx_descs); } /* round down to the nearest power of 2 */ rx_queue_size = 1 << (fls(rx_queue_size) - 1); 
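Each queue depth in ena_calc_queue_size() is clamped against several device limits and then rounded down to a power of two. fls() returns the 1-based index of the most significant set bit, so 1 << (fls(x) - 1) isolates that bit. A tiny standalone illustration, assuming only the libkern fls() already used here:

static inline uint32_t
round_down_pow2(uint32_t x)
{
	return (x == 0 ? 0 : 1U << (fls(x) - 1));
}

/* round_down_pow2(1000) == 512; round_down_pow2(1024) == 1024 */

Power-of-two ring sizes are what allow the ring index macros to wrap with a cheap mask rather than a division.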
tx_queue_size = 1 << (fls(tx_queue_size) - 1); if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { device_printf(ctx->pdev, "Invalid queue size\n"); return (EFAULT); } ctx->rx_queue_size = rx_queue_size; ctx->tx_queue_size = tx_queue_size; return (0); } static int ena_handle_updated_queues(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; device_t pdev = adapter->pdev; bool are_queues_changed = false; int io_queue_num, rc; calc_queue_ctx.ena_dev = ena_dev; calc_queue_ctx.get_feat_ctx = get_feat_ctx; calc_queue_ctx.pdev = pdev; io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx); rc = ena_calc_queue_size(adapter, &calc_queue_ctx); if (unlikely(rc != 0 || io_queue_num <= 0)) return EFAULT; if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size) are_queues_changed = true; if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size || adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) { device_printf(pdev, "Not enough resources to allocate requested queue sizes " "(TX,RX)=(%d,%d), falling back to queue sizes " "(TX,RX)=(%d,%d)\n", adapter->tx_ring_size, adapter->rx_ring_size, calc_queue_ctx.tx_queue_size, calc_queue_ctx.rx_queue_size); adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; are_queues_changed = true; } if (unlikely(adapter->num_queues > io_queue_num)) { device_printf(pdev, "Not enough resources to allocate %d queues, " "falling back to %d queues\n", adapter->num_queues, io_queue_num); adapter->num_queues = io_queue_num; if (ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)) { ena_com_rss_destroy(ena_dev); rc = ena_rss_init_default(adapter); if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { device_printf(pdev, "Cannot init RSS rc: %d\n", rc); return (rc); } } are_queues_changed = true; } if (unlikely(are_queues_changed)) { ena_free_all_io_rings_resources(adapter); ena_init_io_rings(adapter); } return (0); } static int ena_rss_init_default(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; device_t dev = adapter->pdev; int qid, rc, i; rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); if (unlikely(rc != 0)) { device_printf(dev, "Cannot init indirect table\n"); return (rc); } for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { qid = i % adapter->num_queues; rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(qid)); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill indirect table\n"); goto err_rss_destroy; } } rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill hash function\n"); goto err_rss_destroy; } rc = ena_com_set_default_hash_ctrl(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill hash control\n"); goto err_rss_destroy; } return (0); err_rss_destroy: ena_com_rss_destroy(ena_dev); return (rc); } static void ena_rss_init_default_deferred(void *arg) { struct ena_adapter *adapter; devclass_t dc; int max; int rc; dc = devclass_find("ena"); if (unlikely(dc == NULL)) { ena_trace(ENA_ALERT, "No devclass ena\n"); return; } max = devclass_get_maxunit(dc); while (max-- >= 0) { adapter = devclass_get_softc(dc, max); if (adapter 
!= NULL) { rc = ena_rss_init_default(adapter); ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "WARNING: RSS was not properly initialized," " it will affect bandwidth\n"); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); } } } } SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); static void ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) { struct ena_admin_host_info *host_info; uintptr_t rid; int rc; /* Allocate only the host info */ rc = ena_com_allocate_host_info(ena_dev); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "Cannot allocate host info\n"); return; } host_info = ena_dev->host_attr.host_info; if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) host_info->bdf = rid; host_info->os_type = ENA_ADMIN_OS_FREEBSD; host_info->kernel_ver = osreldate; sprintf(host_info->kernel_ver_str, "%d", osreldate); host_info->os_dist = 0; strncpy(host_info->os_dist_str, osrelease, sizeof(host_info->os_dist_str) - 1); host_info->driver_version = (DRV_MODULE_VER_MAJOR) | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); host_info->num_cpus = mp_ncpus; rc = ena_com_set_host_attributes(ena_dev); if (unlikely(rc != 0)) { if (rc == EOPNOTSUPP) ena_trace(ENA_WARNING, "Cannot set host attributes\n"); else ena_trace(ENA_ALERT, "Cannot set host attributes\n"); goto err; } return; err: ena_com_delete_host_info(ena_dev); } static int ena_device_init(struct ena_adapter *adapter, device_t pdev, struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) { struct ena_com_dev* ena_dev = adapter->ena_dev; bool readless_supported; uint32_t aenq_groups; int dma_width; int rc; rc = ena_com_mmio_reg_read_request_init(ena_dev); if (unlikely(rc != 0)) { device_printf(pdev, "failed to init mmio read less\n"); return (rc); } /* * The PCIe configuration space revision id indicate if mmio reg * read is disabled */ readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); ena_com_set_mmio_read_mode(ena_dev, readless_supported); rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); if (unlikely(rc != 0)) { device_printf(pdev, "Can not reset device\n"); goto err_mmio_read_less; } rc = ena_com_validate_version(ena_dev); if (unlikely(rc != 0)) { device_printf(pdev, "device version is too low\n"); goto err_mmio_read_less; } dma_width = ena_com_get_dma_width(ena_dev); if (unlikely(dma_width < 0)) { device_printf(pdev, "Invalid dma width value %d", dma_width); rc = dma_width; goto err_mmio_read_less; } adapter->dma_width = dma_width; /* ENA admin level init */ rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (unlikely(rc != 0)) { device_printf(pdev, "Can not initialize ena admin queue with device\n"); goto err_mmio_read_less; } /* * To enable the msix interrupts the driver needs to know the number * of queues. 
So the driver uses polling mode to retrieve this * information */ ena_com_set_admin_polling_mode(ena_dev, true); ena_config_host_info(ena_dev, pdev); /* Get Device Attributes */ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (unlikely(rc != 0)) { device_printf(pdev, "Cannot get attribute for ena device rc: %d\n", rc); goto err_admin_init; } aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_FATAL_ERROR) | BIT(ENA_ADMIN_WARNING) | BIT(ENA_ADMIN_NOTIFICATION) | BIT(ENA_ADMIN_KEEP_ALIVE); aenq_groups &= get_feat_ctx->aenq.supported_groups; rc = ena_com_set_aenq_config(ena_dev, aenq_groups); if (unlikely(rc != 0)) { device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); goto err_admin_init; } *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); return (0); err_admin_init: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); return (rc); } static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, int io_vectors) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; rc = ena_enable_msix(adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Error with MSI-X enablement\n"); return (rc); } ena_setup_mgmnt_intr(adapter); rc = ena_request_mgmnt_irq(adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); goto err_disable_msix; } ena_com_set_admin_polling_mode(ena_dev, false); ena_com_admin_aenq_enable(ena_dev); return (0); err_disable_msix: ena_disable_msix(adapter); return (rc); } /* Function called on ENA_ADMIN_KEEP_ALIVE event */ static void ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_keep_alive_desc *desc; sbintime_t stime; uint64_t rx_drops; desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; counter_u64_zero(adapter->hw_stats.rx_drops); counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); stime = getsbinuptime(); atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); } /* Check for keep alive expiration */ static void check_for_missing_keep_alive(struct ena_adapter *adapter) { sbintime_t timestamp, time; if (adapter->wd_active == 0) return; if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); time = getsbinuptime() - timestamp; if (unlikely(time > adapter->keep_alive_timeout)) { device_printf(adapter->pdev, "Keep alive watchdog timeout.\n"); counter_u64_add(adapter->dev_stats.wd_expired, 1); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } } } /* Check if admin queue is enabled */ static void check_for_admin_com_state(struct ena_adapter *adapter) { if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { device_printf(adapter->pdev, "ENA admin queue is not in running state!\n"); counter_u64_add(adapter->dev_stats.admin_q_pause, 1); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } } } static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, struct ena_ring *rx_ring) { if (likely(rx_ring->first_interrupt)) return (0); if 
(ena_com_cq_empty(rx_ring->ena_com_io_cq)) return (0); rx_ring->no_interrupt_event_cnt++; if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { device_printf(adapter->pdev, "Potential MSI-X issue on Rx side " "Queue = %d. Reset the device\n", rx_ring->qid); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (EIO); } return (0); } static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { struct bintime curtime, time; struct ena_tx_buffer *tx_buf; sbintime_t time_offset; uint32_t missed_tx = 0; int i, rc = 0; getbinuptime(&curtime); for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; if (bintime_isset(&tx_buf->timestamp) == 0) continue; time = curtime; bintime_sub(&time, &tx_buf->timestamp); time_offset = bttosbt(time); if (unlikely(!tx_ring->first_interrupt && time_offset > 2 * adapter->missing_tx_timeout)) { /* * If the interrupt is still not received after the * grace period, we schedule a reset. */ device_printf(adapter->pdev, "Potential MSI-X issue on Tx side Queue = %d. " "Reset the device\n", tx_ring->qid); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (EIO); } /* Check again if packet is still waiting */ if (unlikely(time_offset > adapter->missing_tx_timeout)) { if (!tx_buf->print_once) ena_trace(ENA_WARNING, "Found a Tx that wasn't " "completed on time, qid %d, index %d.\n", tx_ring->qid, i); tx_buf->print_once = true; missed_tx++; } } if (unlikely(missed_tx > adapter->missing_tx_threshold)) { device_printf(adapter->pdev, "The number of lost Tx completions is above the threshold " "(%d > %d). Reset the device\n", missed_tx, adapter->missing_tx_threshold); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } rc = EIO; } counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); return (rc); } /* * Check for Tx packets which were not completed on time. * The timeout is defined by "missing_tx_timeout". * A reset will be performed if the number of incomplete * transactions exceeds "missing_tx_threshold". */ static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; struct ena_ring *rx_ring; int i, budget, rc; /* Make sure device state changes made by other processes are visible before checking the flags below */ rmb(); if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; budget = adapter->missing_tx_max_queues; for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; rx_ring = &adapter->rx_ring[i]; rc = check_missing_comp_in_tx_queue(adapter, tx_ring); if (unlikely(rc != 0)) return; rc = check_for_rx_interrupt_queue(adapter, rx_ring); if (unlikely(rc != 0)) return; budget--; if (budget == 0) { i++; break; } } adapter->next_monitored_tx_qid = i % adapter->num_queues; } /* trigger rx cleanup after 2 consecutive detections */ #define EMPTY_RX_REFILL 2 /* For the rare case where the device runs out of Rx descriptors and the * msix handler failed to refill new Rx descriptors (due to a lack of memory * for example). 
* This case will lead to a deadlock: * The device won't send interrupts since all the new Rx packets will be dropped * The msix handler won't allocate new Rx descriptors so the device won't be * able to send new packets. * * When such a situation is detected - execute rx cleanup task in another thread */ static void check_for_empty_rx_ring(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, refill_required; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; for (i = 0; i < adapter->num_queues; i++) { rx_ring = &adapter->rx_ring[i]; refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 1); device_printf(adapter->pdev, "trigger refill for ring %d\n", i); taskqueue_enqueue(rx_ring->que->cleanup_tq, &rx_ring->que->cleanup_task); rx_ring->empty_rx_queue = 0; } } else { rx_ring->empty_rx_queue = 0; } } } static void ena_update_hints(struct ena_adapter *adapter, struct ena_admin_ena_hw_hints *hints) { struct ena_com_dev *ena_dev = adapter->ena_dev; if (hints->admin_completion_tx_timeout) ena_dev->admin_queue.completion_timeout = hints->admin_completion_tx_timeout * 1000; if (hints->mmio_read_timeout) /* convert to usec */ ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; if (hints->missed_tx_completion_count_threshold_to_reset) adapter->missing_tx_threshold = hints->missed_tx_completion_count_threshold_to_reset; if (hints->missing_tx_completion_timeout) { if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->missing_tx_timeout = SBT_1MS * hints->missing_tx_completion_timeout; } if (hints->driver_watchdog_timeout) { if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->keep_alive_timeout = SBT_1MS * hints->driver_watchdog_timeout; } } static void ena_timer_service(void *data) { struct ena_adapter *adapter = (struct ena_adapter *)data; struct ena_admin_host_info *host_info = adapter->ena_dev->host_attr.host_info; check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); if (host_info != NULL) ena_update_host_info(host_info, adapter->ifp); if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { device_printf(adapter->pdev, "Trigger reset is on\n"); taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); return; } /* * Schedule another timeout one second from now. 
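The timer service driving all of the checks above is a self-rearming one-second callout: each tick runs the watchdog checks and then schedules itself again, unless a reset was triggered. A minimal sketch of this callout(9) pattern, with hypothetical names (tick, my_softc) standing in for the driver's:

	static struct callout timer;

	static void
	tick(void *arg)
	{
		/* ... run the periodic health checks on arg ... */

		/* Re-arm one second from now, with one second of precision. */
		callout_schedule_sbt(&timer, SBT_1S, SBT_1S, 0);
	}

	static void
	timer_start(void *my_softc, struct mtx *lock)
	{
		/* Bind the callout to a mutex so handler runs are serialized. */
		callout_init_mtx(&timer, lock, 0);
		callout_reset_sbt(&timer, SBT_1S, SBT_1S, tick, my_softc, 0);
	}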
*/ callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); } -static void +void ena_destroy_device(struct ena_adapter *adapter, bool graceful) { if_t ifp = adapter->ifp; struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) return; if_link_state_change(ifp, LINK_STATE_DOWN); callout_drain(&adapter->timer_service); dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); if (dev_up) ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); else ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) ena_down(adapter); /* * Stop the device from sending AENQ events (if the device was up, and * the trigger reset was on, ena_down already performs device reset) */ if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); adapter->reset_reason = ENA_REGS_RESET_NORMAL; ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); } static int ena_device_validate_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, ETHER_ADDR_LEN) != 0) { device_printf(adapter->pdev, "Error, mac addresses are different\n"); return (EINVAL); } if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { device_printf(adapter->pdev, "Error, device max mtu is smaller than ifp MTU\n"); return (EINVAL); } return (0); } -static int +int ena_restore_device(struct ena_adapter *adapter) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = adapter->ena_dev; if_t ifp = adapter->ifp; device_t dev = adapter->pdev; int wd_active; int rc; ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); if (rc != 0) { device_printf(dev, "Cannot initialize device\n"); goto err; } /* * Only enable WD if it was enabled before reset, so it won't override * the value set by the user via the sysctl.
*/ if (adapter->wd_active != 0) adapter->wd_active = wd_active; rc = ena_device_validate_params(adapter, &get_feat_ctx); if (rc != 0) { device_printf(dev, "Validation of device parameters failed\n"); goto err_device_destroy; } rc = ena_handle_updated_queues(adapter, &get_feat_ctx); if (rc != 0) goto err_device_destroy; ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); /* Make sure we don't have a race with the AENQ link state handler */ if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(ifp, LINK_STATE_UP); rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc != 0) { device_printf(dev, "Enable MSI-X failed\n"); goto err_device_destroy; } /* If the interface was up before the reset bring it up */ if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { rc = ena_up(adapter); if (rc != 0) { device_printf(dev, "Failed to create I/O queues\n"); goto err_disable_msix; } } + /* Indicate that device is running again and ready to work */ ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); - callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, - ena_timer_service, (void *)adapter, 0); + if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { + /* + * As the AENQ handlers weren't executed during reset because + * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the + * timestamp must be updated again. That will prevent the next + * reset caused by a missing keep alive. + */ + adapter->keep_alive_timestamp = getsbinuptime(); + callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, + ena_timer_service, (void *)adapter, 0); + } + device_printf(dev, "Device reset completed successfully, Driver info: %s\n", ena_version); return (rc); err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); ena_com_mmio_reg_read_request_destroy(ena_dev); err: ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); device_printf(dev, "Reset attempt failed. Cannot reset the device\n"); return (rc); } static void ena_reset_task(void *arg, int pending) { struct ena_adapter *adapter = (struct ena_adapter *)arg; if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { device_printf(adapter->pdev, "device reset scheduled but trigger_reset is off\n"); return; } sx_xlock(&adapter->ioctl_sx); ena_destroy_device(adapter, false); ena_restore_device(adapter); sx_unlock(&adapter->ioctl_sx); } /** * ena_attach - Device Initialization Routine * @pdev: device information struct * * Returns 0 on success, or an error code on failure. * * ena_attach initializes an adapter identified by a device structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur.
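With ena_destroy_device() and ena_restore_device() now exported (the static qualifiers are dropped above), any external caller has to mirror the reset task's locking so the teardown/bring-up pair stays atomic with respect to the ioctl paths. A sketch of that discipline, with an illustrative wrapper name:

	static void
	reset_device_locked(struct ena_adapter *adapter)
	{
		/* Serialize against ioctls that also reconfigure the device. */
		sx_xlock(&adapter->ioctl_sx);
		ena_destroy_device(adapter, false);	/* false: not graceful */
		(void)ena_restore_device(adapter);
		sx_unlock(&adapter->ioctl_sx);
	}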
**/ static int ena_attach(device_t pdev) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_llq_configurations llq_config; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; static int version_printed; struct ena_adapter *adapter; struct ena_com_dev *ena_dev = NULL; const char *queue_type_str; int io_queue_num; int rid, rc; adapter = device_get_softc(pdev); adapter->pdev = pdev; mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); /* Set up the timer service */ callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; if (version_printed++ == 0) device_printf(pdev, "%s\n", ena_version); /* Allocate memory for ena_dev structure */ ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, M_WAITOK | M_ZERO); adapter->ena_dev = ena_dev; ena_dev->dmadev = pdev; rid = PCIR_BAR(ENA_REG_BAR); adapter->memory = NULL; adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->registers == NULL)) { device_printf(pdev, "unable to allocate bus resource: registers!\n"); rc = ENOMEM; goto err_dev_free; } ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, M_WAITOK | M_ZERO); /* Store register resources */ ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = rman_get_bustag(adapter->registers); ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(adapter->registers); if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { device_printf(pdev, "failed to map registers bar\n"); rc = ENXIO; goto err_bus_free; } ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; /* Initially clear all the flags */ ENA_FLAG_ZERO(adapter); /* Device initialization */ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); if (unlikely(rc != 0)) { device_printf(pdev, "ENA device init failed! (err: %d)\n", rc); rc = ENXIO; goto err_bus_free; } set_default_llq_configurations(&llq_config); #if defined(__arm__) || defined(__aarch64__) /* * Force LLQ disable, as the driver does not support WC enablement * on the ARM architecture. Using LLQ without WC would affect * performance in a negative way.
*/ ena_dev->supported_features &= ~(1 << ENA_ADMIN_LLQ); #endif rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, &llq_config); if (unlikely(rc != 0)) { device_printf(pdev, "failed to set placement policy\n"); goto err_com_free; } if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) queue_type_str = "Regular"; else queue_type_str = "Low Latency"; device_printf(pdev, "Placement policy: %s\n", queue_type_str); adapter->keep_alive_timestamp = getsbinuptime(); adapter->tx_offload_cap = get_feat_ctx.offload.tx; memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, ETHER_ADDR_LEN); calc_queue_ctx.ena_dev = ena_dev; calc_queue_ctx.get_feat_ctx = &get_feat_ctx; calc_queue_ctx.pdev = pdev; /* calculate IO queue number to create */ io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx); ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", io_queue_num); adapter->num_queues = io_queue_num; adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; /* Set the requested Rx ring size */ adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE; /* calculate ring sizes */ rc = ena_calc_queue_size(adapter, &calc_queue_ctx); if (unlikely((rc != 0) || (io_queue_num <= 0))) { rc = EFAULT; goto err_com_free; } adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; /* set up dma tags for rx and tx buffers */ rc = ena_setup_tx_dma_tag(adapter); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to create TX DMA tag\n"); goto err_com_free; } rc = ena_setup_rx_dma_tag(adapter); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to create RX DMA tag\n"); goto err_tx_tag_free; } /* initialize rings' basic information */ device_printf(pdev, "Creating %d io queues.
Rx queue size: %d, Tx queue size: %d\n", io_queue_num, calc_queue_ctx.rx_queue_size, calc_queue_ctx.tx_queue_size); ena_init_io_rings(adapter); rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to enable and set the admin interrupts\n"); goto err_io_free; } /* setup network interface */ rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); if (unlikely(rc != 0)) { device_printf(pdev, "Error with network interface setup\n"); goto err_msix_free; } /* Initialize reset task queue */ TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); adapter->reset_tq = taskqueue_create("ena_reset_enqueue", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", device_get_nameunit(adapter->pdev)); /* Initialize statistics */ ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_sysctl_add_nodes(adapter); +#ifdef DEV_NETMAP + rc = ena_netmap_attach(adapter); + if (rc != 0) { + device_printf(pdev, "netmap attach failed: %d\n", rc); + goto err_detach; + } +#endif /* DEV_NETMAP */ + /* Tell the stack that the interface is not active */ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); return (0); +#ifdef DEV_NETMAP +err_detach: + ether_ifdetach(adapter->ifp); +#endif /* DEV_NETMAP */ err_msix_free: ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_io_free: ena_free_all_io_rings_resources(adapter); ena_free_rx_dma_tag(adapter); err_tx_tag_free: ena_free_tx_dma_tag(adapter); err_com_free: ena_com_admin_destroy(ena_dev); ena_com_delete_host_info(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); err_bus_free: free(ena_dev->bus, M_DEVBUF); ena_free_pci_resources(adapter); err_dev_free: free(ena_dev, M_DEVBUF); return (rc); } /** * ena_detach - Device Removal Routine * @pdev: device information struct * * ena_detach is called by the device subsystem to alert the driver * that it should release a PCI device. 
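Before ena_detach (below) frees anything, it must stop all asynchronous work that could still dereference the adapter. The cancel-then-drain loop it uses is the idiomatic taskqueue(9) shutdown sequence; a commented sketch of the pattern (tq and task stand in for the driver's reset queue and task):

	/* taskqueue_cancel() returns non-zero while the task is running,
	 * so drain the current run and retry until the task is neither
	 * queued nor executing; only then is it safe to free the queue. */
	while (taskqueue_cancel(tq, &task, NULL) != 0)
		taskqueue_drain(tq, &task);
	taskqueue_free(tq);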
**/ static int ena_detach(device_t pdev) { struct ena_adapter *adapter = device_get_softc(pdev); struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* Make sure VLANs are not using the driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(adapter->pdev, "VLAN is in use, detach first\n"); return (EBUSY); } ether_ifdetach(adapter->ifp); /* Free reset task and callout */ callout_drain(&adapter->timer_service); while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) taskqueue_drain(adapter->reset_tq, &adapter->reset_task); taskqueue_free(adapter->reset_tq); sx_xlock(&adapter->ioctl_sx); ena_down(adapter); ena_destroy_device(adapter, true); sx_unlock(&adapter->ioctl_sx); +#ifdef DEV_NETMAP + netmap_detach(adapter->ifp); +#endif /* DEV_NETMAP */ + ena_free_all_io_rings_resources(adapter); ena_free_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_free_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); rc = ena_free_rx_dma_tag(adapter); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Unmapped RX DMA tag associations\n"); rc = ena_free_tx_dma_tag(adapter); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Unmapped TX DMA tag associations\n"); ena_free_irqs(adapter); ena_free_pci_resources(adapter); if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) ena_com_rss_destroy(ena_dev); ena_com_delete_host_info(ena_dev); mtx_destroy(&adapter->global_mtx); sx_destroy(&adapter->ioctl_sx); if_free(adapter->ifp); if (ena_dev->bus != NULL) free(ena_dev->bus, M_DEVBUF); if (ena_dev != NULL) free(ena_dev, M_DEVBUF); return (bus_generic_detach(pdev)); } /****************************************************************************** ******************************** AENQ Handlers ******************************* *****************************************************************************/ /** * ena_update_on_link_change: * Notify the network interface about the change in link status **/ static void ena_update_on_link_change(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_link_change_desc *aenq_desc; int status; if_t ifp; aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; ifp = adapter->ifp; status = aenq_desc->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; if (status != 0) { device_printf(adapter->pdev, "link is UP\n"); ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) if_link_state_change(ifp, LINK_STATE_UP); } else { device_printf(adapter->pdev, "link is DOWN\n"); if_link_state_change(ifp, LINK_STATE_DOWN); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); } } static void ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_ena_hw_hints *hints; ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); switch (aenq_e->aenq_common_desc.syndrom) { case ENA_ADMIN_UPDATE_HINTS: hints = (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); ena_update_hints(adapter, hints); break; default: device_printf(adapter->pdev, "Invalid aenq notification link state %d\n", aenq_e->aenq_common_desc.syndrom); } } /** * This handler will be called for an unknown event group or unimplemented handlers **/ static void unimplemented_aenq_handler(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; device_printf(adapter->pdev, "Unknown event was received or event with unimplemented handler\n"); } static struct ena_aenq_handlers aenq_handlers = { .handlers = { [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, [ENA_ADMIN_NOTIFICATION] = ena_notification, [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, }, .unimplemented_handler = unimplemented_aenq_handler }; /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ena_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ena_probe), DEVMETHOD(device_attach, ena_attach), DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END }; static driver_t ena_driver = { "ena", ena_methods, sizeof(struct ena_adapter), }; devclass_t ena_devclass; DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, sizeof(ena_vendor_info_array[0]), nitems(ena_vendor_info_array) - 1); MODULE_DEPEND(ena, pci, 1, 1, 1); MODULE_DEPEND(ena, ether, 1, 1, 1); +#ifdef DEV_NETMAP +MODULE_DEPEND(ena, netmap, 1, 1, 1); +#endif /* DEV_NETMAP */ /*********************************************************************/ Index: stable/11/sys/dev/ena/ena.h =================================================================== --- stable/11/sys/dev/ena/ena.h (revision 361467) +++ stable/11/sys/dev/ena/ena.h (revision 361468) @@ -1,468 +1,512 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef ENA_H #define ENA_H #include #include "ena-com/ena_com.h" #include "ena-com/ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 2 -#define DRV_MODULE_VER_MINOR 0 -#define DRV_MODULE_VER_SUBMINOR 0 +#define DRV_MODULE_VER_MINOR 1 +#define DRV_MODULE_VER_SUBMINOR 2 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION #define DRV_MODULE_VERSION \ __XSTRING(DRV_MODULE_VER_MAJOR) "." \ __XSTRING(DRV_MODULE_VER_MINOR) "." 
\ __XSTRING(DRV_MODULE_VER_SUBMINOR) #endif #define DEVICE_NAME "Elastic Network Adapter (ENA)" #define DEVICE_DESC "ENA adapter" /* Calculate DMA mask - width for ena cannot exceed 48, so it is safe */ #define ENA_DMA_BIT_MASK(x) ((1ULL << (x)) - 1ULL) /* 1 for AENQ + ADMIN */ #define ENA_ADMIN_MSIX_VEC 1 #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) #define ENA_REG_BAR 0 #define ENA_MEM_BAR 2 #define ENA_BUS_DMA_SEGS 32 #define ENA_DEFAULT_BUF_RING_SIZE 4096 #define ENA_DEFAULT_RING_SIZE 1024 /* * Refill Rx queue when number of required descriptors is above * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET */ #define ENA_RX_REFILL_THRESH_DIVIDER 8 #define ENA_RX_REFILL_THRESH_PACKET 256 #define ENA_IRQNAME_SIZE 40 #define ENA_PKT_MAX_BUFS 19 #define ENA_RX_RSS_TABLE_LOG_SIZE 7 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) #define ENA_HASH_KEY_SIZE 40 #define ENA_MAX_FRAME_LEN 10000 #define ENA_MIN_FRAME_LEN 60 #define ENA_TX_RESUME_THRESH (ENA_PKT_MAX_BUFS + 2) #define DB_THRESHOLD 64 #define TX_COMMIT 32 /* * TX budget for cleaning. It should be half of the RX budget to reduce the * amount of TCP retransmissions. */ #define TX_BUDGET 128 /* RX cleanup budget. -1 stands for infinity. */ #define RX_BUDGET 256 /* * How many times we can repeat cleanup in the io irq handling routine if the * RX or TX budget was depleted. */ #define CLEAN_BUDGET 8 #define RX_IRQ_INTERVAL 20 #define TX_IRQ_INTERVAL 50 #define ENA_MIN_MTU 128 #define ENA_TSO_MAXSIZE 65536 #define ENA_MMIO_DISABLE_REG_READ BIT(0) #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) #define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) #define ENA_IO_TXQ_IDX(q) (2 * (q)) #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) #define ENA_MGMNT_IRQ_IDX 0 #define ENA_IO_IRQ_FIRST_IDX 1 #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) #define ENA_MAX_NO_INTERRUPT_ITERATIONS 3 /* * ENA device should send keep alive msg every 1 sec. * We wait for 6 sec just to be on the safe side. */ #define DEFAULT_KEEP_ALIVE_TO (SBT_1S * 6) /* Time (in sbintime_t units) before concluding the transmitter is hung.
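All of the watchdog arithmetic is done in sbintime_t: the keep-alive AENQ handler stores getsbinuptime() with a release store, and the timer service compares the stamp's age against keep_alive_timeout with an acquire load. A minimal sketch of the expiry test (the wrapper function is hypothetical; the fields are the driver's):

	static bool
	keep_alive_expired(struct ena_adapter *adapter)
	{
		sbintime_t age;

		/* Acquire load pairs with the release store in the handler. */
		age = getsbinuptime() -
		    atomic_load_acq_64(&adapter->keep_alive_timestamp);
		return (age > adapter->keep_alive_timeout);	/* 6 s default */
	}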
*/ #define DEFAULT_TX_CMP_TO (SBT_1S * 5) /* Number of queues to check for missing completions per timer tick */ #define DEFAULT_TX_MONITORED_QUEUES (4) /* Max number of timed-out packets before device reset */ #define DEFAULT_TX_CMP_THRESHOLD (128) /* * Supported PCI vendor and device IDs */ #define PCI_VENDOR_ID_AMAZON 0x1d0f #define PCI_DEV_ID_ENA_PF 0x0ec2 #define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2 #define PCI_DEV_ID_ENA_VF 0xec20 #define PCI_DEV_ID_ENA_LLQ_VF 0xec21 /* * Flags indicating current ENA driver state */ enum ena_flags_t { ENA_FLAG_DEVICE_RUNNING, ENA_FLAG_DEV_UP, ENA_FLAG_LINK_UP, ENA_FLAG_MSIX_ENABLED, ENA_FLAG_TRIGGER_RESET, ENA_FLAG_ONGOING_RESET, ENA_FLAG_DEV_UP_BEFORE_RESET, ENA_FLAG_RSS_ACTIVE, ENA_FLAGS_NUMBER = ENA_FLAG_RSS_ACTIVE }; BITSET_DEFINE(_ena_state, ENA_FLAGS_NUMBER); typedef struct _ena_state ena_state_t; #define ENA_FLAG_ZERO(adapter) \ BIT_ZERO(ENA_FLAGS_NUMBER, &(adapter)->flags) #define ENA_FLAG_ISSET(bit, adapter) \ BIT_ISSET(ENA_FLAGS_NUMBER, (bit), &(adapter)->flags) #define ENA_FLAG_SET_ATOMIC(bit, adapter) \ BIT_SET_ATOMIC(ENA_FLAGS_NUMBER, (bit), &(adapter)->flags) #define ENA_FLAG_CLEAR_ATOMIC(bit, adapter) \ BIT_CLR_ATOMIC(ENA_FLAGS_NUMBER, (bit), &(adapter)->flags) struct msix_entry { int entry; int vector; }; typedef struct _ena_vendor_info_t { uint16_t vendor_id; uint16_t device_id; unsigned int index; } ena_vendor_info_t; struct ena_irq { /* Interrupt resources */ struct resource *res; driver_filter_t *handler; void *data; void *cookie; unsigned int vector; bool requested; int cpu; char name[ENA_IRQNAME_SIZE]; }; struct ena_que { struct ena_adapter *adapter; struct ena_ring *tx_ring; struct ena_ring *rx_ring; struct task cleanup_task; struct taskqueue *cleanup_tq; uint32_t id; int cpu; }; struct ena_calc_queue_size_ctx { struct ena_com_dev_get_features_ctx *get_feat_ctx; struct ena_com_dev *ena_dev; device_t pdev; uint16_t rx_queue_size; uint16_t tx_queue_size; uint16_t max_tx_sgl_size; uint16_t max_rx_sgl_size; }; +#ifdef DEV_NETMAP +struct ena_netmap_tx_info { + uint32_t socket_buf_idx[ENA_PKT_MAX_BUFS]; + bus_dmamap_t map_seg[ENA_PKT_MAX_BUFS]; + unsigned int sockets_used; +}; +#endif + struct ena_tx_buffer { struct mbuf *mbuf; /* # of ena desc for this specific mbuf * (includes data desc and metadata desc) */ unsigned int tx_descs; /* # of buffers used by this mbuf */ unsigned int num_of_bufs; - bus_dmamap_t map_head; - bus_dmamap_t map_seg; - /* Indicate if segments of the mbuf were mapped */ - bool seg_mapped; - /* Indicate if bufs[0] maps the linear data of the mbuf */ - bool head_mapped; + bus_dmamap_t dmamap; /* Used to detect missing tx packets */ struct bintime timestamp; bool print_once; +#ifdef DEV_NETMAP + struct ena_netmap_tx_info nm_info; +#endif /* DEV_NETMAP */ + struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; } __aligned(CACHE_LINE_SIZE); struct ena_rx_buffer { struct mbuf *mbuf; bus_dmamap_t map; struct ena_com_buf ena_buf; +#ifdef DEV_NETMAP + uint32_t netmap_buf_idx; +#endif /* DEV_NETMAP */ } __aligned(CACHE_LINE_SIZE); struct ena_stats_tx { counter_u64_t cnt; counter_u64_t bytes; counter_u64_t prepare_ctx_err; counter_u64_t dma_mapping_err; counter_u64_t doorbells; counter_u64_t missing_tx_comp; counter_u64_t bad_req_id; counter_u64_t collapse; counter_u64_t collapse_err; counter_u64_t queue_wakeup; counter_u64_t queue_stop; counter_u64_t llq_buffer_copy; }; struct ena_stats_rx { counter_u64_t cnt; counter_u64_t bytes; counter_u64_t refil_partial; counter_u64_t bad_csum; counter_u64_t mjum_alloc_fail; counter_u64_t
mbuf_alloc_fail; counter_u64_t dma_mapping_err; counter_u64_t bad_desc_num; counter_u64_t bad_req_id; counter_u64_t empty_rx_ring; }; struct ena_ring { /* Holds the empty requests for TX/RX out of order completions */ union { uint16_t *free_tx_ids; uint16_t *free_rx_ids; }; struct ena_com_dev *ena_dev; struct ena_adapter *adapter; struct ena_com_io_cq *ena_com_io_cq; struct ena_com_io_sq *ena_com_io_sq; uint16_t qid; /* Determines if device will use LLQ or normal mode for TX */ enum ena_admin_placement_policy_type tx_mem_queue_type; - /* The maximum length the driver can push to the device (for LLQ) */ - uint8_t tx_max_header_size; + union { + /* The maximum length the driver can push to the device (for LLQ) */ + uint8_t tx_max_header_size; + /* The maximum (and default) mbuf size for the Rx descriptor. */ + uint16_t rx_mbuf_sz; + }; + bool first_interrupt; uint16_t no_interrupt_event_cnt; struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]; /* * Fields used for Adaptive Interrupt Modulation - to be implemented in * future releases */ uint32_t smoothed_interval; enum ena_intr_moder_level moder_tbl_idx; struct ena_que *que; struct lro_ctrl lro; uint16_t next_to_use; uint16_t next_to_clean; union { struct ena_tx_buffer *tx_buffer_info; /* context of a tx packet */ struct ena_rx_buffer *rx_buffer_info; /* context of an rx packet */ }; int ring_size; /* number of tx/rx_buffer_info's entries */ struct buf_ring *br; /* only for TX */ uint32_t buf_ring_size; struct mtx ring_mtx; char mtx_name[16]; struct { struct task enqueue_task; struct taskqueue *enqueue_tq; }; union { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; union { int empty_rx_queue; /* For Tx ring to indicate if it's running or not */ bool running; }; /* How many packets are sent in one Tx loop, used for doorbells */ uint32_t acum_pkts; /* Used for LLQ */ uint8_t *push_buf_intermediate_buf; + +#ifdef DEV_NETMAP + bool initialized; +#endif /* DEV_NETMAP */ } __aligned(CACHE_LINE_SIZE); struct ena_stats_dev { counter_u64_t wd_expired; counter_u64_t interface_up; counter_u64_t interface_down; counter_u64_t admin_q_pause; }; struct ena_hw_stats { counter_u64_t rx_packets; counter_u64_t tx_packets; counter_u64_t rx_bytes; counter_u64_t tx_bytes; counter_u64_t rx_drops; }; /* Board specific private data structure */ struct ena_adapter { struct ena_com_dev *ena_dev; /* OS defined structs */ if_t ifp; device_t pdev; struct ifmedia media; /* OS resources */ struct resource *memory; struct resource *registers; struct mtx global_mtx; struct sx ioctl_sx; /* MSI-X */ struct msix_entry *msix_entries; int msix_vecs; /* DMA tags used throughout the driver adapter for Tx and Rx */ bus_dma_tag_t tx_buf_tag; bus_dma_tag_t rx_buf_tag; int dma_width; uint32_t max_mtu; uint16_t max_tx_sgl_size; uint16_t max_rx_sgl_size; uint32_t tx_offload_cap; /* Tx fast path data */ int num_queues; unsigned int tx_ring_size; unsigned int rx_ring_size; uint16_t buf_ring_size; /* RSS */ uint8_t rss_ind_tbl[ENA_RX_RSS_TABLE_SIZE]; uint8_t mac_addr[ETHER_ADDR_LEN]; /* mdio and phy */ ena_state_t flags; /* Queue will represent one TX and one RX ring */ struct ena_que que[ENA_MAX_NUM_IO_QUEUES] __aligned(CACHE_LINE_SIZE); /* TX */ struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES] __aligned(CACHE_LINE_SIZE); /* RX */ struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES] __aligned(CACHE_LINE_SIZE); struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)]; /* Timer service */ struct callout timer_service; sbintime_t keep_alive_timestamp; uint32_t next_monitored_tx_qid;
struct task reset_task; struct taskqueue *reset_tq; int wd_active; sbintime_t keep_alive_timeout; sbintime_t missing_tx_timeout; uint32_t missing_tx_max_queues; uint32_t missing_tx_threshold; /* Statistics */ struct ena_stats_dev dev_stats; struct ena_hw_stats hw_stats; enum ena_regs_reset_reason_types reset_reason; }; #define ENA_RING_MTX_LOCK(_ring) mtx_lock(&(_ring)->ring_mtx) #define ENA_RING_MTX_TRYLOCK(_ring) mtx_trylock(&(_ring)->ring_mtx) #define ENA_RING_MTX_UNLOCK(_ring) mtx_unlock(&(_ring)->ring_mtx) static inline int ena_mbuf_count(struct mbuf *mbuf) { int count = 1; while ((mbuf = mbuf->m_next) != NULL) ++count; return count; +} + +int ena_up(struct ena_adapter *); +void ena_down(struct ena_adapter *); +int ena_restore_device(struct ena_adapter *); +void ena_destroy_device(struct ena_adapter *, bool); +int ena_refill_rx_bufs(struct ena_ring *, uint32_t); + +static inline int +validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) +{ + if (likely(req_id < rx_ring->ring_size)) + return (0); + + device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n", + req_id); + counter_u64_add(rx_ring->rx_stats.bad_req_id, 1); + + /* Trigger device reset */ + if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter))) { + rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter); + } + + return (EFAULT); } #endif /* !(ENA_H) */ Index: stable/11/sys/dev/ena/ena_datapath.c =================================================================== --- stable/11/sys/dev/ena/ena_datapath.c (nonexistent) +++ stable/11/sys/dev/ena/ena_datapath.c (revision 361468) @@ -0,0 +1,1143 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 + */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include "ena.h" +#include "ena_datapath.h" +#ifdef DEV_NETMAP +#include "ena_netmap.h" +#endif /* DEV_NETMAP */ + +/********************************************************************* + * Static function prototypes + *********************************************************************/ + +static int ena_tx_cleanup(struct ena_ring *); +static int ena_rx_cleanup(struct ena_ring *); +static inline int validate_tx_req_id(struct ena_ring *, uint16_t); +static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *, + struct mbuf *); +static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *, + struct ena_com_rx_ctx *, uint16_t *); +static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *, + struct mbuf *); +static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *); +static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, + struct mbuf **mbuf); +static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **); +static void ena_start_xmit(struct ena_ring *); + +/********************************************************************* + * Global functions + *********************************************************************/ + +void +ena_cleanup(void *arg, int pending) +{ + struct ena_que *que = arg; + struct ena_adapter *adapter = que->adapter; + if_t ifp = adapter->ifp; + struct ena_ring *tx_ring; + struct ena_ring *rx_ring; + struct ena_com_io_cq* io_cq; + struct ena_eth_io_intr_reg intr_reg; + int qid, ena_qid; + int txc, rxc, i; + + if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) + return; + + ena_trace(ENA_DBG, "MSI-X TX/RX routine\n"); + + tx_ring = que->tx_ring; + rx_ring = que->rx_ring; + qid = que->id; + ena_qid = ENA_IO_TXQ_IDX(qid); + io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; + + tx_ring->first_interrupt = true; + rx_ring->first_interrupt = true; + + for (i = 0; i < CLEAN_BUDGET; ++i) { + rxc = ena_rx_cleanup(rx_ring); + txc = ena_tx_cleanup(tx_ring); + + if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) + return; + + if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) + break; + } + + /* Signal that work is done and unmask interrupt */ + ena_com_update_intr_reg(&intr_reg, + RX_IRQ_INTERVAL, + TX_IRQ_INTERVAL, + true); + ena_com_unmask_intr(io_cq, &intr_reg); +} + +void +ena_deferred_mq_start(void *arg, int pending) +{ + struct ena_ring *tx_ring = (struct ena_ring *)arg; + struct ifnet *ifp = tx_ring->adapter->ifp; + + while (!drbr_empty(ifp, tx_ring->br) && + tx_ring->running && + (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + ENA_RING_MTX_LOCK(tx_ring); + ena_start_xmit(tx_ring); + ENA_RING_MTX_UNLOCK(tx_ring); + } +} + +int +ena_mq_start(if_t ifp, struct mbuf *m) +{ + struct ena_adapter *adapter = ifp->if_softc; + struct ena_ring *tx_ring; + int ret, is_drbr_empty; + uint32_t i; + + if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) + return (ENODEV); + + /* Which queue to use */ + /* + * If everything is set up correctly, this is the same bucket + * as the one the current CPU is in, which should improve + * performance.
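The selection rule just below keeps every flow on a single ring: mbufs that carry an RSS hash are steered by flowid, everything else follows the current CPU. Restated as a hedged helper (the function name is illustrative, not the driver's):

	static inline uint32_t
	select_tx_ring(struct ena_adapter *adapter, struct mbuf *m)
	{
		/* Hashed flows stick to one ring; the rest follow the CPU. */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
			return (m->m_pkthdr.flowid % adapter->num_queues);
		return (curcpu % adapter->num_queues);
	}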
 + */ + if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { + i = m->m_pkthdr.flowid % adapter->num_queues; + } else { + i = curcpu % adapter->num_queues; + } + tx_ring = &adapter->tx_ring[i]; + + /* Check if drbr is empty before putting packet */ + is_drbr_empty = drbr_empty(ifp, tx_ring->br); + ret = drbr_enqueue(ifp, tx_ring->br, m); + if (unlikely(ret != 0)) { + taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); + return (ret); + } + + if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { + ena_start_xmit(tx_ring); + ENA_RING_MTX_UNLOCK(tx_ring); + } else { + taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); + } + + return (0); +} + +void +ena_qflush(if_t ifp) +{ + struct ena_adapter *adapter = ifp->if_softc; + struct ena_ring *tx_ring = adapter->tx_ring; + int i; + + for (i = 0; i < adapter->num_queues; ++i, ++tx_ring) + if (!drbr_empty(ifp, tx_ring->br)) { + ENA_RING_MTX_LOCK(tx_ring); + drbr_flush(ifp, tx_ring->br); + ENA_RING_MTX_UNLOCK(tx_ring); + } + + if_qflush(ifp); +} + +/********************************************************************* + * Static functions + *********************************************************************/ + +static inline int +validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) +{ + struct ena_adapter *adapter = tx_ring->adapter; + struct ena_tx_buffer *tx_info = NULL; + + if (likely(req_id < tx_ring->ring_size)) { + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (tx_info->mbuf != NULL) + return (0); + device_printf(adapter->pdev, + "tx_info doesn't have valid mbuf\n"); + } + + device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); + counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); + + /* Trigger device reset */ + adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; + ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); + + return (EFAULT); +} + +/** + * ena_tx_cleanup - clear sent packets and corresponding descriptors + * @tx_ring: ring for which we want to clean packets + * + * Once packets are sent, we ask the device in a loop for no longer used + * descriptors. We find the related mbuf chain in a map (index in an array) + * and free it, then update ring state. + * This is performed in an "endless" loop, updating ring pointers every + * TX_COMMIT. The first check for free descriptors is performed before the + * actual loop, and then repeated at the loop end.
 + **/ +static int +ena_tx_cleanup(struct ena_ring *tx_ring) +{ + struct ena_adapter *adapter; + struct ena_com_io_cq* io_cq; + uint16_t next_to_clean; + uint16_t req_id; + uint16_t ena_qid; + unsigned int total_done = 0; + int rc; + int commit = TX_COMMIT; + int budget = TX_BUDGET; + int work_done; + bool above_thresh; + + adapter = tx_ring->que->adapter; + ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); + io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; + next_to_clean = tx_ring->next_to_clean; + +#ifdef DEV_NETMAP + if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS) + return (0); +#endif /* DEV_NETMAP */ + + do { + struct ena_tx_buffer *tx_info; + struct mbuf *mbuf; + + rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); + if (unlikely(rc != 0)) + break; + + rc = validate_tx_req_id(tx_ring, req_id); + if (unlikely(rc != 0)) + break; + + tx_info = &tx_ring->tx_buffer_info[req_id]; + + mbuf = tx_info->mbuf; + + tx_info->mbuf = NULL; + bintime_clear(&tx_info->timestamp); + + bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(adapter->tx_buf_tag, + tx_info->dmamap); + + ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", + tx_ring->qid, mbuf); + + m_freem(mbuf); + + total_done += tx_info->tx_descs; + + tx_ring->free_tx_ids[next_to_clean] = req_id; + next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, + tx_ring->ring_size); + + if (unlikely(--commit == 0)) { + commit = TX_COMMIT; + /* update ring state every TX_COMMIT descriptors */ + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack( + &adapter->ena_dev->io_sq_queues[ena_qid], + total_done); + ena_com_update_dev_comp_head(io_cq); + total_done = 0; + } + } while (likely(--budget)); + + work_done = TX_BUDGET - budget; + + ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n", + tx_ring->qid, work_done); + + /* If there is still something to commit, update ring state */ + if (likely(commit != TX_COMMIT)) { + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], + total_done); + ena_com_update_dev_comp_head(io_cq); + } + + /* + * Need to make the ring's circular update visible to + * ena_xmit_mbuf() before checking for tx_ring->running.
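The barrier that follows pairs with the one in ena_xmit_mbuf(): cleanup publishes the freed descriptors before re-reading running, while the transmit path stops the queue before re-checking free space, so at least one side always observes the other. A sketch of the lost-wakeup pattern (have_space() and wake_queue() are illustrative stand-ins, not driver functions):

	/* Consumer (cleanup): free space first, then look for a stopped queue. */
	tx_ring->next_to_clean = next_to_clean;
	mb();				/* publish before reading 'running' */
	if (!tx_ring->running && have_space(tx_ring))
		wake_queue(tx_ring);

	/* Producer (xmit): stop the queue first, then re-check space once. */
	tx_ring->running = false;
	mb();				/* order the stop before the re-check */
	if (have_space(tx_ring))
		tx_ring->running = true;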
 + */ + mb(); + + above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_RESUME_THRESH); + if (unlikely(!tx_ring->running && above_thresh)) { + ENA_RING_MTX_LOCK(tx_ring); + above_thresh = + ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_RESUME_THRESH); + if (!tx_ring->running && above_thresh) { + tx_ring->running = true; + counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); + taskqueue_enqueue(tx_ring->enqueue_tq, + &tx_ring->enqueue_task); + } + ENA_RING_MTX_UNLOCK(tx_ring); + } + + return (work_done); +} + +static void +ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, + struct mbuf *mbuf) +{ + struct ena_adapter *adapter = rx_ring->adapter; + + if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { + mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; + + if (ena_rx_ctx->frag && + (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { + M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); + return; + } + + switch (ena_rx_ctx->l3_proto) { + case ENA_ETH_IO_L3_PROTO_IPV4: + switch (ena_rx_ctx->l4_proto) { + case ENA_ETH_IO_L4_PROTO_TCP: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); + break; + case ENA_ETH_IO_L4_PROTO_UDP: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); + break; + default: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); + } + break; + case ENA_ETH_IO_L3_PROTO_IPV6: + switch (ena_rx_ctx->l4_proto) { + case ENA_ETH_IO_L4_PROTO_TCP: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); + break; + case ENA_ETH_IO_L4_PROTO_UDP: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); + break; + default: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); + } + break; + case ENA_ETH_IO_L3_PROTO_UNKNOWN: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); + break; + default: + M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); + } + } else { + mbuf->m_pkthdr.flowid = rx_ring->qid; + M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); + } +} + +/** + * ena_rx_mbuf - assemble mbuf from descriptors + * @rx_ring: ring for which we want to clean packets + * @ena_bufs: buffer info + * @ena_rx_ctx: metadata for this packet(s) + * @next_to_clean: ring pointer, will be updated only upon success + * + **/ +static struct mbuf* +ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, + struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) +{ + struct mbuf *mbuf; + struct ena_rx_buffer *rx_info; + struct ena_adapter *adapter; + unsigned int descs = ena_rx_ctx->descs; + int rc; + uint16_t ntc, len, req_id, buf = 0; + + ntc = *next_to_clean; + adapter = rx_ring->adapter; + + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc != 0)) + return (NULL); + + rx_info = &rx_ring->rx_buffer_info[req_id]; + if (unlikely(rx_info->mbuf == NULL)) { + device_printf(adapter->pdev, "NULL mbuf in rx_info"); + return (NULL); + } + + ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", + rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr); + + bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, + BUS_DMASYNC_POSTREAD); + mbuf = rx_info->mbuf; + mbuf->m_flags |= M_PKTHDR; + mbuf->m_pkthdr.len = len; + mbuf->m_len = len; + mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp; + + /* Fill mbuf with hash key and its interpretation for optimization */ + ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); + + ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", + mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); + + /* DMA address is not needed anymore, unmap it */ + bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); + + rx_info->mbuf = NULL; + rx_ring->free_rx_ids[ntc] = req_id; + ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); + + /* + * While we have more than one descriptor for the received packet, + * append the other mbufs to the main one + */ + while (--descs) { + ++buf; + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc != 0)) { + /* + * If the req_id is invalid, then the device will be + * reset. In that case we must free all mbufs that + * were already gathered. + */ + m_freem(mbuf); + return (NULL); + } + rx_info = &rx_ring->rx_buffer_info[req_id]; + + if (unlikely(rx_info->mbuf == NULL)) { + device_printf(adapter->pdev, "NULL mbuf in rx_info"); + /* + * If one of the required mbufs was not allocated yet, + * we can break here. + * All earlier used descriptors will be reallocated + * later and unused mbufs can be reused. + * The next_to_clean pointer will not be updated in case + * of an error, so the caller should advance it manually + * in the error handling routine to keep it up to date + * with the hw ring. + */ + m_freem(mbuf); + return (NULL); + } + + bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, + BUS_DMASYNC_POSTREAD); + if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { + counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); + ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n", + mbuf); + } + + ena_trace(ENA_DBG | ENA_RXPTH, + "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len); + + /* Free already appended mbuf, it won't be useful anymore */ + bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); + m_freem(rx_info->mbuf); + rx_info->mbuf = NULL; + + rx_ring->free_rx_ids[ntc] = req_id; + ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); + } + + *next_to_clean = ntc; + + return (mbuf); +} + +/** + * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum + **/ +static inline void +ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, + struct mbuf *mbuf) +{ + + /* if IP and error */ + if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && + ena_rx_ctx->l3_csum_err)) { + /* ipv4 checksum error */ + mbuf->m_pkthdr.csum_flags = 0; + counter_u64_add(rx_ring->rx_stats.bad_csum, 1); + ena_trace(ENA_DBG, "RX IPv4 header checksum error\n"); + return; + } + + /* if TCP/UDP */ + if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || + (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { + if (ena_rx_ctx->l4_csum_err) { + /* TCP/UDP checksum error */ + mbuf->m_pkthdr.csum_flags = 0; + counter_u64_add(rx_ring->rx_stats.bad_csum, 1); + ena_trace(ENA_DBG, "RX L4 checksum error\n"); + } else { + mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; + } + } +} + +/** + * ena_rx_cleanup - handle rx irq + * @arg: ring for which irq is being handled + **/ +static int +ena_rx_cleanup(struct ena_ring *rx_ring) +{ + struct ena_adapter *adapter; + struct mbuf *mbuf; + struct ena_com_rx_ctx ena_rx_ctx; + struct ena_com_io_cq* io_cq; + struct ena_com_io_sq* io_sq; + if_t ifp; + uint16_t ena_qid; + uint16_t next_to_clean; + uint32_t refill_required; + uint32_t refill_threshold; + uint32_t do_if_input = 0; + unsigned int qid; + int rc, i; + int budget = RX_BUDGET; +#ifdef DEV_NETMAP + int done; +#endif /* DEV_NETMAP */ + + adapter = rx_ring->que->adapter; + ifp = adapter->ifp; + qid = rx_ring->que->id; + ena_qid = ENA_IO_RXQ_IDX(qid); + io_cq =
&adapter->ena_dev->io_cq_queues[ena_qid]; + io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; + next_to_clean = rx_ring->next_to_clean; + +#ifdef DEV_NETMAP + if (netmap_rx_irq(adapter->ifp, rx_ring->qid, &done) != NM_IRQ_PASS) + return (0); +#endif /* DEV_NETMAP */ + + ena_trace(ENA_DBG, "rx: qid %d\n", qid); + + do { + ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; + ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; + ena_rx_ctx.descs = 0; + bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, + io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); + rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); + + if (unlikely(rc != 0)) + goto error; + + if (unlikely(ena_rx_ctx.descs == 0)) + break; + + ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " + "descs #: %d l3 proto %d l4 proto %d hash: %x\n", + rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, + ena_rx_ctx.l4_proto, ena_rx_ctx.hash); + + /* Receive mbuf from the ring */ + mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, + &ena_rx_ctx, &next_to_clean); + bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, + io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); + /* Exit if we failed to retrieve a buffer */ + if (unlikely(mbuf == NULL)) { + for (i = 0; i < ena_rx_ctx.descs; ++i) { + rx_ring->free_rx_ids[next_to_clean] = + rx_ring->ena_bufs[i].req_id; + next_to_clean = + ENA_RX_RING_IDX_NEXT(next_to_clean, + rx_ring->ring_size); + + } + break; + } + + if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || + ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { + ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); + } + + counter_enter(); + counter_u64_add_protected(rx_ring->rx_stats.bytes, + mbuf->m_pkthdr.len); + counter_u64_add_protected(adapter->hw_stats.rx_bytes, + mbuf->m_pkthdr.len); + counter_exit(); + /* + * LRO is only for IP/TCP packets and TCP checksum of the packet + * should be computed by hardware. + */ + do_if_input = 1; + if (((ifp->if_capenable & IFCAP_LRO) != 0) && + ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && + (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { + /* + * Send to the stack if: + * - LRO not enabled, or + * - no LRO resources, or + * - lro enqueue fails + */ + if ((rx_ring->lro.lro_cnt != 0) && + (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) + do_if_input = 0; + } + if (do_if_input != 0) { + ena_trace(ENA_DBG | ENA_RXPTH, + "calling if_input() with mbuf %p\n", mbuf); + (*ifp->if_input)(ifp, mbuf); + } + + counter_enter(); + counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); + counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); + counter_exit(); + } while (--budget); + + rx_ring->next_to_clean = next_to_clean; + + refill_required = ena_com_free_desc(io_sq); + refill_threshold = min_t(int, + rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, + ENA_RX_REFILL_THRESH_PACKET); + + if (refill_required > refill_threshold) { + ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); + ena_refill_rx_bufs(rx_ring, refill_required); + } + + tcp_lro_flush_all(&rx_ring->lro); + + return (RX_BUDGET - budget); + +error: + counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); + + /* Too many desc from the device. 
Trigger reset */ + if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { + adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; + ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); + } + + return (0); +} + +static void +ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf) +{ + struct ena_com_tx_meta *ena_meta; + struct ether_vlan_header *eh; + struct mbuf *mbuf_next; + u32 mss; + bool offload; + uint16_t etype; + int ehdrlen; + struct ip *ip; + int iphlen; + struct tcphdr *th; + int offset; + + offload = false; + ena_meta = &ena_tx_ctx->ena_meta; + mss = mbuf->m_pkthdr.tso_segsz; + + if (mss != 0) + offload = true; + + if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) + offload = true; + + if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) + offload = true; + + if (!offload) { + ena_tx_ctx->meta_valid = 0; + return; + } + + /* Determine where frame payload starts. */ + eh = mtod(mbuf, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + etype = ntohs(eh->evl_proto); + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + } else { + etype = ntohs(eh->evl_encap_proto); + ehdrlen = ETHER_HDR_LEN; + } + + mbuf_next = m_getptr(mbuf, ehdrlen, &offset); + ip = (struct ip *)(mtodo(mbuf_next, offset)); + iphlen = ip->ip_hl << 2; + + mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset); + th = (struct tcphdr *)(mtodo(mbuf_next, offset)); + + if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) { + ena_tx_ctx->l3_csum_enable = 1; + } + if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) { + ena_tx_ctx->tso_enable = 1; + ena_meta->l4_hdr_len = (th->th_off); + } + + switch (etype) { + case ETHERTYPE_IP: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; + if ((ip->ip_off & htons(IP_DF)) != 0) + ena_tx_ctx->df = 1; + break; + case ETHERTYPE_IPV6: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; + + default: + break; + } + + if (ip->ip_p == IPPROTO_TCP) { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; + if ((mbuf->m_pkthdr.csum_flags & + (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0) + ena_tx_ctx->l4_csum_enable = 1; + else + ena_tx_ctx->l4_csum_enable = 0; + } else if (ip->ip_p == IPPROTO_UDP) { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; + if ((mbuf->m_pkthdr.csum_flags & + (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0) + ena_tx_ctx->l4_csum_enable = 1; + else + ena_tx_ctx->l4_csum_enable = 0; + } else { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; + ena_tx_ctx->l4_csum_enable = 0; + } + + ena_meta->mss = mss; + ena_meta->l3_hdr_len = iphlen; + ena_meta->l3_hdr_offset = ehdrlen; + ena_tx_ctx->meta_valid = 1; +} + +static int +ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) +{ + struct ena_adapter *adapter; + struct mbuf *collapsed_mbuf; + int num_frags; + + adapter = tx_ring->adapter; + num_frags = ena_mbuf_count(*mbuf); + + /* One segment must be reserved for the configuration descriptor. */ + if (num_frags < adapter->max_tx_sgl_size) + return (0); + counter_u64_add(tx_ring->tx_stats.collapse, 1); + + collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, + adapter->max_tx_sgl_size - 1); + if (unlikely(collapsed_mbuf == NULL)) { + counter_u64_add(tx_ring->tx_stats.collapse_err, 1); + return (ENOMEM); + } + + /* If the mbuf was collapsed successfully, the original mbuf is released.
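m_collapse() tries to defragment the chain in place so it fits the device SGL limit, keeping one descriptor in reserve for the packet metadata. A hedged usage sketch (maxsegs stands in for adapter->max_tx_sgl_size):

	struct mbuf *m2;

	/* Fit the chain into at most maxsegs - 1 data segments; M_NOWAIT
	 * because this runs in the transmit path. On failure the original
	 * chain is left intact, so the caller may requeue and retry. */
	m2 = m_collapse(m, M_NOWAIT, maxsegs - 1);
	if (m2 == NULL)
		return (ENOMEM);
	m = m2;		/* on success the old chain has been absorbed */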
*/ + *mbuf = collapsed_mbuf; + + return (0); +} + +static int +ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, + struct mbuf *mbuf, void **push_hdr, u16 *header_len) +{ + struct ena_adapter *adapter = tx_ring->adapter; + struct ena_com_buf *ena_buf; + bus_dma_segment_t segs[ENA_BUS_DMA_SEGS]; + size_t iseg = 0; + uint32_t mbuf_head_len, frag_len; + uint16_t push_len = 0; + uint16_t delta = 0; + int rc, nsegs; + + mbuf_head_len = mbuf->m_len; + tx_info->mbuf = mbuf; + ena_buf = tx_info->bufs; + + /* + * For easier maintenance of the DMA map, map the whole mbuf even if + * the LLQ is used. The descriptors will be filled using the segments. + */ + rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap, mbuf, + segs, &nsegs, BUS_DMA_NOWAIT); + if (unlikely((rc != 0) || (nsegs == 0))) { + ena_trace(ENA_WARNING, + "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs); + goto dma_error; + } + + + if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* + * When the device is in LLQ mode, the driver will copy + * the header into the device memory space. + * The ena_com layer assumes the header is in a linear + * memory space. + * This assumption might be wrong since part of the header + * can be in the fragmented buffers. + * First check if the header fits in the mbuf. If not, copy it to + * a separate buffer that will hold the linearized data. + */ + push_len = min_t(uint32_t, mbuf->m_pkthdr.len, + tx_ring->tx_max_header_size); + *header_len = push_len; + /* If header is in linear space, just point into mbuf's data. */ + if (likely(push_len <= mbuf_head_len)) { + *push_hdr = mbuf->m_data; + /* + * Otherwise, copy the whole header from multiple mbufs + * to the intermediate buffer. + */ + } else { + m_copydata(mbuf, 0, push_len, + tx_ring->push_buf_intermediate_buf); + *push_hdr = tx_ring->push_buf_intermediate_buf; + + counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); + delta = push_len - mbuf_head_len; + } + + ena_trace(ENA_DBG | ENA_TXPTH, + "mbuf: %p header_buf->vaddr: %p push_len: %d\n", + mbuf, *push_hdr, push_len); + + /* + * If the header was in linear memory space, map the rest of the + * data in the first mbuf of the chain for DMA. + */ + if (mbuf_head_len > push_len) { + ena_buf->paddr = segs[iseg].ds_addr + push_len; + ena_buf->len = segs[iseg].ds_len - push_len; + ena_buf++; + tx_info->num_of_bufs++; + } + /* + * Advance the seg index as either the 1st mbuf was mapped or is + * a part of push_hdr. + */ + iseg++; + } else { + *push_hdr = NULL; + /* + * header_len is just a hint for the device. Because FreeBSD does + * not give us information about the packet header length, and it + * is not guaranteed that all packet headers will be in the 1st + * mbuf, setting header_len to 0 makes the device ignore the value + * and resolve the header on its own. + */ + *header_len = 0; + } + + /* + * If the header is in non-linear space (delta > 0), then skip mbufs + * containing header and map the last one containing both header and the + * packet data. + * The first segment is already counted in. + * If LLQ is not supported, the loop will be skipped. + */ + while (delta > 0) { + frag_len = segs[iseg].ds_len; + + /* + * If the whole segment contains only header bytes, just move + * to the next one and reduce delta. + */ + if (unlikely(delta >= frag_len)) { + delta -= frag_len; + } else { + /* + * Map the rest of the packet data that was contained in + * the mbuf.
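The delta bookkeeping above covers the case where the pushed header spills past the first mbuf: delta counts the header bytes that live in later DMA segments and must be skipped when building the data descriptors. A worked example with invented numbers:

	/* Suppose tx_max_header_size = 96 and the first mbuf holds 64 bytes:
	 *   push_len = min(pkt_len, 96) = 96, delta = 96 - 64 = 32.
	 * Segment 0 is consumed entirely by the pushed header, and the first
	 * 32 bytes of segment 1 are header too, so mapping resumes at:
	 *   ena_buf->paddr = segs[1].ds_addr + 32;
	 *   ena_buf->len   = segs[1].ds_len  - 32;
	 */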
+static int
+ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
+{
+    struct ena_adapter *adapter;
+    struct ena_tx_buffer *tx_info;
+    struct ena_com_tx_ctx ena_tx_ctx;
+    struct ena_com_dev *ena_dev;
+    struct ena_com_io_sq *io_sq;
+    void *push_hdr;
+    uint16_t next_to_use;
+    uint16_t req_id;
+    uint16_t ena_qid;
+    uint16_t header_len;
+    int rc;
+    int nb_hw_desc;
+
+    ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
+    adapter = tx_ring->que->adapter;
+    ena_dev = adapter->ena_dev;
+    io_sq = &ena_dev->io_sq_queues[ena_qid];
+
+    rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
+    if (unlikely(rc != 0)) {
+        ena_trace(ENA_WARNING,
+            "Failed to collapse mbuf! err: %d\n", rc);
+        return (rc);
+    }
+
+    ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len);
+
+    next_to_use = tx_ring->next_to_use;
+    req_id = tx_ring->free_tx_ids[next_to_use];
+    tx_info = &tx_ring->tx_buffer_info[req_id];
+    tx_info->num_of_bufs = 0;
+
+    rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
+    if (unlikely(rc != 0)) {
+        ena_trace(ENA_WARNING, "Failed to map TX mbuf\n");
+        return (rc);
+    }
+    memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+    ena_tx_ctx.ena_bufs = tx_info->bufs;
+    ena_tx_ctx.push_header = push_hdr;
+    ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+    ena_tx_ctx.req_id = req_id;
+    ena_tx_ctx.header_len = header_len;
+
+    /* Set flags and meta data */
+    ena_tx_csum(&ena_tx_ctx, *mbuf);
+
+    if (tx_ring->acum_pkts == DB_THRESHOLD ||
+        ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
+        ena_trace(ENA_DBG | ENA_TXPTH,
+            "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+            tx_ring->que->id);
+        wmb();
+        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+        counter_u64_add(tx_ring->tx_stats.doorbells, 1);
+        tx_ring->acum_pkts = 0;
+    }
+
+    /* Prepare the packet's descriptors and send them to the device */
+    rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
+    if (unlikely(rc != 0)) {
+        if (likely(rc == ENA_COM_NO_MEM)) {
+            ena_trace(ENA_DBG | ENA_TXPTH,
+                "tx ring[%d] is out of space\n", tx_ring->que->id);
+        } else {
+            device_printf(adapter->pdev,
+                "failed to prepare tx bufs\n");
+        }
+        counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
+        goto dma_error;
+    }
+
+    counter_enter();
+    counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
+    counter_u64_add_protected(tx_ring->tx_stats.bytes,
+        (*mbuf)->m_pkthdr.len);
+
+    counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
+    counter_u64_add_protected(adapter->hw_stats.tx_bytes,
+        (*mbuf)->m_pkthdr.len);
+    counter_exit();
+
+    tx_info->tx_descs = nb_hw_desc;
+    getbinuptime(&tx_info->timestamp);
+    tx_info->print_once = true;
+
+    tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+        tx_ring->ring_size);
+
+    /* Stop the queue when no more space is available. The packet can have
+     * up to sgl_size + 2 descriptors: one for the meta descriptor and one
+     * for the header (if the header is larger than tx_max_header_size).
+ */ + if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + adapter->max_tx_sgl_size + 2))) { + ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", + tx_ring->que->id); + + tx_ring->running = false; + counter_u64_add(tx_ring->tx_stats.queue_stop, 1); + + /* There is a rare condition where this function decides to + * stop the queue but meanwhile tx_cleanup() updates + * next_to_completion and terminates. + * The queue will remain stopped forever. + * To solve this issue this function performs mb(), checks + * the wakeup condition and wakes up the queue if needed. + */ + mb(); + + if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_RESUME_THRESH)) { + tx_ring->running = true; + counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); + } + } + + bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, + BUS_DMASYNC_PREWRITE); + + return (0); + +dma_error: + tx_info->mbuf = NULL; + bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); + + return (rc); +} + +static void +ena_start_xmit(struct ena_ring *tx_ring) +{ + struct mbuf *mbuf; + struct ena_adapter *adapter = tx_ring->adapter; + struct ena_com_io_sq* io_sq; + int ena_qid; + int ret = 0; + + if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) + return; + + if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))) + return; + + ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); + io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; + + while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { + ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" + " header csum flags %#jx\n", + mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); + + if (unlikely(!tx_ring->running)) { + drbr_putback(adapter->ifp, tx_ring->br, mbuf); + break; + } + + if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { + if (ret == ENA_COM_NO_MEM) { + drbr_putback(adapter->ifp, tx_ring->br, mbuf); + } else if (ret == ENA_COM_NO_SPACE) { + drbr_putback(adapter->ifp, tx_ring->br, mbuf); + } else { + m_freem(mbuf); + drbr_advance(adapter->ifp, tx_ring->br); + } + + break; + } + + drbr_advance(adapter->ifp, tx_ring->br); + + if (unlikely((if_getdrvflags(adapter->ifp) & + IFF_DRV_RUNNING) == 0)) + return; + + tx_ring->acum_pkts++; + + BPF_MTAP(adapter->ifp, mbuf); + } + + if (likely(tx_ring->acum_pkts != 0)) { + wmb(); + /* Trigger the dma engine */ + ena_com_write_sq_doorbell(io_sq); + counter_u64_add(tx_ring->tx_stats.doorbells, 1); + tx_ring->acum_pkts = 0; + } + + if (unlikely(!tx_ring->running)) + taskqueue_enqueue(tx_ring->que->cleanup_tq, + &tx_ring->que->cleanup_task); +} Property changes on: stable/11/sys/dev/ena/ena_datapath.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: stable/11/sys/dev/ena/ena_datapath.h =================================================================== --- stable/11/sys/dev/ena/ena_datapath.h (nonexistent) +++ stable/11/sys/dev/ena/ena_datapath.h (revision 361468) @@ -0,0 +1,42 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + * + */ + +#ifndef ENA_TXRX_H +#define ENA_TXRX_H + +void ena_cleanup(void *, int); +void ena_qflush(if_t); +int ena_mq_start(if_t, struct mbuf *); +void ena_deferred_mq_start(void *, int); + +#endif /* ENA_TXRX_H */ Property changes on: stable/11/sys/dev/ena/ena_datapath.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: stable/11/sys/dev/ena/ena_netmap.c =================================================================== --- stable/11/sys/dev/ena/ena_netmap.c (nonexistent) +++ stable/11/sys/dev/ena/ena_netmap.c (revision 361468) @@ -0,0 +1,1095 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include +__FBSDID("$FreeBSD$"); + +#ifdef DEV_NETMAP + +#include "ena.h" +#include "ena_netmap.h" + +#define ENA_NETMAP_MORE_FRAMES 1 +#define ENA_NETMAP_NO_MORE_FRAMES 0 +#define ENA_MAX_FRAMES 16384 + +struct ena_netmap_ctx { + struct netmap_kring *kring; + struct ena_adapter *adapter; + struct netmap_adapter *na; + struct netmap_slot *slots; + struct ena_ring *ring; + struct ena_com_io_cq *io_cq; + struct ena_com_io_sq *io_sq; + u_int nm_i; + uint16_t nt; + uint16_t lim; +}; + +/* Netmap callbacks */ +static int ena_netmap_reg(struct netmap_adapter *, int); +static int ena_netmap_txsync(struct netmap_kring *, int); +static int ena_netmap_rxsync(struct netmap_kring *, int); + +/* Helper functions */ +static int ena_netmap_tx_frames(struct ena_netmap_ctx *); +static int ena_netmap_tx_frame(struct ena_netmap_ctx *); +static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *); +static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int, + uint16_t); +static int ena_netmap_copy_data(struct netmap_adapter *, + struct netmap_slot *, u_int, uint16_t, uint16_t, void *); +static int ena_netmap_map_single_slot(struct netmap_adapter *, + struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *); +static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *, + struct ena_tx_buffer *, void **, uint16_t *, uint16_t *); +static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *, + struct ena_tx_buffer *); +static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *); +static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, + uint16_t); +static inline int validate_tx_req_id(struct ena_ring *, uint16_t); +static int ena_netmap_rx_frames(struct ena_netmap_ctx *); +static int ena_netmap_rx_frame(struct ena_netmap_ctx *); +static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, + int *); +static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *); +static void ena_netmap_fill_ctx(struct netmap_kring *, + struct ena_netmap_ctx *, uint16_t); + +int +ena_netmap_attach(struct ena_adapter *adapter) +{ + struct netmap_adapter na; + + ena_trace(ENA_NETMAP, "netmap attach\n"); + + bzero(&na, sizeof(na)); + na.na_flags = NAF_MOREFRAG; + na.ifp = adapter->ifp; + na.num_tx_desc = adapter->tx_ring_size; + na.num_rx_desc = adapter->rx_ring_size; + na.num_tx_rings = adapter->num_queues; + na.num_rx_rings = adapter->num_queues; + na.rx_buf_maxsize = adapter->buf_ring_size; + na.nm_txsync = ena_netmap_txsync; + na.nm_rxsync = ena_netmap_rxsync; + na.nm_register = ena_netmap_reg; + + return (netmap_attach(&na)); +} + +int +ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, + struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) +{ + struct netmap_adapter *na = NA(adapter->ifp); + struct netmap_kring *kring; + struct netmap_ring *ring; + struct netmap_slot *slot; + void *addr; + uint64_t paddr; + int nm_i, qid, head, lim, rc; + + /* if previously allocated frag is not used */ + if (unlikely(rx_info->netmap_buf_idx != 0)) + return (0); + + qid = rx_ring->qid; + kring = na->rx_rings[qid]; + nm_i = kring->nr_hwcur; + head = kring->rhead; + + ena_trace(ENA_NETMAP | ENA_DBG, "nr_hwcur: %d, nr_hwtail: %d, " + "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur, + kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail); + + if ((nm_i == head) && rx_ring->initialized) { + ena_trace(ENA_NETMAP, "No free slots in netmap ring\n"); + return (ENOMEM); + } + + ring = kring->ring; + if (ring == NULL) { + device_printf(adapter->pdev, "Rx ring %d is 
NULL\n", qid);
+        return (EFAULT);
+    }
+    slot = &ring->slot[nm_i];
+
+    addr = PNMB(na, slot, &paddr);
+    if (addr == NETMAP_BUF_BASE(na)) {
+        device_printf(adapter->pdev, "Bad buff in slot\n");
+        return (EFAULT);
+    }
+
+    rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
+    if (rc != 0) {
+        ena_trace(ENA_WARNING, "DMA mapping error\n");
+        return (rc);
+    }
+    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
+
+    rx_info->ena_buf.paddr = paddr;
+    rx_info->ena_buf.len = ring->nr_buf_size;
+    rx_info->mbuf = NULL;
+    rx_info->netmap_buf_idx = slot->buf_idx;
+
+    slot->buf_idx = 0;
+
+    lim = kring->nkr_num_slots - 1;
+    kring->nr_hwcur = nm_next(nm_i, lim);
+
+    return (0);
+}
+
+void
+ena_netmap_free_rx_slot(struct ena_adapter *adapter,
+    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
+{
+    struct netmap_adapter *na;
+    struct netmap_kring *kring;
+    struct netmap_slot *slot;
+    int nm_i, qid, lim;
+
+    na = NA(adapter->ifp);
+    if (na == NULL) {
+        device_printf(adapter->pdev, "netmap adapter is NULL\n");
+        return;
+    }
+
+    if (na->rx_rings == NULL) {
+        device_printf(adapter->pdev, "netmap rings are NULL\n");
+        return;
+    }
+
+    qid = rx_ring->qid;
+    kring = na->rx_rings[qid];
+    if (kring == NULL) {
+        device_printf(adapter->pdev,
+            "netmap kernel ring %d is NULL\n", qid);
+        return;
+    }
+
+    lim = kring->nkr_num_slots - 1;
+    nm_i = nm_prev(kring->nr_hwcur, lim);
+
+    if (kring->nr_mode != NKR_NETMAP_ON)
+        return;
+
+    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
+        BUS_DMASYNC_POSTREAD);
+    netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);
+
+    KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));
+
+    slot = &kring->ring->slot[nm_i];
+
+    ENA_ASSERT(slot->buf_idx == 0, "Overwrite slot buf\n");
+    slot->buf_idx = rx_info->netmap_buf_idx;
+    slot->flags = NS_BUF_CHANGED;
+
+    rx_info->netmap_buf_idx = 0;
+    kring->nr_hwcur = nm_i;
+}
+
+static bool
+ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
+{
+    struct netmap_adapter *na;
+    struct netmap_kring *kring;
+
+    if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
+        na = NA(adapter->ifp);
+        kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
+        if (kring->nr_mode == NKR_NETMAP_ON)
+            return true;
+    }
+    return false;
+}
+
+bool
+ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
+{
+    return ena_ring_in_netmap(adapter, qid, NR_TX);
+}
+
+bool
+ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
+{
+    return ena_ring_in_netmap(adapter, qid, NR_RX);
+}
+
+static void
+ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
+{
+    if (!ena_ring_in_netmap(adapter, qid, x))
+        return;
+
+    netmap_reset(NA(adapter->ifp), x, qid, 0);
+    ena_trace(ENA_NETMAP, "%s ring %d is in netmap mode\n",
+        (x == NR_TX) ? "Tx" : "Rx", qid);
+}
+
+void
+ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
+{
+    ena_netmap_reset_ring(adapter, qid, NR_RX);
+}
+
+void
+ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
+{
+    ena_netmap_reset_ring(adapter, qid, NR_TX);
+}
+
+static int
+ena_netmap_reg(struct netmap_adapter *na, int onoff)
+{
+    struct ifnet *ifp = na->ifp;
+    struct ena_adapter *adapter = ifp->if_softc;
+    struct netmap_kring *kring;
+    enum txrx t;
+    int rc, i;
+
+    sx_xlock(&adapter->ioctl_sx);
+    ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
+    ena_down(adapter);
+
+    if (onoff) {
+        ena_trace(ENA_NETMAP, "netmap on\n");
+        for_rx_tx(t) {
+            for (i = 0; i <= nma_get_nrings(na, t); i++) {
+                kring = NMR(na, t)[i];
+                if (nm_kring_pending_on(kring)) {
+                    kring->nr_mode = NKR_NETMAP_ON;
+                }
+            }
+        }
+        nm_set_native_flags(na);
+    } else {
+        ena_trace(ENA_NETMAP, "netmap off\n");
+        nm_clear_native_flags(na);
+        for_rx_tx(t) {
+            for (i = 0; i <= nma_get_nrings(na, t); i++) {
+                kring = NMR(na, t)[i];
+                if (nm_kring_pending_off(kring)) {
+                    kring->nr_mode = NKR_NETMAP_OFF;
+                }
+            }
+        }
+    }
+
+    rc = ena_up(adapter);
+    if (rc != 0) {
+        ena_trace(ENA_WARNING, "ena_up failed with rc=%d\n", rc);
+        adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+        nm_clear_native_flags(na);
+        ena_destroy_device(adapter, false);
+        ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
+        rc = ena_restore_device(adapter);
+    }
+    sx_unlock(&adapter->ioctl_sx);
+
+    return (rc);
+}
+
+static int
+ena_netmap_txsync(struct netmap_kring *kring, int flags)
+{
+    struct ena_netmap_ctx ctx;
+    int rc = 0;
+
+    ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
+    ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];
+
+    ENA_RING_MTX_LOCK(ctx.ring);
+    if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
+        goto txsync_end;
+
+    if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
+        goto txsync_end;
+
+    rc = ena_netmap_tx_frames(&ctx);
+    ena_netmap_tx_cleanup(&ctx);
+
+txsync_end:
+    ENA_RING_MTX_UNLOCK(ctx.ring);
+    return (rc);
+}
+
+static int
+ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
+{
+    struct ena_ring *tx_ring = ctx->ring;
+    int rc = 0;
+
+    ctx->nm_i = ctx->kring->nr_hwcur;
+    ctx->nt = ctx->ring->next_to_use;
+
+    __builtin_prefetch(&ctx->slots[ctx->nm_i]);
+
+    while (ctx->nm_i != ctx->kring->rhead) {
+        if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
+            /*
+             * When there is no empty space in the Tx ring, an
+             * error is still returned. It should not be passed
+             * to netmap, as the application knows the current
+             * ring state from the netmap ring pointers. Returning
+             * an error there could cause the application to exit,
+             * while a full Tx ring is a common situation.
+             */
+            if (rc == ENA_COM_NO_MEM)
+                rc = 0;
+            break;
+        }
+        tx_ring->acum_pkts++;
+    }
+
+    /* If any packet was sent... */
+    if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
+        wmb();
+        /* ...send the doorbell to the device. */
+        ena_com_write_sq_doorbell(ctx->io_sq);
+        counter_u64_add(ctx->ring->tx_stats.doorbells, 1);
+        tx_ring->acum_pkts = 0;
+
+        ctx->ring->next_to_use = ctx->nt;
+        ctx->kring->nr_hwcur = ctx->nm_i;
+    }
+
+    return (rc);
+}
+static int
+ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
+{
+    struct ena_com_tx_ctx ena_tx_ctx;
+    struct ena_adapter *adapter;
+    struct ena_ring *tx_ring;
+    struct ena_tx_buffer *tx_info;
+    uint16_t req_id;
+    uint16_t header_len;
+    uint16_t packet_len;
+    int nb_hw_desc;
+    int rc;
+    void *push_hdr;
+
+    adapter = ctx->adapter;
+    if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
+        ena_trace(ENA_WARNING, "Too many slots per packet\n");
+        return (EINVAL);
+    }
+
+    tx_ring = ctx->ring;
+
+    req_id = tx_ring->free_tx_ids[ctx->nt];
+    tx_info = &tx_ring->tx_buffer_info[req_id];
+    tx_info->num_of_bufs = 0;
+    tx_info->nm_info.sockets_used = 0;
+
+    rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
+        &packet_len);
+    if (unlikely(rc != 0)) {
+        device_printf(adapter->pdev, "Failed to map Tx slot\n");
+        return (rc);
+    }
+
+    bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
+    ena_tx_ctx.ena_bufs = tx_info->bufs;
+    ena_tx_ctx.push_header = push_hdr;
+    ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+    ena_tx_ctx.req_id = req_id;
+    ena_tx_ctx.header_len = header_len;
+
+    /* There are no offloads, as netmap does not support them */
+
+    if (tx_ring->acum_pkts == DB_THRESHOLD ||
+        ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx)) {
+        wmb();
+        ena_com_write_sq_doorbell(ctx->io_sq);
+        counter_u64_add(tx_ring->tx_stats.doorbells, 1);
+        tx_ring->acum_pkts = 0;
+    }
+
+    rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
+    if (unlikely(rc != 0)) {
+        if (likely(rc == ENA_COM_NO_MEM)) {
+            ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH,
+                "Tx ring[%d] is out of space\n", tx_ring->que->id);
+        } else {
+            device_printf(adapter->pdev,
+                "Failed to prepare Tx bufs\n");
+        }
+        counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
+
+        ena_netmap_unmap_last_socket_chain(ctx, tx_info);
+        return (rc);
+    }
+
+    counter_enter();
+    counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
+    counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
+    counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
+    counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
+    counter_exit();
+
+    tx_info->tx_descs = nb_hw_desc;
+
+    ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);
+
+    for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
+        bus_dmamap_sync(adapter->tx_buf_tag,
+            tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);
+
+    return (0);
+}
+
+static inline uint16_t
+ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
+{
+    uint16_t slots = 1;
+    uint16_t nm = ctx->nm_i;
+
+    while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
+        slots++;
+        nm = nm_next(nm, ctx->lim);
+    }
+
+    return slots;
+}
+
+static inline uint16_t
+ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
+    uint16_t limit)
+{
+    struct netmap_slot *nm_slot;
+    uint16_t packet_size = 0;
+
+    do {
+        nm_slot = &slots[slot_index];
+        packet_size += nm_slot->len;
+        slot_index = nm_next(slot_index, limit);
+    } while ((nm_slot->flags & NS_MOREFRAG) != 0);
+
+    return packet_size;
+}
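Both helpers above walk a multi-slot frame by following NS_MOREFRAG until the final slot. A compact runnable model of that traversal; toy_slot and TOY_MOREFRAG are illustrative stand-ins for struct netmap_slot and NS_MOREFRAG:

/*
 * Userspace model of the NS_MOREFRAG walk shared by
 * ena_netmap_count_slots() and ena_netmap_packet_len().
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_MOREFRAG 0x1

struct toy_slot {
    uint16_t len;
    uint16_t flags;
};

static void
frame_dims(const struct toy_slot *slots, unsigned i, unsigned lim,
    unsigned *nslots, unsigned *plen)
{
    *nslots = 0;
    *plen = 0;
    for (;;) {
        (*nslots)++;
        *plen += slots[i].len;
        if ((slots[i].flags & TOY_MOREFRAG) == 0)
            break;
        i = (i == lim) ? 0 : i + 1;    /* nm_next() */
    }
}

int
main(void)
{
    /* A 3-slot frame: the last slot carries no MOREFRAG flag. */
    struct toy_slot ring[4] = {
        { 512, TOY_MOREFRAG }, { 512, TOY_MOREFRAG }, { 100, 0 }, { 0, 0 }
    };
    unsigned n, len;

    frame_dims(ring, 0, 3, &n, &len);
    printf("slots=%u len=%u\n", n, len);    /* slots=3 len=1124 */
    return (0);
}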
+static int
+ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
+    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
+{
+    struct netmap_slot *nm_slot;
+    void *slot_vaddr;
+    uint16_t data_amount;
+
+    do {
+        nm_slot = &slots[slot_index];
+        slot_vaddr = NMB(na, nm_slot);
+        if (unlikely(slot_vaddr == NULL))
+            return (EINVAL);
+
+        data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
+        memcpy(destination, slot_vaddr, data_amount);
+        bytes_to_copy -= data_amount;
+        /* Advance past the bytes just copied. */
+        destination = (char *)destination + data_amount;
+
+        slot_index = nm_next(slot_index, limit);
+    } while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);
+
+    return (0);
+}
+
+static int
+ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
+    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
+{
+    int rc;
+
+    *vaddr = PNMB(na, slot, paddr);
+    if (unlikely(*vaddr == NULL)) {
+        ena_trace(ENA_ALERT, "Slot address is NULL\n");
+        return (EINVAL);
+    }
+
+    rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
+    if (unlikely(rc != 0)) {
+        ena_trace(ENA_ALERT, "Failed to map slot %d for DMA\n",
+            slot->buf_idx);
+        return (EINVAL);
+    }
+
+    return (0);
+}
+
+static int
+ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
+    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
+    uint16_t *packet_len)
+{
+    struct netmap_slot *slot;
+    struct ena_com_buf *ena_buf;
+    struct ena_adapter *adapter;
+    struct ena_ring *tx_ring;
+    struct ena_netmap_tx_info *nm_info;
+    bus_dmamap_t *nm_maps;
+    void *vaddr;
+    uint64_t paddr;
+    uint32_t *nm_buf_idx;
+    uint32_t slot_head_len;
+    uint32_t frag_len;
+    uint32_t remaining_len;
+    uint16_t push_len;
+    uint16_t delta;
+    int rc;
+
+    adapter = ctx->adapter;
+    tx_ring = ctx->ring;
+    ena_buf = tx_info->bufs;
+    nm_info = &tx_info->nm_info;
+    nm_maps = nm_info->map_seg;
+    nm_buf_idx = nm_info->socket_buf_idx;
+    slot = &ctx->slots[ctx->nm_i];
+
+    slot_head_len = slot->len;
+    *packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
+    remaining_len = *packet_len;
+    delta = 0;
+
+    __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+    if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+        /*
+         * When the device is in LLQ mode, the driver will copy
+         * the header into the device memory space.
+         * The ena_com layer assumes that the header is in a linear
+         * memory space.
+         * This assumption might be wrong since part of the header
+         * can be in the fragmented buffers.
+         * First, check if the header fits in the first slot. If not,
+         * copy it to a separate buffer that will hold the linearized
+         * data.
+         */
+        push_len = min_t(uint32_t, *packet_len,
+            tx_ring->tx_max_header_size);
+        *header_len = push_len;
+        /* If the header is in linear space, just point to the socket's data. */
+        if (likely(push_len <= slot_head_len)) {
+            *push_hdr = NMB(ctx->na, slot);
+            if (unlikely(*push_hdr == NULL)) {
+                device_printf(adapter->pdev,
+                    "Slot vaddr is NULL\n");
+                return (EINVAL);
+            }
+        /*
+         * Otherwise, copy the whole header from multiple slots
+         * to the intermediate buffer.
+         */
+        } else {
+            rc = ena_netmap_copy_data(ctx->na,
+                ctx->slots,
+                ctx->nm_i,
+                ctx->lim,
+                push_len,
+                tx_ring->push_buf_intermediate_buf);
+            if (unlikely(rc)) {
+                device_printf(adapter->pdev,
+                    "Failed to copy data from slots to push_buf\n");
+                return (EINVAL);
+            }
+
+            *push_hdr = tx_ring->push_buf_intermediate_buf;
+            counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
+
+            delta = push_len - slot_head_len;
+        }
+
+        ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH,
+            "slot: %d header_buf->vaddr: %p push_len: %d\n",
+            slot->buf_idx, *push_hdr, push_len);
+        /*
+         * If the header was in linear memory space, map the rest of
+         * the data in the first slot for DMA.
+         */
+        if (slot_head_len > push_len) {
+            rc = ena_netmap_map_single_slot(ctx->na,
+                slot,
+                adapter->tx_buf_tag,
+                *nm_maps,
+                &vaddr,
+                &paddr);
+            if (unlikely(rc != 0)) {
+                device_printf(adapter->pdev,
+                    "DMA mapping error\n");
+                return (rc);
+            }
+            nm_maps++;
+
+            ena_buf->paddr = paddr + push_len;
+            ena_buf->len = slot->len - push_len;
+            ena_buf++;
+
+            tx_info->num_of_bufs++;
+        }
+
+        remaining_len -= slot->len;
+
+        /* Save buf idx before advancing */
+        *nm_buf_idx = slot->buf_idx;
+        nm_buf_idx++;
+        slot->buf_idx = 0;
+
+        /* Advance to the next socket */
+        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
+        slot = &ctx->slots[ctx->nm_i];
+        nm_info->sockets_used++;
+
+        /*
+         * If the header is in non-linear space (delta > 0), then skip
+         * the slots containing the header and map the last one
+         * containing both the header and the packet data.
+         * The first segment is already counted in.
+         */
+        while (delta > 0) {
+            __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+            frag_len = slot->len;
+
+            /*
+             * If the whole segment contains header just move to
+             * the next one and reduce delta.
+             */
+            if (unlikely(delta >= frag_len)) {
+                delta -= frag_len;
+            } else {
+                /*
+                 * Map the data and then assign it the proper
+                 * offsets.
+                 */
+                rc = ena_netmap_map_single_slot(ctx->na,
+                    slot,
+                    adapter->tx_buf_tag,
+                    *nm_maps,
+                    &vaddr,
+                    &paddr);
+                if (unlikely(rc != 0)) {
+                    device_printf(adapter->pdev,
+                        "DMA mapping error\n");
+                    goto error_map;
+                }
+                nm_maps++;
+
+                ena_buf->paddr = paddr + delta;
+                ena_buf->len = slot->len - delta;
+                ena_buf++;
+
+                tx_info->num_of_bufs++;
+                delta = 0;
+            }
+
+            remaining_len -= slot->len;
+
+            /* Save buf idx before advancing */
+            *nm_buf_idx = slot->buf_idx;
+            nm_buf_idx++;
+            slot->buf_idx = 0;
+
+            /* Advance to the next socket */
+            ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
+            slot = &ctx->slots[ctx->nm_i];
+            nm_info->sockets_used++;
+        }
+    } else {
+        *push_hdr = NULL;
+        /*
+         * header_len is just a hint for the device. Because netmap
+         * does not provide any information about the packet header
+         * length and it is not guaranteed that all packet headers
+         * will be in the 1st slot, setting header_len to 0 makes the
+         * device ignore this value and resolve the header on its own.
+         */
+        *header_len = 0;
+    }
+
+    /* Map all remaining data (regular routine for non-LLQ mode) */
+    while (remaining_len > 0) {
+        __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+
+        rc = ena_netmap_map_single_slot(ctx->na,
+            slot,
+            adapter->tx_buf_tag,
+            *nm_maps,
+            &vaddr,
+            &paddr);
+        if (unlikely(rc != 0)) {
+            device_printf(adapter->pdev,
+                "DMA mapping error\n");
+            goto error_map;
+        }
+        nm_maps++;
+
+        ena_buf->paddr = paddr;
+        ena_buf->len = slot->len;
+        ena_buf++;
+
+        tx_info->num_of_bufs++;
+
+        remaining_len -= slot->len;
+
+        /* Save buf idx before advancing */
+        *nm_buf_idx = slot->buf_idx;
+        nm_buf_idx++;
+        slot->buf_idx = 0;
+
+        /* Advance to the next socket */
+        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
+        slot = &ctx->slots[ctx->nm_i];
+        nm_info->sockets_used++;
+    }
+
+    return (0);
+
+error_map:
+    ena_netmap_unmap_last_socket_chain(ctx, tx_info);
+
+    return (rc);
+}
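ena_netmap_tx_map_slots() records every consumed slot in the parallel nm_info arrays (socket_buf_idx and map_seg) precisely so that a failure can be unwound in reverse order, which is what ena_netmap_unmap_last_socket_chain() below does. A small model of that save/unwind pattern, with plain integers standing in for buffer indices and DMA maps:

/*
 * Userspace model of the parallel-array save/unwind bookkeeping.
 */
#include <stdio.h>

#define MAX_SEGS 8

static int saved_idx[MAX_SEGS];
static int sockets_used;

static void
save_slot(int buf_idx)
{
    saved_idx[sockets_used++] = buf_idx;
}

static void
unwind(void)
{
    int n = sockets_used;

    /* Walk backwards, handing each buffer back to its slot. */
    while (n--) {
        printf("returning buf_idx %d\n", saved_idx[n]);
        saved_idx[n] = 0;
    }
    sockets_used = 0;
}

int
main(void)
{
    save_slot(11);
    save_slot(12);
    save_slot(13);
    unwind();    /* prints 13, 12, 11 */
    return (0);
}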
+static void
+ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
+    struct ena_tx_buffer *tx_info)
+{
+    struct ena_netmap_tx_info *nm_info;
+    int n;
+
+    nm_info = &tx_info->nm_info;
+
+    /**
+     * As the number of used sockets might not be equal to the number
+     * of buffers used in the LLQ mode, they must be treated separately.
+     * First, unmap the DMA maps.
+     */
+    n = tx_info->num_of_bufs;
+    while (n--) {
+        netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
+            nm_info->map_seg[n]);
+    }
+    tx_info->num_of_bufs = 0;
+
+    /* Next, return the sockets to the userspace */
+    n = nm_info->sockets_used;
+    while (n--) {
+        ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
+        ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
+        nm_info->socket_buf_idx[n] = 0;
+        ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
+    }
+    nm_info->sockets_used = 0;
+}
+
+static void
+ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
+{
+    uint16_t req_id;
+    uint16_t total_tx_descs = 0;
+
+    ctx->nm_i = ctx->kring->nr_hwtail;
+    ctx->nt = ctx->ring->next_to_clean;
+
+    /* Reclaim buffers for completed transmissions */
+    while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
+        if (validate_tx_req_id(ctx->ring, req_id) != 0)
+            break;
+        total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
+    }
+
+    ctx->kring->nr_hwtail = ctx->nm_i;
+
+    if (total_tx_descs > 0) {
+        /* acknowledge completion of sent packets */
+        ctx->ring->next_to_clean = ctx->nt;
+        ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
+        ena_com_update_dev_comp_head(ctx->ring->ena_com_io_cq);
+    }
+}
+
+static uint16_t
+ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
+{
+    struct ena_tx_buffer *tx_info;
+    struct ena_netmap_tx_info *nm_info;
+    int n;
+
+    tx_info = &ctx->ring->tx_buffer_info[req_id];
+    nm_info = &tx_info->nm_info;
+
+    /**
+     * As the number of used sockets might not be equal to the number
+     * of buffers used in the LLQ mode, they must be treated separately.
+     * First, unmap the DMA maps.
+     */
+    for (n = 0; n < tx_info->num_of_bufs; n++) {
+        netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
+            nm_info->map_seg[n]);
+    }
+    tx_info->num_of_bufs = 0;
+
+    /* Next, return the sockets to the userspace */
+    for (n = 0; n < nm_info->sockets_used; n++) {
+        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
+        ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0,
+            "Tx idx is not 0.\n");
+        ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
+        ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
+        nm_info->socket_buf_idx[n] = 0;
+    }
+    nm_info->sockets_used = 0;
+
+    ctx->ring->free_tx_ids[ctx->nt] = req_id;
+    ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);
+
+    return tx_info->tx_descs;
+}
+
+static inline int
+validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
+{
+    struct ena_adapter *adapter = tx_ring->adapter;
+
+    if (likely(req_id < tx_ring->ring_size))
+        return (0);
+
+    ena_trace(ENA_WARNING, "Invalid req_id: %hu\n", req_id);
+    counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
+
+    adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+    ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
+
+    return (EFAULT);
+}
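validate_tx_req_id() above guards the req_id recycling contract: ids are handed out at free_tx_ids[next_to_use] on submission and returned at free_tx_ids[next_to_clean] on completion, and anything outside the ring triggers a device reset. A runnable model of that contract; RING_SIZE and the -1 error value are illustrative stand-ins only:

/*
 * Userspace model of the free_tx_ids recycling and bounds check.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 4

static uint16_t free_ids[RING_SIZE] = { 0, 1, 2, 3 };
static uint16_t next_to_use, next_to_clean;

static int
validate_req_id(uint16_t req_id)
{
    return (req_id < RING_SIZE) ? 0 : -1;    /* EFAULT in the driver */
}

int
main(void)
{
    /* Submit two packets... */
    uint16_t a = free_ids[next_to_use];
    next_to_use = (next_to_use + 1) % RING_SIZE;
    uint16_t b = free_ids[next_to_use];
    next_to_use = (next_to_use + 1) % RING_SIZE;

    /* ...and complete them, recycling the ids. */
    printf("validate(%u) = %d\n", a, validate_req_id(a));
    free_ids[next_to_clean] = a;
    next_to_clean = (next_to_clean + 1) % RING_SIZE;
    free_ids[next_to_clean] = b;
    next_to_clean = (next_to_clean + 1) % RING_SIZE;

    /* An id outside the ring is rejected. */
    printf("validate(%u) = %d\n", (unsigned)RING_SIZE,
        validate_req_id(RING_SIZE));
    return (0);
}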
+static int
+ena_netmap_rxsync(struct netmap_kring *kring, int flags)
+{
+    struct ena_netmap_ctx ctx;
+    int rc;
+
+    ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
+    ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];
+
+    if (ctx.kring->rhead > ctx.lim) {
+        /* Probably not needed to release slots from RX ring. */
+        return (netmap_ring_reinit(ctx.kring));
+    }
+
+    if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
+        return (0);
+
+    if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
+        return (0);
+
+    if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
+        return (rc);
+
+    ena_netmap_rx_cleanup(&ctx);
+
+    return (0);
+}
+
+static inline int
+ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
+{
+    int rc = 0;
+    int frames_counter = 0;
+
+    ctx->nt = ctx->ring->next_to_clean;
+    ctx->nm_i = ctx->kring->nr_hwtail;
+
+    while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
+        frames_counter++;
+        /* In case of multiple frames, it is not an error. */
+        rc = 0;
+        if (frames_counter > ENA_MAX_FRAMES) {
+            device_printf(ctx->adapter->pdev,
+                "Driver is stuck in the Rx loop\n");
+            break;
+        }
+    }
+
+    ctx->kring->nr_hwtail = ctx->nm_i;
+    ctx->kring->nr_kflags &= ~NKR_PENDINTR;
+    ctx->ring->next_to_clean = ctx->nt;
+
+    return (rc);
+}
+
+static inline int
+ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
+{
+    struct ena_com_rx_ctx ena_rx_ctx;
+    int rc, len = 0;
+    uint16_t buf, nm;
+
+    ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
+    ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
+    bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
+        ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);
+
+    rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
+    if (unlikely(rc != 0)) {
+        ena_trace(ENA_ALERT, "Too many desc from the device.\n");
+        counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
+        ctx->adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+        ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, ctx->adapter);
+        return (rc);
+    }
+    if (unlikely(ena_rx_ctx.descs == 0))
+        return (ENA_NETMAP_NO_MORE_FRAMES);
+
+    ena_trace(ENA_NETMAP | ENA_DBG, "Rx: q %d got packet from ena. descs #:"
+        " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid,
+        ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto,
+        ena_rx_ctx.hash);
+
+    for (buf = 0; buf < ena_rx_ctx.descs; buf++)
+        if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
+            break;
+    /*
+     * ena_netmap_rx_load_desc doesn't know the number of descriptors.
+     * It just sets the NS_MOREFRAG flag on all slots; here the flag of
+     * the last slot is cleared.
+ */ + ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED; + + if (rc != 0) { + goto rx_clear_desc; + } + + bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag, + ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); + + counter_enter(); + counter_u64_add_protected(ctx->ring->rx_stats.bytes, len); + counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len); + counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1); + counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1); + counter_exit(); + + return (ENA_NETMAP_MORE_FRAMES); + +rx_clear_desc: + nm = ctx->nm_i; + + /* Remove failed packet from ring */ + while(buf--) { + ctx->slots[nm].flags = 0; + ctx->slots[nm].len = 0; + nm = nm_prev(nm, ctx->lim); + } + + return (rc); +} + +static inline int +ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len) +{ + struct ena_rx_buffer *rx_info; + uint16_t req_id; + int rc; + + req_id = ctx->ring->ena_bufs[buf].req_id; + rc = validate_rx_req_id(ctx->ring, req_id); + if (unlikely(rc != 0)) + return (rc); + + rx_info = &ctx->ring->rx_buffer_info[req_id]; + bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map, + BUS_DMASYNC_POSTREAD); + netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map); + + ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0, "Rx idx is not 0.\n"); + + ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx; + rx_info->netmap_buf_idx = 0; + /* + * Set NS_MOREFRAG to all slots. + * Then ena_netmap_rx_frame clears it from last one. + */ + ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED; + ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len; + *len += ctx->slots[ctx->nm_i].len; + ctx->ring->free_rx_ids[ctx->nt] = req_id; + ena_trace(ENA_DBG, "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", + rx_info, ctx->slots[ctx->nm_i].buf_idx, + (uintmax_t)rx_info->ena_buf.paddr, ctx->nm_i); + + ctx->nm_i = nm_next(ctx->nm_i, ctx->lim); + ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size); + + return (0); +} + +static inline void +ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx) +{ + int refill_required; + + refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur; + if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail) + refill_required -= 1; + + if (refill_required == 0) + return; + else if (refill_required < 0) + refill_required += ctx->kring->nkr_num_slots; + + ena_refill_rx_bufs(ctx->ring, refill_required); +} + +static inline void +ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx, + uint16_t ena_qid) +{ + ctx->kring = kring; + ctx->na = kring->na; + ctx->adapter = ctx->na->ifp->if_softc; + ctx->lim = kring->nkr_num_slots - 1; + ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid]; + ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid]; + ctx->slots = kring->ring->slot; +} + +void +ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map) +{ + struct netmap_adapter *na = NA(adapter->ifp); + + netmap_unload_map(na, adapter->tx_buf_tag, map); +} + +#endif /* DEV_NETMAP */ Property changes on: stable/11/sys/dev/ena/ena_netmap.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: stable/11/sys/dev/ena/ena_netmap.h =================================================================== --- stable/11/sys/dev/ena/ena_netmap.h (nonexistent) +++ stable/11/sys/dev/ena/ena_netmap.h (revision 361468) @@ -0,0 +1,60 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015-2019 
Amazon.com, Inc. or its affiliates. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + * + */ + +#ifndef _ENA_NETMAP_H_ +#define _ENA_NETMAP_H_ + +/* Undef (un)likely as they are defined in netmap_kern.h */ +#ifdef likely +#undef likely +#endif /* likely */ +#ifdef unlikely +#undef unlikely +#endif /* unlikely */ + +#include +#include +#include + +int ena_netmap_attach(struct ena_adapter *); +int ena_netmap_alloc_rx_slot(struct ena_adapter *, struct ena_ring *, + struct ena_rx_buffer *); +void ena_netmap_free_rx_slot(struct ena_adapter *, struct ena_ring *, + struct ena_rx_buffer *); +bool ena_rx_ring_in_netmap(struct ena_adapter *, int); +bool ena_tx_ring_in_netmap(struct ena_adapter *, int); +void ena_netmap_reset_rx_ring(struct ena_adapter *, int); +void ena_netmap_reset_tx_ring(struct ena_adapter *, int); +void ena_netmap_unload(struct ena_adapter *, bus_dmamap_t); + +#endif /* _ENA_NETMAP_H_ */ Property changes on: stable/11/sys/dev/ena/ena_netmap.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: stable/11/sys/dev/ena/ena_sysctl.c =================================================================== --- stable/11/sys/dev/ena/ena_sysctl.c (revision 361467) +++ stable/11/sys/dev/ena/ena_sysctl.c (revision 361468) @@ -1,364 +1,375 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "ena_sysctl.h" static void ena_sysctl_add_wd(struct ena_adapter *); static void ena_sysctl_add_stats(struct ena_adapter *); static void ena_sysctl_add_tuneables(struct ena_adapter *); static int ena_sysctl_buf_ring_size(SYSCTL_HANDLER_ARGS); static int ena_sysctl_rx_queue_size(SYSCTL_HANDLER_ARGS); static SYSCTL_NODE(_hw, OID_AUTO, ena, CTLFLAG_RD, 0, "ENA driver parameters"); /* * Logging level for changing verbosity of the output */ int ena_log_level = ENA_ALERT | ENA_WARNING; SYSCTL_INT(_hw_ena, OID_AUTO, log_level, CTLFLAG_RWTUN, &ena_log_level, 0, "Logging level indicating verbosity of the logs"); +/* + * Use 9k mbufs for the Rx buffers. Default to 0 (use page size mbufs instead). + * Using 9k mbufs in low memory conditions might cause allocation to take a lot + * of time and lead to OS instability, as the allocator needs to look for + * contiguous pages. + * However, page size mbufs have a bit smaller throughput than 9k mbufs, so if + * network performance is the priority, 9k mbufs can be used. + */ +int ena_enable_9k_mbufs = 0; +SYSCTL_INT(_hw_ena, OID_AUTO, enable_9k_mbufs, CTLFLAG_RDTUN, + &ena_enable_9k_mbufs, 0, "Use 9 kB mbufs for Rx descriptors"); void ena_sysctl_add_nodes(struct ena_adapter *adapter) { ena_sysctl_add_wd(adapter); ena_sysctl_add_stats(adapter); ena_sysctl_add_tuneables(adapter); } static void ena_sysctl_add_wd(struct ena_adapter *adapter) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = adapter->pdev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); /* Sysctl calls for Watchdog service */ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "wd_active", CTLFLAG_RWTUN, &adapter->wd_active, 0, "Watchdog is active"); SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "keep_alive_timeout", CTLFLAG_RWTUN, &adapter->keep_alive_timeout, "Timeout for Keep Alive messages"); SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "missing_tx_timeout", CTLFLAG_RWTUN, &adapter->missing_tx_timeout, "Timeout for TX completion"); SYSCTL_ADD_U32(ctx, child, OID_AUTO, "missing_tx_max_queues", CTLFLAG_RWTUN, &adapter->missing_tx_max_queues, 0, "Number of TX queues to check per run"); SYSCTL_ADD_U32(ctx, child, OID_AUTO, "missing_tx_threshold", CTLFLAG_RWTUN, &adapter->missing_tx_threshold, 0, "Max number of timed out packets"); } static void ena_sysctl_add_stats(struct ena_adapter *adapter) { device_t dev; struct ena_ring *tx_ring; struct ena_ring *rx_ring; struct ena_hw_stats *hw_stats; struct ena_stats_dev *dev_stats; struct ena_stats_tx *tx_stats; struct ena_stats_rx *rx_stats; struct ena_com_stats_admin *admin_stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid *queue_node, *tx_node, *rx_node, *hw_node; struct sysctl_oid *admin_node; struct sysctl_oid_list *queue_list, *tx_list, *rx_list, *hw_list; struct sysctl_oid_list *admin_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN];
int i; dev = adapter->pdev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); tx_ring = adapter->tx_ring; rx_ring = adapter->rx_ring; hw_stats = &adapter->hw_stats; dev_stats = &adapter->dev_stats; admin_stats = &adapter->ena_dev->admin_queue.stats; SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "wd_expired", CTLFLAG_RD, &dev_stats->wd_expired, "Watchdog expiry count"); SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_up", CTLFLAG_RD, &dev_stats->interface_up, "Network interface up count"); SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_down", CTLFLAG_RD, &dev_stats->interface_down, "Network interface down count"); SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "admin_q_pause", CTLFLAG_RD, &dev_stats->admin_q_pause, "Admin queue pauses"); for (i = 0; i < adapter->num_queues; ++i, ++tx_ring, ++rx_ring) { snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); /* TX specific stats */ tx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO, "tx_ring", CTLFLAG_RD, NULL, "TX ring"); tx_list = SYSCTL_CHILDREN(tx_node); tx_stats = &tx_ring->tx_stats; SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "count", CTLFLAG_RD, &tx_stats->cnt, "Packets sent"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "bytes", CTLFLAG_RD, &tx_stats->bytes, "Bytes sent"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "prepare_ctx_err", CTLFLAG_RD, &tx_stats->prepare_ctx_err, "TX buffer preparation failures"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "dma_mapping_err", CTLFLAG_RD, &tx_stats->dma_mapping_err, "DMA mapping failures"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "doorbells", CTLFLAG_RD, &tx_stats->doorbells, "Queue doorbells"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "missing_tx_comp", CTLFLAG_RD, &tx_stats->missing_tx_comp, "TX completions missed"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "bad_req_id", CTLFLAG_RD, &tx_stats->bad_req_id, "Bad request id count"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "mbuf_collapses", CTLFLAG_RD, &tx_stats->collapse, "Mbuf collapse count"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "mbuf_collapse_err", CTLFLAG_RD, &tx_stats->collapse_err, "Mbuf collapse failures"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_wakeups", CTLFLAG_RD, &tx_stats->queue_wakeup, "Queue wakeups"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_stops", CTLFLAG_RD, &tx_stats->queue_stop, "Queue stops"); SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "llq_buffer_copy", CTLFLAG_RD, &tx_stats->llq_buffer_copy, "Header copies for llq transaction"); /* RX specific stats */ rx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO, "rx_ring", CTLFLAG_RD, NULL, "RX ring"); rx_list = SYSCTL_CHILDREN(rx_node); rx_stats = &rx_ring->rx_stats; SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "count", CTLFLAG_RD, &rx_stats->cnt, "Packets received"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bytes", CTLFLAG_RD, &rx_stats->bytes, "Bytes received"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "refil_partial", CTLFLAG_RD, &rx_stats->refil_partial, "Partial refilled mbufs"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bad_csum", CTLFLAG_RD, &rx_stats->bad_csum, "Bad RX checksum"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "mbuf_alloc_fail", CTLFLAG_RD, &rx_stats->mbuf_alloc_fail, "Failed mbuf allocs"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "mjum_alloc_fail", 
CTLFLAG_RD, &rx_stats->mjum_alloc_fail, "Failed jumbo mbuf allocs"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "dma_mapping_err", CTLFLAG_RD, &rx_stats->dma_mapping_err, "DMA mapping errors"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bad_desc_num", CTLFLAG_RD, &rx_stats->bad_desc_num, "Bad descriptor count"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bad_req_id", CTLFLAG_RD, &rx_stats->bad_req_id, "Bad request id count"); SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "empty_rx_ring", CTLFLAG_RD, &rx_stats->empty_rx_ring, "RX descriptors depletion count"); } /* Stats read from device */ hw_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hw_stats", CTLFLAG_RD, NULL, "Statistics from hardware"); hw_list = SYSCTL_CHILDREN(hw_node); SYSCTL_ADD_COUNTER_U64(ctx, hw_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &hw_stats->rx_packets, "Packets received"); SYSCTL_ADD_COUNTER_U64(ctx, hw_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &hw_stats->tx_packets, "Packets transmitted"); SYSCTL_ADD_COUNTER_U64(ctx, hw_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &hw_stats->rx_bytes, "Bytes received"); SYSCTL_ADD_COUNTER_U64(ctx, hw_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &hw_stats->tx_bytes, "Bytes transmitted"); SYSCTL_ADD_COUNTER_U64(ctx, hw_list, OID_AUTO, "rx_drops", CTLFLAG_RD, &hw_stats->rx_drops, "Receive packet drops"); /* ENA Admin queue stats */ admin_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "admin_stats", CTLFLAG_RD, NULL, "ENA Admin Queue statistics"); admin_list = SYSCTL_CHILDREN(admin_node); SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "aborted_cmd", CTLFLAG_RD, &admin_stats->aborted_cmd, 0, "Aborted commands"); SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "sumbitted_cmd", CTLFLAG_RD, &admin_stats->submitted_cmd, 0, "Submitted commands"); SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "completed_cmd", CTLFLAG_RD, &admin_stats->completed_cmd, 0, "Completed commands"); SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "out_of_space", CTLFLAG_RD, &admin_stats->out_of_space, 0, "Queue out of space"); SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "no_completion", CTLFLAG_RD, &admin_stats->no_completion, 0, "Commands not completed"); } static void ena_sysctl_add_tuneables(struct ena_adapter *adapter) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = adapter->pdev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); /* Tuneable number of buffers in the buf-ring (drbr) */ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "buf_ring_size", CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ena_sysctl_buf_ring_size, "I", "Size of the bufring"); /* Tuneable number of Rx ring size */ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_queue_size", CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ena_sysctl_rx_queue_size, "I", "Size of the Rx ring. The size should be a power of 2. " "Max value is 8K"); } static int ena_sysctl_buf_ring_size(SYSCTL_HANDLER_ARGS) { struct ena_adapter *adapter = arg1; int val; int error; val = 0; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) { val = adapter->buf_ring_size; error = sysctl_handle_int(oidp, &val, 0, req); } if (error != 0 || req->newptr == NULL) return (error); if (val < 0) return (EINVAL); device_printf(adapter->pdev, "Requested new buf ring size: %d. 
Old size: %d\n", val, adapter->buf_ring_size); if (val != adapter->buf_ring_size) { adapter->buf_ring_size = val; adapter->reset_reason = ENA_REGS_RESET_OS_TRIGGER; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (0); } static int ena_sysctl_rx_queue_size(SYSCTL_HANDLER_ARGS) { struct ena_adapter *adapter = arg1; int val; int error; val = 0; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) { val = adapter->rx_ring_size; error = sysctl_handle_int(oidp, &val, 0, req); } if (error != 0 || req->newptr == NULL) return (error); if (val < 16) return (EINVAL); device_printf(adapter->pdev, "Requested new rx queue size: %d. Old size: %d\n", val, adapter->rx_ring_size); if (val != adapter->rx_ring_size) { adapter->rx_ring_size = val; adapter->reset_reason = ENA_REGS_RESET_OS_TRIGGER; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (0); } Index: stable/11/sys/dev/ena/ena_sysctl.h =================================================================== --- stable/11/sys/dev/ena/ena_sysctl.h (revision 361467) +++ stable/11/sys/dev/ena/ena_sysctl.h (revision 361468) @@ -1,44 +1,47 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef ENA_SYSCTL_H #define ENA_SYSCTL_H #include #include #include "ena.h" void ena_sysctl_add_nodes(struct ena_adapter *); +extern int ena_enable_9k_mbufs; +#define ena_mbuf_sz (ena_enable_9k_mbufs ? MJUM9BYTES : MJUMPAGESIZE) + #endif /* !(ENA_SYSCTL_H) */ Index: stable/11/sys/modules/ena/Makefile =================================================================== --- stable/11/sys/modules/ena/Makefile (revision 361467) +++ stable/11/sys/modules/ena/Makefile (revision 361468) @@ -1,41 +1,42 @@ # # BSD LICENSE # # Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # $FreeBSD$ # .PATH: ${SRCTOP}/sys/dev/ena \ ${SRCTOP}/sys/contrib/ena-com KMOD = if_ena -SRCS = ena.c ena_com.c ena_eth_com.c ena_sysctl.c +SRCS = ena_com.c ena_eth_com.c +SRCS += ena.c ena_sysctl.c ena_datapath.c ena_netmap.c SRCS += device_if.h bus_if.h pci_if.h CFLAGS += -I${SRCTOP}/sys/contrib .include Index: stable/11 =================================================================== --- stable/11 (revision 361467) +++ stable/11 (revision 361468) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r354219-354225,354242,358289,360777
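One usage note on the new tunable merged here: hw.ena.enable_9k_mbufs feeds the ena_mbuf_sz macro added to ena_sysctl.h, which selects the Rx cluster size. A userspace model of that selection; the TOY_* constants mirror the kernel's MJUM9BYTES and MJUMPAGESIZE values on a 4 KB-page system and are not taken from the patch:

/*
 * Userspace model of the ena_mbuf_sz selection driven by the
 * hw.ena.enable_9k_mbufs loader tunable.
 */
#include <stdio.h>

#define TOY_MJUM9BYTES   (9 * 1024)    /* stand-in for MJUM9BYTES */
#define TOY_MJUMPAGESIZE (4 * 1024)    /* stand-in for MJUMPAGESIZE */

static int ena_enable_9k_mbufs = 0;    /* default, as in the patch */

#define ena_mbuf_sz \
    (ena_enable_9k_mbufs ? TOY_MJUM9BYTES : TOY_MJUMPAGESIZE)

int
main(void)
{
    printf("default Rx buffer size: %d\n", ena_mbuf_sz);
    ena_enable_9k_mbufs = 1;    /* hw.ena.enable_9k_mbufs=1 in loader.conf */
    printf("9k-enabled Rx buffer size: %d\n", ena_mbuf_sz);
    return (0);
}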