diff --git a/sys/dev/enic/cq_desc.h b/sys/dev/enic/cq_desc.h index ae8847c6d9a1..4fb8cce7212e 100644 --- a/sys/dev/enic/cq_desc.h +++ b/sys/dev/enic/cq_desc.h @@ -1,97 +1,82 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #ifndef _CQ_DESC_H_ #define _CQ_DESC_H_ /* * Completion queue descriptor types */ enum cq_desc_types { CQ_DESC_TYPE_WQ_ENET = 0, CQ_DESC_TYPE_DESC_COPY = 1, CQ_DESC_TYPE_WQ_EXCH = 2, CQ_DESC_TYPE_RQ_ENET = 3, CQ_DESC_TYPE_RQ_FCP = 4, CQ_DESC_TYPE_IOMMU_MISS = 5, CQ_DESC_TYPE_SGL = 6, CQ_DESC_TYPE_CLASSIFIER = 7, CQ_DESC_TYPE_TEST = 127, }; /* Completion queue descriptor: 16B * * All completion queues have this basic layout. The * type_specfic area is unique for each completion * queue type. */ struct cq_desc { __le16 completed_index; __le16 q_number; u8 type_specfic[11]; u8 type_color; }; #define CQ_DESC_TYPE_BITS 4 #define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) #define CQ_DESC_COLOR_MASK 1 #define CQ_DESC_COLOR_SHIFT 7 #define CQ_DESC_COLOR_MASK_NOSHIFT 0x80 #define CQ_DESC_Q_NUM_BITS 10 #define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) #define CQ_DESC_COMP_NDX_BITS 12 #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) -static inline void cq_color_enc(struct cq_desc *desc, const u8 color) -{ - if (color) - desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT); - else - desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT); -} - static inline void cq_desc_enc(struct cq_desc *desc, const u8 type, const u8 color, const u16 q_number, const u16 completed_index) { desc->type_color = (type & CQ_DESC_TYPE_MASK) | ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT); desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK); desc->completed_index = cpu_to_le16(completed_index & CQ_DESC_COMP_NDX_MASK); } static inline void cq_desc_dec(const struct cq_desc *desc_arg, u8 *type, u8 *color, u16 *q_number, u16 *completed_index) { const struct cq_desc *desc = desc_arg; const u8 type_color = desc->type_color; *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; /* * Make sure color bit is read from desc *before* other fields * are read from desc. Hardware guarantees color bit is last * bit (byte) written. Adding the rmb() prevents the compiler * and/or CPU from reordering the reads which would potentially * result in reading stale values. */ rmb(); *type = type_color & CQ_DESC_TYPE_MASK; *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; *completed_index = le16_to_cpu(desc->completed_index) & CQ_DESC_COMP_NDX_MASK; } -static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color) -{ - volatile const struct cq_desc *desc = desc_arg; - - *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; -} - #endif /* _CQ_DESC_H_ */ diff --git a/sys/dev/enic/enic.h b/sys/dev/enic/enic.h index 8c2212726548..eec6de823c9d 100644 --- a/sys/dev/enic/enic.h +++ b/sys/dev/enic/enic.h @@ -1,403 +1,391 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
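The color/rmb() contract in cq_desc_dec() is easiest to see from the consumer side. Below is a minimal, hypothetical poll loop (not this driver's actual service routine, which lives in vnic_cq.h) that drains a completion ring by flipping the expected color on every wrap; ring, to_clean and last_color are assumed consumer-side state introduced only for illustration.

    /*
     * Hypothetical illustration of the cq_desc color protocol.  A
     * descriptor whose color still matches the previous pass has not
     * been written by hardware yet; cq_desc_dec() issues the rmb()
     * before the remaining fields may be trusted.
     */
    static unsigned int
    cq_poll(struct cq_desc *ring, unsigned int desc_count,
        unsigned int *to_clean, u8 *last_color)
    {
            unsigned int work = 0;
            u8 type, color;
            u16 q_number, completed_index;

            for (;;) {
                    cq_desc_dec(&ring[*to_clean], &type, &color,
                        &q_number, &completed_index);
                    if (color == *last_color)
                            break;          /* not yet written */
                    work++;
                    if (++(*to_clean) == desc_count) {
                            *to_clean = 0;
                            *last_color = !*last_color;     /* flip on wrap */
                    }
            }
            return (work);
    }

Because the loop never reads past a stale color, the hardware only has to guarantee that the color byte is the last thing it writes, which is exactly what the rmb() comment above relies on.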
*/ #ifndef _ENIC_H #define _ENIC_H #include #include #include #include #include #include #include #include #include #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define u64 uint64_t struct enic_bar_info { struct resource *res; bus_space_tag_t tag; bus_space_handle_t handle; bus_size_t size; int rid; int offset; }; #define ENIC_BUS_WRITE_8(res, index, value) \ bus_space_write_8(res->bar.tag, res->bar.handle, \ res->bar.offset + (index), value) #define ENIC_BUS_WRITE_4(res, index, value) \ bus_space_write_4(res->bar.tag, res->bar.handle, \ res->bar.offset + (index), value) #define ENIC_BUS_WRITE_REGION_4(res, index, values, count) \ bus_space_write_region_4(res->bar.tag, res->bar.handle, \ res->bar.offset + (index), values, count); #define ENIC_BUS_READ_8(res, index) \ bus_space_read_8(res->bar.tag, res->bar.handle, \ res->bar.offset + (index)) #define ENIC_BUS_READ_4(res, index) \ bus_space_read_4(res->bar.tag, res->bar.handle, \ res->bar.offset + (index)) #define ENIC_BUS_READ_REGION_4(res, type, index, values, count) \ bus_space_read_region_4(res->type.tag, res->type.handle, \ res->type.offset + (index), values, count); struct vnic_res { unsigned int count; struct enic_bar_info bar; }; #include "vnic_enet.h" #include "vnic_dev.h" #include "vnic_wq.h" #include "vnic_rq.h" #include "vnic_cq.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_nic.h" #include "vnic_rss.h" #include "enic_res.h" #include "cq_enet_desc.h" #define ENIC_LOCK(_softc) mtx_lock(&(_softc)->enic_lock) #define ENIC_UNLOCK(_softc) mtx_unlock(&(_softc)->enic_lock) #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC" #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc" #define ENIC_MAX_MAC_ADDR 64 #define VLAN_ETH_HLEN 18 #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) #define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */ #define ENIC_CALC_IP_CKSUM 1 #define ENIC_CALC_TCP_UDP_CKSUM 2 #define ENIC_MAX_MTU 9000 #define ENIC_PAGE_SIZE 4096 #define PAGE_ROUND_UP(x) \ ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1))) /* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */ #define VNIC_FLOW_COUNTER_UPDATE_MSECS 500 /* PCI IDs */ #define CISCO_VENDOR_ID 0x1137 #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ /* Special Filter id for non-specific packet flagging. Don't change value */ #define ENIC_MAGIC_FILTER_ID 0xffff #define ENICPMD_FDIR_MAX 64 /* HW default VXLAN port */ #define ENIC_DEFAULT_VXLAN_PORT 4789 /* - * Interrupt 0: LSC and errors * Interrupt 1: rx queue 0 * Interrupt 2: rx queue 1 * ... 
+ * Interrupt x: LSC and errors */ #define ENICPMD_LSC_INTR_OFFSET 0 -#define ENICPMD_RXQ_INTR_OFFSET 1 +#define ENICPMD_RXQ_INTR_OFFSET 0 #include "vnic_devcmd.h" enum vnic_proxy_type { PROXY_NONE, PROXY_BY_BDF, PROXY_BY_INDEX, }; struct vnic_intr_coal_timer_info { u32 mul; u32 div; u32 max_usec; }; struct enic_softc; struct vnic_dev { void *priv; struct rte_pci_device *pdev; struct vnic_res res[RES_TYPE_MAX]; enum vnic_dev_intr_mode intr_mode; struct vnic_res __iomem *devcmd; struct vnic_devcmd_notify *notify; struct vnic_devcmd_notify notify_copy; bus_addr_t notify_pa; struct iflib_dma_info notify_res; u32 notify_sz; struct iflib_dma_info linkstatus_res; struct vnic_stats *stats; struct iflib_dma_info stats_res; struct vnic_devcmd_fw_info *fw_info; struct iflib_dma_info fw_info_res; enum vnic_proxy_type proxy; u32 proxy_index; u64 args[VNIC_DEVCMD_NARGS]; int in_reset; struct vnic_intr_coal_timer_info intr_coal_timer_info; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); void *(*alloc_consistent)(void *priv, size_t size, bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name); void (*free_consistent)(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle, struct iflib_dma_info *res); struct vnic_counter_counts *flow_counters; struct iflib_dma_info flow_counters_res; u8 flow_counters_dma_active; struct enic_softc *softc; }; struct enic_soft_stats { uint64_t rx_nombuf; uint64_t rx_packet_errors; uint64_t tx_oversized; }; struct intr_queue { struct if_irq intr_irq; struct resource *res; int rid; struct enic_softc *softc; }; +#define ENIC_MAX_LINK_SPEEDS 3 +#define ENIC_LINK_SPEED_10G 10000 +#define ENIC_LINK_SPEED_4G 4000 +#define ENIC_LINK_40G_INDEX 2 +#define ENIC_LINK_10G_INDEX 1 +#define ENIC_LINK_4G_INDEX 0 +#define ENIC_RX_COALESCE_RANGE_END 125 +#define ENIC_AIC_TS_BREAK 100 + +struct enic_rx_coal { + u32 small_pkt_range_start; + u32 large_pkt_range_start; + u32 range_end; + u32 use_adaptive_rx_coalesce; +}; + +/* Store only the lower range. Higher range is given by fw. */ +struct enic_intr_mod_range { + u32 small_pkt_range_start; + u32 large_pkt_range_start; +}; + struct enic { struct enic *next; struct rte_pci_device *pdev; struct vnic_enet_config config; struct vnic_dev_bar bar0; struct vnic_dev *vdev; /* * mbuf_initializer contains 64 bits of mbuf rearm_data, used by * the avx2 handler at this time. 
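The devcmd2 pointer and the devcmd_rtn hook added to struct vnic_dev let vnic_dev_cmd_init() pick the posting mechanism (legacy devcmd registers vs. the devcmd2 work queue) once at attach time, with later firmware calls indirecting through the hook. A sketch of that dispatch, reusing the args[] scratch array visible in the struct; this is an illustration under assumptions, not the actual vnic_dev.c code:

    /* Sketch only: revision-agnostic devcmd dispatch via devcmd_rtn. */
    static int
    enic_devcmd_sketch(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait)
    {
            int err;

            vdev->args[0] = *a0;    /* inputs through the scratch array */
            vdev->args[1] = *a1;
            err = vdev->devcmd_rtn(vdev, cmd, wait);  /* devcmd1 or devcmd2 */
            *a0 = vdev->args[0];    /* outputs come back the same way */
            *a1 = vdev->args[1];
            return (err);
    }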
*/ uint64_t mbuf_initializer; unsigned int port_id; bool overlay_offload; char bdf_name[ENICPMD_BDF_LENGTH]; int dev_fd; int iommu_group_fd; int iommu_groupid; int eventfd; uint8_t mac_addr[ETH_ALEN]; pthread_t err_intr_thread; u8 ig_vlan_strip_en; int link_status; u8 hw_ip_checksum; u16 max_mtu; u8 adv_filters; u32 flow_filter_mode; u8 filter_actions; /* HW supported actions */ bool vxlan; bool disable_overlay; /* devargs disable_overlay=1 */ uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */ bool nic_cfg_chk; /* NIC_CFG_CHK available */ bool udp_rss_weak; /* Bodega style UDP RSS */ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */ uint16_t vxlan_port; /* current vxlan port pushed to NIC */ unsigned int flags; unsigned int priv_flags; /* work queue (len = conf_wq_count) */ struct vnic_wq *wq; unsigned int wq_count; /* equals eth_dev nb_tx_queues */ /* receive queue (len = conf_rq_count) */ struct vnic_rq *rq; unsigned int rq_count; /* equals eth_dev nb_rx_queues */ /* completion queue (len = conf_cq_count) */ struct vnic_cq *cq; unsigned int cq_count; /* equals rq_count + wq_count */ /* interrupt vectors (len = conf_intr_count) */ struct vnic_intr *intr; struct intr_queue *intr_queues; unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */ /* software counters */ struct enic_soft_stats soft_stats; /* configured resources on vic */ unsigned int conf_rq_count; unsigned int conf_wq_count; unsigned int conf_cq_count; unsigned int conf_intr_count; /* linked list storing memory allocations */ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list; LIST_HEAD(enic_flows, rte_flow) flows; int max_flow_counter; /* RSS */ uint16_t reta_size; uint8_t hash_key_size; uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */ /* * Keep a copy of current RSS config for queries, as we cannot retrieve * it from the NIC. */ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */ uint8_t rss_enable; uint64_t rss_hf; /* ETH_RSS flags */ union vnic_rss_key rss_key; union vnic_rss_cpu rss_cpu; uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */ uint64_t tx_offload_mask; /* PKT_TX flags accepted */ struct enic_softc *softc; int port_mtu; + struct enic_rx_coal rx_coalesce_setting; + u32 rx_coalesce_usecs; + u32 tx_coalesce_usecs; }; struct enic_softc { device_t dev; if_ctx_t ctx; if_softc_ctx_t scctx; if_shared_ctx_t sctx; struct ifmedia *media; if_t ifp; struct mtx enic_lock; struct enic_bar_info mem; struct enic_bar_info io; struct vnic_dev vdev; struct enic enic; int ntxqsets; int nrxqsets; struct if_irq enic_event_intr_irq; struct if_irq enic_err_intr_irq; uint8_t lladdr[ETHER_ADDR_LEN]; int link_active; int stopped; uint8_t mac_addr[ETHER_ADDR_LEN]; int directed; int multicast; int broadcast; int promisc; int allmulti; u_int mc_count; uint8_t *mta; }; /* Per-instance private data structure */ -static inline unsigned int enic_vnic_rq_count(struct enic *enic) -{ - return enic->rq_count; -} - static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) { return rq; } static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) { return enic->rq_count + wq; } -static inline uint32_t -enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -{ - uint32_t d = i0 + i1; - d -= (d >= n_descriptors) ? 
n_descriptors : 0; - return d; -} - -static inline uint32_t -enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -{ - int32_t d = i1 - i0; - return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d); -} - static inline uint32_t enic_ring_incr(uint32_t n_descriptors, uint32_t idx) { idx++; if (unlikely(idx == n_descriptors)) idx = 0; return idx; } -void enic_free_wq(void *txq); -int enic_alloc_intr_resources(struct enic *enic); int enic_setup_finish(struct enic *enic); -int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, uint16_t nb_desc); void enic_start_wq(struct enic *enic, uint16_t queue_idx); int enic_stop_wq(struct enic *enic, uint16_t queue_idx); void enic_start_rq(struct enic *enic, uint16_t queue_idx); -void enic_free_rq(void *rxq); -int enic_set_vnic_res(struct enic *enic); -int enic_init_rss_nic_cfg(struct enic *enic); -int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu); -int enic_set_vlan_strip(struct enic *enic); +int enic_stop_rq(struct enic *enic, uint16_t queue_idx); +void enic_dev_disable(struct enic *enic); int enic_enable(struct enic *enic); int enic_disable(struct enic *enic); -void enic_remove(struct enic *enic); -int enic_get_link_status(struct enic *enic); -void enic_dev_stats_clear(struct enic *enic); -void enic_add_packet_filter(struct enic *enic); -int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); -int enic_del_mac_address(struct enic *enic, int mac_index); -unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); - -void enic_post_wq_index(struct vnic_wq *wq); -int enic_probe(struct enic *enic); -int enic_clsf_init(struct enic *enic); -void enic_clsf_destroy(struct enic *enic); -int enic_set_mtu(struct enic *enic, uint16_t new_mtu); int enic_link_update(struct enic *enic); bool enic_use_vector_rx_handler(struct enic *enic); void enic_fdir_info(struct enic *enic); void enic_prep_wq_for_simple_tx(struct enic *, uint16_t); struct enic_ring { uint64_t paddr; caddr_t vaddr; struct enic_softc *softc; uint32_t ring_size; /* Must be a power of two */ uint16_t id; /* Logical ID */ uint16_t phys_id; }; struct enic_cp_ring { struct enic_ring ring; struct if_irq irq; uint32_t cons; bool v_bit; /* Value of valid bit */ struct ctx_hw_stats *stats; uint32_t stats_ctx_id; uint32_t last_idx; /* Used by RX rings only * set to the last read pidx */ }; #endif /* _ENIC_H_ */ diff --git a/sys/dev/enic/enic_res.c b/sys/dev/enic/enic_res.c index d264874557a0..413873ad0fb4 100644 --- a/sys/dev/enic/enic_res.c +++ b/sys/dev/enic/enic_res.c @@ -1,212 +1,212 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
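With the general-purpose enic_ring_add()/enic_ring_sub() helpers removed, enic_ring_incr() above is the only index helper the data path keeps. Its wraparound, spelled out with assumed values:

    /* Assumed example: a 64-entry ring with the producer near the end. */
    uint32_t count = 64;
    uint32_t head = 62;

    head = enic_ring_incr(count, head);     /* 63 */
    head = enic_ring_incr(count, head);     /* wraps to 0 */
    head = enic_ring_incr(count, head);     /* 1 */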
*/ #include "enic.h" #include "enic_compat.h" #include "wq_enet_desc.h" #include "rq_enet_desc.h" #include "cq_enet_desc.h" #include "vnic_resource.h" #include "vnic_enet.h" #include "vnic_dev.h" #include "vnic_wq.h" #include "vnic_rq.h" #include "vnic_cq.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_nic.h" #include "vnic_rss.h" #include "enic_res.h" #include "enic.h" int enic_get_vnic_config(struct enic *enic) { struct vnic_enet_config *c = &enic->config; int err; err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr); if (err) { dev_err(enic_get_dev(enic), "Error getting MAC addr, %d\n", err); return err; } #define GET_CONFIG(m) \ do { \ err = vnic_dev_spec(enic->vdev, \ offsetof(struct vnic_enet_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ dev_err(enic_get_dev(enic), \ "Error getting %s, %d\n", #m, err); \ return err; \ } \ } while (0) GET_CONFIG(flags); GET_CONFIG(wq_desc_count); GET_CONFIG(rq_desc_count); GET_CONFIG(mtu); GET_CONFIG(intr_timer_type); GET_CONFIG(intr_mode); GET_CONFIG(intr_timer_usec); GET_CONFIG(loop_tag); GET_CONFIG(num_arfs); GET_CONFIG(max_pkt_size); /* max packet size is only defined in newer VIC firmware * and will be 0 for legacy firmware and VICs */ if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE) enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4); else enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4); if (c->mtu == 0) c->mtu = 1500; enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev); err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode, &enic->filter_actions); if (err) { dev_err(enic_get_dev(enic), "Error getting filter modes, %d\n", err); return err; } vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk, &enic->udp_rss_weak); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count)); c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c->rq_desc_count = min_t(u32, ENIC_MAX_RQ_DESCS, max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count)); c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c->intr_timer_usec = min_t(u32, c->intr_timer_usec, vnic_dev_get_intr_coal_timer_max(enic->vdev)); dev_info(enic_get_dev(enic), "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " - "wq/rq %d/%d mtu d, max mtu:%d\n", + "wq/rq %d/%d mtu %d, max mtu:%d\n", enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2], enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5], c->wq_desc_count, c->rq_desc_count, - /* enic->rte_dev->data->mtu, */ enic->max_mtu); + c->mtu, enic->max_mtu); dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " "rss %s intr mode %s type %s timer %d usec " "loopback tag 0x%04x\n", ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", ENIC_SETTING(enic, RSS) ? (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" : ((enic->udp_rss_weak ? "+udp" : "yes"))) : "no", c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : c->intr_mode == VENET_INTR_MODE_ANY ? "any" : "unknown", c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : c->intr_timer_type == VENET_INTR_TYPE_IDLE ? 
"idle" : "unknown", c->intr_timer_usec, c->loop_tag); /* RSS settings from vNIC */ enic->reta_size = ENIC_RSS_RETA_SIZE; enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE; enic->flow_type_rss_offloads = 0; /* Zero offloads if RSS is not enabled */ if (!ENIC_SETTING(enic, RSS)) enic->flow_type_rss_offloads = 0; enic->vxlan = ENIC_SETTING(enic, VXLAN) && vnic_dev_capable_vxlan(enic->vdev); /* * Default hardware capabilities. enic_dev_init() may add additional * flags if it enables overlay offloads. */ enic->tx_queue_offload_capa = 0; return 0; } int enic_add_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); if (err) dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); return err; } int enic_del_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); if (err) dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); return err; } int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en) { enum vnic_devcmd_cmd cmd; u64 a0, a1; u32 nic_cfg; int wait = 1000; vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, rss_enable, tso_ipid_split_en, ig_vlan_strip_en); a0 = nic_cfg; a1 = 0; cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG; return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait); } void enic_get_res_counts(struct enic *enic) { enic->conf_wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); enic->conf_rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); enic->conf_cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); enic->conf_intr_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL); dev_info(enic_get_dev(enic), "vNIC resources avail: wq %d rq %d cq %d intr %d\n", enic->conf_wq_count, enic->conf_rq_count, enic->conf_cq_count, enic->conf_intr_count); enic->conf_rq_count = min(enic->conf_rq_count, enic->conf_wq_count); enic->conf_wq_count = enic->conf_rq_count; enic->conf_cq_count = enic->conf_rq_count + enic->conf_wq_count; dev_info(enic_get_dev(enic), "vNIC resources iflib: wq %d rq %d cq %d intr %d\n", enic->conf_wq_count, enic->conf_rq_count, enic->conf_cq_count, enic->conf_intr_count); dev_info(enic_get_dev(enic), "vNIC resources avail: wq_desc %d rq_desc %d\n", enic->config.wq_desc_count, enic->config.rq_desc_count); enic->wq_count = enic->conf_wq_count; enic->rq_count = enic->conf_rq_count; enic->cq_count = enic->conf_cq_count; } diff --git a/sys/dev/enic/enic_res.h b/sys/dev/enic/enic_res.h index 1a6f3a3ca98f..82963e61a44f 100644 --- a/sys/dev/enic/enic_res.h +++ b/sys/dev/enic/enic_res.h @@ -1,73 +1,71 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _ENIC_RES_H_ #define _ENIC_RES_H_ #include "wq_enet_desc.h" #include "rq_enet_desc.h" #include "vnic_wq.h" #include "vnic_rq.h" #define ENIC_MIN_WQ_DESCS 64 #define ENIC_MAX_WQ_DESCS 4096 #define ENIC_MIN_RQ_DESCS 64 #define ENIC_MAX_RQ_DESCS 4096 #define ENIC_MAX_MULTICAST_ADDRESSES 32 /* A descriptor ring has a multiple of 32 descriptors */ #define ENIC_ALIGN_DESCS 32 #define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1) /* Request a completion index every 32 buffers (roughly packets) */ #define ENIC_WQ_CQ_THRESH 32 #define ENIC_MIN_MTU 68 /* Does not include (possible) inserted VLAN tag and FCS */ #define ENIC_DEFAULT_RX_MAX_PKT_SIZE 9022 /* Does not include (possible) inserted VLAN tag and FCS */ #define ENIC_TX_MAX_PKT_SIZE 9208 #define ENIC_MULTICAST_PERFECT_FILTERS 32 #define ENIC_UNICAST_PERFECT_FILTERS 32 #define ENIC_NON_TSO_MAX_DESC 16 #define ENIC_DEFAULT_RX_FREE_THRESH 32 #define ENIC_TX_XMIT_MAX 64 #define ENIC_RX_BURST_MAX 64 /* Defaults for dev_info.default_{rx,tx}portconf */ #define ENIC_DEFAULT_RX_BURST 32 #define ENIC_DEFAULT_RX_RINGS 1 #define ENIC_DEFAULT_RX_RING_SIZE 512 #define ENIC_DEFAULT_TX_BURST 32 #define ENIC_DEFAULT_TX_RINGS 1 #define ENIC_DEFAULT_TX_RING_SIZE 512 #define ENIC_RSS_DEFAULT_CPU 0 #define ENIC_RSS_BASE_CPU 0 #define ENIC_RSS_HASH_BITS 7 #define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS) #define ENIC_RSS_HASH_KEY_SIZE 40 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) struct enic; int enic_get_vnic_config(struct enic *); int enic_add_vlan(struct enic *enic, u16 vlanid); int enic_del_vlan(struct enic *enic, u16 vlanid); int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en); void enic_get_res_counts(struct enic *enic); void enic_init_vnic_resources(struct enic *enic); -int enic_alloc_vnic_resources(struct enic *); -void enic_free_vnic_resources(struct enic *); #endif /* _ENIC_RES_H_ */ diff --git a/sys/dev/enic/enic_txrx.c b/sys/dev/enic/enic_txrx.c index 5a557fc7f94a..169041587d06 100644 --- a/sys/dev/enic/enic_txrx.c +++ b/sys/dev/enic/enic_txrx.c @@ -1,485 +1,510 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
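ENIC_ALIGN_DESCS_MASK rounds a requested ring size down to the 32-descriptor multiple the VIC requires; it is the same arithmetic as the 0xffffffe0 clamp in enic_get_vnic_config(). A worked example with an assumed request:

    uint32_t requested = 1000;
    uint32_t aligned;

    aligned = requested & ENIC_ALIGN_DESCS_MASK;    /* 992 == 31 * 32 */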
*/ #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "enic.h" #include "opt_inet.h" #include "opt_inet6.h" static int enic_isc_txd_encap(void *, if_pkt_info_t); static void enic_isc_txd_flush(void *, uint16_t, qidx_t); static int enic_isc_txd_credits_update(void *, uint16_t, bool); static int enic_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t); static int enic_isc_rxd_pkt_get(void *, if_rxd_info_t); static void enic_isc_rxd_refill(void *, if_rxd_update_t); static void enic_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t); static int enic_legacy_intr(void *); static void enic_initial_post_rx(struct enic *, struct vnic_rq *); static int enic_wq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16, void *); static int enic_rq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16, void *); struct if_txrx enic_txrx = { .ift_txd_encap = enic_isc_txd_encap, .ift_txd_flush = enic_isc_txd_flush, .ift_txd_credits_update = enic_isc_txd_credits_update, .ift_rxd_available = enic_isc_rxd_available, .ift_rxd_pkt_get = enic_isc_rxd_pkt_get, .ift_rxd_refill = enic_isc_rxd_refill, .ift_rxd_flush = enic_isc_rxd_flush, .ift_legacy_intr = enic_legacy_intr }; static int enic_isc_txd_encap(void *vsc, if_pkt_info_t pi) { struct enic_softc *softc; struct enic *enic; struct vnic_wq *wq; int nsegs; int i; struct wq_enet_desc *desc; uint64_t bus_addr; uint16_t mss = 7; uint16_t header_len = 0; uint8_t offload_mode = 0; uint8_t eop = 0, cq; uint8_t vlan_tag_insert = 0; unsigned short vlan_id = 0; unsigned int wq_desc_avail; int head_idx; unsigned int desc_count, data_len; softc = vsc; enic = &softc->enic; + if_softc_ctx_t scctx = softc->scctx; wq = &enic->wq[pi->ipi_qsidx]; nsegs = pi->ipi_nsegs; ENIC_LOCK(softc); wq_desc_avail = vnic_wq_desc_avail(wq); head_idx = wq->head_idx; desc_count = wq->ring.desc_count; + if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0) + offload_mode |= WQ_ENET_OFFLOAD_MODE_CSUM; + for (i = 0; i < nsegs; i++) { eop = 0; cq = 0; wq->cq_pend++; if (i + 1 == nsegs) { eop = 1; cq = 1; wq->cq_pend = 0; } desc = wq->ring.descs; bus_addr = pi->ipi_segs[i].ds_addr; data_len = pi->ipi_segs[i].ds_len; wq_enet_desc_enc(&desc[head_idx], bus_addr, data_len, mss, header_len, offload_mode, eop, cq, 0, vlan_tag_insert, vlan_id, 0); head_idx = enic_ring_incr(desc_count, head_idx); wq_desc_avail--; } wq->ring.desc_avail = wq_desc_avail; wq->head_idx = head_idx; pi->ipi_new_pidx = head_idx; ENIC_UNLOCK(softc); return (0); } static void enic_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx) { struct enic_softc *softc; struct enic *enic; struct vnic_wq *wq; int head_idx; softc = vsc; enic = &softc->enic; ENIC_LOCK(softc); wq = &enic->wq[txqid]; head_idx = wq->head_idx; ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, head_idx); ENIC_UNLOCK(softc); } static int enic_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear) { struct enic_softc *softc; struct enic *enic; struct vnic_wq *wq; struct vnic_cq *cq; int processed; unsigned int cq_wq; unsigned int wq_work_to_do = 10; unsigned int wq_work_avail; softc = vsc; enic = &softc->enic; wq = &softc->enic.wq[txqid]; cq_wq = enic_cq_wq(enic, txqid); cq = &enic->cq[cq_wq]; ENIC_LOCK(softc); wq_work_avail = 
vnic_cq_work(cq, wq_work_to_do); ENIC_UNLOCK(softc); if (wq_work_avail == 0) return (0); if (!clear) return (1); ENIC_LOCK(softc); vnic_cq_service(cq, wq_work_to_do, enic_wq_service, NULL); processed = wq->processed; wq->processed = 0; ENIC_UNLOCK(softc); return (processed); } static int enic_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct enic_softc *softc; struct enic *enic; struct vnic_cq *cq; unsigned int rq_work_to_do = budget; unsigned int rq_work_avail = 0; unsigned int cq_rq; softc = vsc; enic = &softc->enic; cq_rq = enic_cq_rq(&softc->enic, rxqid); cq = &enic->cq[cq_rq]; rq_work_avail = vnic_cq_work(cq, rq_work_to_do); return rq_work_avail; } static int enic_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri) { struct enic_softc *softc; struct enic *enic; struct vnic_cq *cq; unsigned int rq_work_to_do = 1; unsigned int rq_work_done = 0; unsigned int cq_rq; softc = vsc; enic = &softc->enic; cq_rq = enic_cq_rq(&softc->enic, ri->iri_qsidx); cq = &enic->cq[cq_rq]; ENIC_LOCK(softc); rq_work_done = vnic_cq_service(cq, rq_work_to_do, enic_rq_service, ri); if (rq_work_done != 0) { vnic_intr_return_credits(&enic->intr[cq_rq], rq_work_done, 0, 1); ENIC_UNLOCK(softc); return (0); } else { ENIC_UNLOCK(softc); return (-1); } } static void enic_isc_rxd_refill(void *vsc, if_rxd_update_t iru) { struct enic_softc *softc; struct vnic_rq *rq; struct rq_enet_desc *rqd; uint64_t *paddrs; int count; uint32_t pidx; int len; int idx; int i; count = iru->iru_count; len = iru->iru_buf_size; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; softc = vsc; rq = &softc->enic.rq[iru->iru_qsidx]; rqd = rq->ring.descs; idx = pidx; for (i = 0; i < count; i++, idx++) { if (idx == rq->ring.desc_count) idx = 0; rq_enet_desc_enc(&rqd[idx], paddrs[i], RQ_ENET_TYPE_ONLY_SOP, len); } rq->in_use = 1; if (rq->need_initial_post) { ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, 0); } enic_initial_post_rx(&softc->enic, rq); } static void enic_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx) { struct enic_softc *softc; struct vnic_rq *rq; softc = vsc; rq = &softc->enic.rq[rxqid]; /* * pidx is the index of the last descriptor with a buffer the device * can use, and the device needs to be told which index is one past * that. */ ENIC_LOCK(softc); ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, pidx); ENIC_UNLOCK(softc); } static int enic_legacy_intr(void *xsc) { - return -1; + return (1); } static inline void vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service) (struct vnic_wq *wq, struct cq_desc *cq_desc, /* struct vnic_wq_buf * *buf, */ void *opaque), void *opaque) { int processed; processed = completed_index - wq->ring.last_count; if (processed < 0) processed += wq->ring.desc_count; if (processed == 0) processed++; wq->ring.desc_avail += processed; wq->processed += processed; wq->ring.last_count = completed_index; } /* * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has * allocated the buffers and filled the RQ descriptor ring. Just need to push * the post index to the NIC. */ static void enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq) { struct enic_softc *softc = enic->softc; if (!rq->in_use || !rq->need_initial_post) return; ENIC_LOCK(softc); /* make sure all prior writes are complete before doing the PIO write */ /* Post all but the last buffer to VIC. 
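enic_initial_post_rx() below posts desc_count - 1 buffers rather than the whole ring: holding one slot back keeps "posted index == fetch index" unambiguous, since it can then only mean an empty ring, never a full one. A sketch of the free-slot computation that convention permits; posted and fetch are stand-ins for the RQ posted/fetch indices, not fields of this driver:

    /* Sketch of the hold-one-slot-back invariant. */
    static inline uint32_t
    rq_slots_free(uint32_t desc_count, uint32_t posted, uint32_t fetch)
    {
            return ((fetch + desc_count - posted - 1) % desc_count);
    }

With desc_count = 64: posted == fetch gives 63 free (empty), while posted == 63 and fetch == 0 gives 0 free, i.e. full right after the initial post.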
*/ rq->posted_index = rq->ring.desc_count - 1; rq->rx_nb_hold = 0; ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, rq->posted_index); rq->need_initial_post = false; ENIC_UNLOCK(softc); } static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque) { struct enic *enic = vnic_dev_priv(vdev); vnic_wq_service(&enic->wq[q_number], cq_desc, completed_index, NULL, opaque); - return 0; + return (0); } static void vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc, u16 in_completed_index, int desc_return, void(*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc, /* struct vnic_rq_buf * *buf, */ int skipped, void *opaque), void *opaque) { - + if_softc_ctx_t scctx; if_rxd_info_t ri = (if_rxd_info_t) opaque; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; u8 packet_error; u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; int cqidx; if_rxd_frag_t frag; + scctx = rq->vdev->softc->scctx; + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color, &q_number, &completed_index, &ingress_port, &fcoe, &eop, &sop, &rss_type, &csum_not_calc, &rss_hash, &bytes_written, &packet_error, &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, &fcs_ok); cqidx = ri->iri_cidx; frag = &ri->iri_frags[0]; frag->irf_idx = cqidx; frag->irf_len = bytes_written; if (++cqidx == rq->ring.desc_count) { cqidx = 0; } ri->iri_cidx = cqidx; ri->iri_nfrags = 1; ri->iri_len = bytes_written; + + if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0) + if (!csum_not_calc && (tcp_udp_csum_ok || ipv4_csum_ok)) { + ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); + } } static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque) { struct enic *enic = vnic_dev_priv(vdev); if_rxd_info_t ri = (if_rxd_info_t) opaque; vnic_rq_service(&enic->rq[ri->iri_qsidx], cq_desc, completed_index, VNIC_RQ_RETURN_DESC, NULL, /* enic_rq_indicate_buf, */ opaque); - return 0; + return (0); } void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx) { struct wq_enet_desc *desc; struct vnic_wq *wq; unsigned int i; /* * Fill WQ descriptor fields that never change. Every descriptor is * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH * descriptors (i.e. request one completion update every 32 packets). 
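The IFCAP_RXCSUM branch in vnic_rq_service() above reports only IP-header validity. A fuller mapping would use the decoded ipv4/tcp/udp bits to report the L4 result as well; the following is a hypothetical sketch against those same decoded fields, not what the driver currently does:

    /* Hypothetical extension of the checksum reporting above. */
    if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0 && !csum_not_calc) {
            if (ipv4 && ipv4_csum_ok)
                    ri->iri_csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
            if ((tcp || udp) && tcp_udp_csum_ok) {
                    ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    ri->iri_csum_data = 0xffff; /* hw validated, no partial sum */
            }
    }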
*/ wq = &enic->wq[queue_idx]; desc = (struct wq_enet_desc *)wq->ring.descs; for (i = 0; i < wq->ring.desc_count; i++, desc++) { desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT; if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1) desc->header_length_flags |= (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT); } } void enic_start_wq(struct enic *enic, uint16_t queue_idx) { vnic_wq_enable(&enic->wq[queue_idx]); } int enic_stop_wq(struct enic *enic, uint16_t queue_idx) { int ret; ret = vnic_wq_disable(&enic->wq[queue_idx]); - if (ret) - return ret; - return 0; + return (ret); } void enic_start_rq(struct enic *enic, uint16_t queue_idx) { struct vnic_rq *rq; rq = &enic->rq[queue_idx]; vnic_rq_enable(rq); enic_initial_post_rx(enic, rq); } + +int +enic_stop_rq(struct enic *enic, uint16_t queue_idx) +{ + int ret; + + ret = vnic_rq_disable(&enic->rq[queue_idx]); + + return (ret); +} + + +void +enic_dev_disable(struct enic *enic) { + vnic_dev_disable(enic->vdev); +} diff --git a/sys/dev/enic/if_enic.c b/sys/dev/enic/if_enic.c index dc0c0d028e20..26776244778e 100644 --- a/sys/dev/enic/if_enic.c +++ b/sys/dev/enic/if_enic.c @@ -1,1594 +1,1733 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "enic.h" #include "opt_inet.h" #include "opt_inet6.h" static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "ENIC"); static const pci_vendor_info_t enic_vendor_info_array[] = { PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET, DRV_DESCRIPTION), PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF, DRV_DESCRIPTION " VF"), /* required last entry */ PVID_END }; static void *enic_register(device_t); static int enic_attach_pre(if_ctx_t); static int enic_msix_intr_assign(if_ctx_t, int); static int enic_attach_post(if_ctx_t); static int enic_detach(if_ctx_t); static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static void enic_queues_free(if_ctx_t); static int enic_rxq_intr(void *); static int enic_event_intr(void *); static int enic_err_intr(void *); static void enic_stop(if_ctx_t); static void enic_init(if_ctx_t); static void enic_multi_set(if_ctx_t); static int enic_mtu_set(if_ctx_t, uint32_t); static void enic_media_status(if_ctx_t, struct ifmediareq *); static int enic_media_change(if_ctx_t); static int enic_promisc_set(if_ctx_t, int); static uint64_t enic_get_counter(if_ctx_t, ift_counter); static void enic_update_admin_status(if_ctx_t); static void enic_txq_timer(if_ctx_t, uint16_t); static int enic_link_is_up(struct enic_softc *); static void enic_link_status(struct enic_softc *); static void enic_set_lladdr(struct enic_softc *); static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void enic_setup_rxq_sysctl(struct vnic_rq *, int, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void enic_setup_sysctl(struct enic_softc *); static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t); static int enic_rx_queue_intr_enable(if_ctx_t, 
uint16_t); static void enic_enable_intr(struct enic_softc *, int); static void enic_disable_intr(struct enic_softc *, int); static void enic_intr_enable_all(if_ctx_t); static void enic_intr_disable_all(if_ctx_t); static int enic_dev_open(struct enic *); static int enic_dev_init(struct enic *); static void *enic_alloc_consistent(void *, size_t, bus_addr_t *, struct iflib_dma_info *, u8 *); static void enic_free_consistent(void *, size_t, void *, bus_addr_t, struct iflib_dma_info *); static int enic_pci_mapping(struct enic_softc *); static void enic_pci_mapping_free(struct enic_softc *); static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int), int (*) (struct vnic_dev *, int *), int arg); static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool); static void enic_update_packet_filter(struct enic *enic); static bool enic_if_needs_restart(if_ctx_t, enum iflib_restart_event); typedef enum { ENIC_BARRIER_RD, ENIC_BARRIER_WR, ENIC_BARRIER_RDWR, } enic_barrier_t; static device_method_t enic_methods[] = { /* Device interface */ DEVMETHOD(device_register, enic_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t enic_driver = { "enic", enic_methods, sizeof(struct enic_softc) }; DRIVER_MODULE(enic, pci, enic_driver, 0, 0); IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array); MODULE_VERSION(enic, 2); MODULE_DEPEND(enic, pci, 1, 1, 1); MODULE_DEPEND(enic, ether, 1, 1, 1); MODULE_DEPEND(enic, iflib, 1, 1, 1); static device_method_t enic_iflib_methods[] = { DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, enic_queues_free), DEVMETHOD(ifdi_attach_pre, enic_attach_pre), DEVMETHOD(ifdi_attach_post, enic_attach_post), DEVMETHOD(ifdi_detach, enic_detach), DEVMETHOD(ifdi_init, enic_init), DEVMETHOD(ifdi_stop, enic_stop), DEVMETHOD(ifdi_multi_set, enic_multi_set), DEVMETHOD(ifdi_mtu_set, enic_mtu_set), DEVMETHOD(ifdi_media_status, enic_media_status), DEVMETHOD(ifdi_media_change, enic_media_change), DEVMETHOD(ifdi_promisc_set, enic_promisc_set), DEVMETHOD(ifdi_get_counter, enic_get_counter), DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status), DEVMETHOD(ifdi_timer, enic_txq_timer), DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all), DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all), DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign), DEVMETHOD(ifdi_needs_restart, enic_if_needs_restart), DEVMETHOD_END }; static driver_t enic_iflib_driver = { "enic", enic_iflib_methods, sizeof(struct enic_softc) }; extern struct if_txrx enic_txrx; static struct if_shared_ctx enic_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = 512, .isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE, .isc_tx_maxsegsize = PAGE_SIZE, /* * These values are used to configure the busdma tag used for receive * descriptors. Each receive descriptor only points to one buffer. 
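Because isc_ntxqs and isc_nrxqs are both 2, iflib allocates two DMA rings per queue set and passes them to the driver as consecutive vaddrs[]/paddrs[] entries. The queue-alloc routines later in this file (enic_tx_queues_alloc()/enic_rx_queues_alloc()) consume them as sketched here, with local names invented for illustration:

    /* TX qset q: ring 0 is the work queue, ring 1 its completion queue. */
    void *tx_wq_descs = vaddrs[q * ntxqs + 0];
    void *tx_cq_descs = vaddrs[q * ntxqs + 1];

    /* RX qset q: ring 0 is the completion queue, ring 1 the fill (RQ) ring. */
    void *rx_cq_descs = vaddrs[q * nrxqs + 0];
    void *rx_rq_descs = vaddrs[q * nrxqs + 1];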
*/ .isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE, /* One buf per * descriptor */ .isc_rx_nsegments = 1, /* One mapping per descriptor */ .isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE, - .isc_admin_intrcnt = 3, + .isc_admin_intrcnt = 2, .isc_vendor_info = enic_vendor_info_array, .isc_driver_version = "1", .isc_driver = &enic_iflib_driver, - .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ, + .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_SKIP_MSIX, /* * Number of receive queues per receive queue set, with associated * descriptor settings for each. */ .isc_nrxqs = 2, .isc_nfl = 1, /* one free list for each receive command * queue */ .isc_nrxd_min = {16, 16}, .isc_nrxd_max = {2048, 2048}, .isc_nrxd_default = {64, 64}, /* * Number of transmit queues per transmit queue set, with associated * descriptor settings for each. */ .isc_ntxqs = 2, .isc_ntxd_min = {16, 16}, .isc_ntxd_max = {2048, 2048}, .isc_ntxd_default = {64, 64}, }; static void * enic_register(device_t dev) { return (&enic_sctx_init); } +static int +enic_allocate_msix(struct enic_softc *softc) { + if_ctx_t ctx; + if_softc_ctx_t scctx; + if_shared_ctx_t sctx; + device_t dev; + cpuset_t cpus; + int queues, vectors, requested; + int err = 0; + + dev = softc->dev; + ctx = softc->ctx; + scctx = softc->scctx; + sctx = iflib_get_sctx(ctx); + + if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0) { + device_printf(dev, "Unable to fetch CPU list\n"); + CPU_COPY(&all_cpus, &cpus); + } + + + queues = CPU_COUNT(&cpus); + queues = imin(queues, scctx->isc_nrxqsets); + queues = imin(queues, scctx->isc_ntxqsets); + requested = queues * 2 + sctx->isc_admin_intrcnt; + scctx->isc_nrxqsets = queues; + scctx->isc_ntxqsets = queues; + + vectors = requested; + if ((err = pci_alloc_msix(dev, &vectors)) != 0) { + device_printf(dev, + "failed to allocate %d MSI-X vectors, err: %d\n", requested, + err); + err = 1; + goto enic_allocate_msix_out; + } else { + if (vectors != requested) { + device_printf(dev, + "Unable to allocate sufficient MSI-X vectors " + "(got %d, need %d)\n", requested, vectors); + pci_release_msi(dev); + err = 1; + goto enic_allocate_msix_out; + } + } + + device_printf(dev, "Using MSI-X interrupts with %d vectors\n", + vectors); + + scctx->isc_intr = IFLIB_INTR_MSIX; + scctx->isc_vectors = vectors; + +enic_allocate_msix_out: + return (err); + +} + +static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { + {0, 0}, /* 0 - 4 Gbps */ + {0, 3}, /* 4 - 10 Gbps */ + {3, 6}, /* 10 - 40 Gbps */ +}; + +static void enic_set_rx_coal_setting(struct enic *enic) +{ + unsigned int speed; + int index = -1; + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + + /* 1. Read the link speed from fw + * 2. Pick the default range for the speed + * 3. 
Update it in enic->rx_coalesce_setting + */ + speed = vnic_dev_port_speed(enic->vdev); + if (ENIC_LINK_SPEED_10G < speed) + index = ENIC_LINK_40G_INDEX; + else if (ENIC_LINK_SPEED_4G < speed) + index = ENIC_LINK_10G_INDEX; + else + index = ENIC_LINK_4G_INDEX; + + rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; + rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; + rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; + + /* Start with the value provided by UCSM */ + for (index = 0; index < enic->rq_count; index++) + enic->cq[index].cur_rx_coal_timeval = + enic->config.intr_timer_usec; + + rx_coal->use_adaptive_rx_coalesce = 1; +} + static int enic_attach_pre(if_ctx_t ctx) { if_softc_ctx_t scctx; struct enic_softc *softc; struct vnic_dev *vdev; struct enic *enic; device_t dev; int err = -1; int rc = 0; int i; u64 a0 = 0, a1 = 0; int wait = 1000; struct vnic_stats *stats; int ret; dev = iflib_get_dev(ctx); softc = iflib_get_softc(ctx); softc->dev = dev; softc->ctx = ctx; softc->sctx = iflib_get_sctx(ctx); softc->scctx = iflib_get_softc_ctx(ctx); softc->ifp = iflib_get_ifp(ctx); softc->media = iflib_get_media(ctx); softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN * ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->mta == NULL) return (ENOMEM); scctx = softc->scctx; mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF); pci_enable_busmaster(softc->dev); if (enic_pci_mapping(softc)) return (ENXIO); enic = &softc->enic; enic->softc = softc; vdev = &softc->vdev; vdev->softc = softc; enic->vdev = vdev; vdev->priv = enic; ENIC_LOCK(softc); vnic_dev_register(vdev, &softc->mem, 1); enic->vdev = vdev; + vnic_dev_cmd_init(enic->vdev); + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN); iflib_set_mac(ctx, softc->mac_addr); vnic_register_cbacks(enic->vdev, enic_alloc_consistent, enic_free_consistent); /* * Allocate the consistent memory for stats and counters upfront so * both primary and secondary processes can access them. */ ENIC_UNLOCK(softc); err = vnic_dev_alloc_stats_mem(enic->vdev); ENIC_LOCK(softc); if (err) { dev_err(enic, "Failed to allocate cmd memory, aborting\n"); goto err_out_unregister; } vnic_dev_stats_clear(enic->vdev); ret = vnic_dev_stats_dump(enic->vdev, &stats); if (ret) { dev_err(enic, "Error in getting stats\n"); goto err_out_unregister; } err = vnic_dev_alloc_counter_mem(enic->vdev); if (err) { dev_err(enic, "Failed to allocate counter memory, aborting\n"); goto err_out_unregister; } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { dev_err(enic, "vNIC dev open failed, aborting\n"); goto err_out_unregister; } /* Set ingress vlan rewrite mode before vnic initialization */ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, enic->ig_vlan_rewrite_mode); if (err) { dev_err(enic, "Failed to set ingress vlan rewrite mode, aborting.\n"); goto err_out_dev_close; } /* * Issue device init to initialize the vnic-to-switch link. We'll * start with carrier off and wait for link UP notification later to * turn on carrier. We don't need to wait here for the * vnic-to-switch link initialization to complete; link UP * notification is the indication that the process is complete. 
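The index selection in enic_set_rx_coal_setting() above maps the port speed onto the mod_range table; per the ENIC_LINK_SPEED_* macros the speed is in Mbps. A few worked values:

    /* speed = 40000 (40G): 10000 < speed             -> index 2 -> {3, 6} */
    /* speed = 10000 (10G): not > 10000, 4000 < speed -> index 1 -> {0, 3} */
    /* speed =  1000  (1G): neither comparison holds  -> index 0 -> {0, 0} */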
*/ err = vnic_dev_init(enic->vdev, 0); if (err) { dev_err(enic, "vNIC dev init failed, aborting\n"); goto err_out_dev_close; } err = enic_dev_init(enic); if (err) { dev_err(enic, "Device initialization failed, aborting\n"); goto err_out_dev_close; } ENIC_UNLOCK(softc); enic->port_mtu = vnic_dev_mtu(enic->vdev); softc->scctx = iflib_get_softc_ctx(ctx); scctx = softc->scctx; scctx->isc_txrx = &enic_txrx; - scctx->isc_capabilities = scctx->isc_capenable = 0; + scctx->isc_capabilities = scctx->isc_capenable = \ + IFCAP_HWCSUM; scctx->isc_tx_csum_flags = 0; + if_setmtu(softc->ifp, enic->config.mtu); scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \ ETHER_CRC_LEN; scctx->isc_nrxqsets_max = enic->conf_rq_count; scctx->isc_ntxqsets_max = enic->conf_wq_count; scctx->isc_nrxqsets = enic->conf_rq_count; scctx->isc_ntxqsets = enic->conf_wq_count; for (i = 0; i < enic->conf_wq_count; i++) { scctx->isc_ntxd[i] = enic->config.wq_desc_count; scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc) * scctx->isc_ntxd[i]; scctx->isc_ntxd[i + enic->conf_wq_count] = enic->config.wq_desc_count; scctx->isc_txqsizes[i + enic->conf_wq_count] = sizeof(struct cq_desc) * scctx->isc_ntxd[i + enic->conf_wq_count]; } for (i = 0; i < enic->conf_rq_count; i++) { scctx->isc_nrxd[i] = enic->config.rq_desc_count; scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) * scctx->isc_nrxd[i]; scctx->isc_nrxd[i + enic->conf_rq_count] = enic->config.rq_desc_count; scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count]; } scctx->isc_tx_nsegments = 31; - scctx->isc_vectors = enic->conf_cq_count; scctx->isc_msix_bar = -1; ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL); /* * Allocate the CQ here since TX is called first before RX for now * assume RX and TX are the same */ if (softc->enic.cq == NULL) softc->enic.cq = malloc(sizeof(struct vnic_cq) * softc->enic.wq_count + softc->enic.rq_count, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.cq == NULL) return (ENOMEM); softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count; /* * Allocate the consistent memory for stats and counters upfront so * both primary and secondary processes can access them. 
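Each ring in a queue set is sized above as descriptor count times descriptor width. Assuming a wq_desc_count of 512 and the 16-byte descriptor layouts this hardware uses (struct cq_desc is documented as 16B in cq_desc.h), one TX queue set works out to:

    /* Illustration only; 512 is an assumed wq_desc_count. */
    isc_ntxd[0]     = 512;          /* ring 0: work queue descriptors   */
    isc_txqsizes[0] = 512 * 16;     /* 8192 bytes                       */
    isc_ntxd[1]     = 512;          /* ring 1: completion descriptors   */
    isc_txqsizes[1] = 512 * 16;     /* 8192 bytes                       */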
*/ err = vnic_dev_alloc_stats_mem(enic->vdev); if (err) { dev_err(enic, "Failed to allocate cmd memory, aborting\n"); + goto err_out_dev_close; + } + + err = enic_allocate_msix(softc); + if (err) { + dev_err(enic, "Failed to allocate MSIX, aborting\n"); + goto err_out_dev_close; } return (rc); err_out_dev_close: vnic_dev_close(enic->vdev); + vnic_dev_deinit_devcmd2(enic->vdev); err_out_unregister: free(softc->vdev.devcmd, M_DEVBUF); free(softc->enic.intr_queues, M_DEVBUF); free(softc->enic.cq, M_DEVBUF); free(softc->mta, M_DEVBUF); rc = -1; pci_disable_busmaster(softc->dev); enic_pci_mapping_free(softc); mtx_destroy(&softc->enic_lock); return (rc); } static int enic_msix_intr_assign(if_ctx_t ctx, int msix) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; int error; int i; char irq_name[16]; softc = iflib_get_softc(ctx); enic = &softc->enic; scctx = softc->scctx; ENIC_LOCK(softc); vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); ENIC_UNLOCK(softc); enic->intr_queues = malloc(sizeof(*enic->intr_queues) * enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO); enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT | M_ZERO); for (i = 0; i < scctx->isc_nrxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i, device_get_unit(softc->dev)); error = iflib_irq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX, enic_rxq_intr, &enic->rq[i], i, irq_name); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register rxq %d interrupt handler\n", i); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); } for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i - scctx->isc_nrxqsets, device_get_unit(softc->dev)); - - iflib_softirq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX, &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets, irq_name); - + iflib_softirq_alloc_generic(ctx, + &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX, + &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets, + irq_name); enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); } i = scctx->isc_nrxqsets + scctx->isc_ntxqsets; error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq, i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register event interrupt handler\n"); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); i++; error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq, i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register event interrupt handler\n"); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); enic->intr_count = msix; return (0); } static void enic_free_irqs(struct enic_softc *softc) { if_softc_ctx_t scctx; struct enic *enic; int i; 
scctx = softc->scctx; enic = &softc->enic; for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) { iflib_irq_free(softc->ctx, &enic->intr_queues[i].intr_irq); } iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq); iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq); free(enic->intr_queues, M_DEVBUF); free(enic->intr, M_DEVBUF); } static int enic_attach_post(if_ctx_t ctx) { struct enic *enic; struct enic_softc *softc; int error = 0; softc = iflib_get_softc(ctx); enic = &softc->enic; enic_setup_sysctl(softc); enic_init_vnic_resources(enic); + enic_set_rx_coal_setting(enic); enic_setup_finish(enic); ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); return (error); } static int enic_detach(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; softc = iflib_get_softc(ctx); enic = &softc->enic; vnic_dev_notify_unset(enic->vdev); enic_free_irqs(softc); ENIC_LOCK(softc); + vnic_dev_deinit(enic->vdev); vnic_dev_close(enic->vdev); + vnic_dev_deinit_devcmd2(enic->vdev); free(softc->vdev.devcmd, M_DEVBUF); pci_disable_busmaster(softc->dev); enic_pci_mapping_free(softc); ENIC_UNLOCK(softc); return 0; } static int enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs, int ntxqs, int ntxqsets) { struct enic_softc *softc; int q; softc = iflib_get_softc(ctx); /* Allocate the array of transmit queues */ softc->enic.wq = malloc(sizeof(struct vnic_wq) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.wq == NULL) return (ENOMEM); /* Initialize driver state for each transmit queue */ /* * Allocate queue state that is shared with the device. This check * and call is performed in both enic_tx_queues_alloc() and * enic_rx_queues_alloc() so that we don't have to care which order * iflib invokes those routines in. */ /* Record descriptor ring vaddrs and paddrs */ ENIC_LOCK(softc); for (q = 0; q < ntxqsets; q++) { struct vnic_wq *wq; struct vnic_cq *cq; unsigned int cq_wq; wq = &softc->enic.wq[q]; cq_wq = enic_cq_wq(&softc->enic, q); cq = &softc->enic.cq[cq_wq]; /* Completion ring */ wq->vdev = softc->enic.vdev; wq->index = q; wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ, wq->index); vnic_wq_disable(wq); wq->ring.desc_size = sizeof(struct wq_enet_desc); wq->ring.desc_count = softc->scctx->isc_ntxd[q]; wq->ring.desc_avail = wq->ring.desc_count - 1; wq->ring.last_count = wq->ring.desc_count; wq->head_idx = 0; wq->tail_idx = 0; wq->ring.size = wq->ring.desc_count * wq->ring.desc_size; wq->ring.descs = vaddrs[q * ntxqs + 0]; wq->ring.base_addr = paddrs[q * ntxqs + 0]; /* Command ring */ cq->vdev = softc->enic.vdev; cq->index = cq_wq; cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ, cq->index); cq->ring.desc_size = sizeof(struct cq_enet_wq_desc); cq->ring.desc_count = softc->scctx->isc_ntxd[q]; cq->ring.desc_avail = cq->ring.desc_count - 1; cq->ring.size = cq->ring.desc_count * cq->ring.desc_size; cq->ring.descs = vaddrs[q * ntxqs + 1]; cq->ring.base_addr = paddrs[q * ntxqs + 1]; } ENIC_UNLOCK(softc); return (0); } static int enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs, int nrxqs, int nrxqsets) { struct enic_softc *softc; int q; softc = iflib_get_softc(ctx); /* Allocate the array of receive queues */ softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.rq == NULL) return (ENOMEM); /* Initialize driver state for each receive queue */ /* * Allocate queue state that is shared with the device. 
This check * and call is performed in both enic_tx_queues_alloc() and * enic_rx_queues_alloc() so that we don't have to care which order * iflib invokes those routines in. */ /* Record descriptor ring vaddrs and paddrs */ ENIC_LOCK(softc); for (q = 0; q < nrxqsets; q++) { struct vnic_rq *rq; struct vnic_cq *cq; unsigned int cq_rq; rq = &softc->enic.rq[q]; cq_rq = enic_cq_rq(&softc->enic, q); cq = &softc->enic.cq[cq_rq]; /* Completion ring */ cq->vdev = softc->enic.vdev; cq->index = cq_rq; cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ, cq->index); cq->ring.desc_size = sizeof(struct cq_enet_wq_desc); cq->ring.desc_count = softc->scctx->isc_nrxd[1]; cq->ring.desc_avail = cq->ring.desc_count - 1; cq->ring.size = cq->ring.desc_count * cq->ring.desc_size; cq->ring.descs = vaddrs[q * nrxqs + 0]; cq->ring.base_addr = paddrs[q * nrxqs + 0]; /* Command ring(s) */ rq->vdev = softc->enic.vdev; rq->index = q; rq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_RQ, rq->index); vnic_rq_disable(rq); rq->ring.desc_size = sizeof(struct rq_enet_desc); rq->ring.desc_count = softc->scctx->isc_nrxd[0]; rq->ring.desc_avail = rq->ring.desc_count - 1; rq->ring.size = rq->ring.desc_count * rq->ring.desc_size; rq->ring.descs = vaddrs[q * nrxqs + 1]; rq->ring.base_addr = paddrs[q * nrxqs + 1]; rq->need_initial_post = true; } ENIC_UNLOCK(softc); return (0); } static void enic_queues_free(if_ctx_t ctx) { struct enic_softc *softc; softc = iflib_get_softc(ctx); free(softc->enic.rq, M_DEVBUF); free(softc->enic.wq, M_DEVBUF); free(softc->enic.cq, M_DEVBUF); } static int enic_rxq_intr(void *rxq) { struct vnic_rq *rq; if_t ifp; rq = (struct vnic_rq *)rxq; ifp = iflib_get_ifp(rq->vdev->softc->ctx); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return (FILTER_HANDLED); return (FILTER_SCHEDULE_THREAD); } static int enic_event_intr(void *vsc) { struct enic_softc *softc; struct enic *enic; uint32_t mtu; softc = vsc; enic = &softc->enic; mtu = vnic_dev_mtu(enic->vdev); if (mtu && mtu != enic->port_mtu) { enic->port_mtu = mtu; } enic_link_status(softc); return (FILTER_HANDLED); } static int enic_err_intr(void *vsc) { struct enic_softc *softc; softc = vsc; enic_stop(softc->ctx); enic_init(softc->ctx); return (FILTER_HANDLED); } static void enic_stop(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; unsigned int index; + struct vnic_wq *wq; + struct vnic_rq *rq; + struct vnic_cq *cq; + unsigned int cq_wq, cq_rq; + softc = iflib_get_softc(ctx); scctx = softc->scctx; enic = &softc->enic; if (softc->stopped) return; softc->link_active = 0; softc->stopped = 1; + enic_dev_disable(enic); + for (index = 0; index < scctx->isc_ntxqsets; index++) { enic_stop_wq(enic, index); vnic_wq_clean(&enic->wq[index]); vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]); + + wq = &softc->enic.wq[index]; + wq->ring.desc_avail = wq->ring.desc_count - 1; + wq->ring.last_count = wq->ring.desc_count; + wq->head_idx = 0; + wq->tail_idx = 0; + + cq_wq = enic_cq_wq(&softc->enic, index); + cq = &softc->enic.cq[cq_wq]; + cq->ring.desc_avail = cq->ring.desc_count - 1; } for (index = 0; index < scctx->isc_nrxqsets; index++) { + enic_stop_rq(enic, index); vnic_rq_clean(&enic->rq[index]); vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]); + + rq = &softc->enic.rq[index]; + cq_rq = enic_cq_rq(&softc->enic, index); + cq = &softc->enic.cq[cq_rq]; + + cq->ring.desc_avail = cq->ring.desc_count - 1; + rq->ring.desc_avail = rq->ring.desc_count - 1; + rq->need_initial_post = true; } for (index = 0; index < scctx->isc_vectors; 
index++) { vnic_intr_clean(&enic->intr[index]); } } static void enic_init(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; unsigned int index; softc = iflib_get_softc(ctx); scctx = softc->scctx; enic = &softc->enic; + + enic_init_vnic_resources(enic); + for (index = 0; index < scctx->isc_ntxqsets; index++) enic_prep_wq_for_simple_tx(&softc->enic, index); for (index = 0; index < scctx->isc_ntxqsets; index++) enic_start_wq(enic, index); for (index = 0; index < scctx->isc_nrxqsets; index++) enic_start_rq(enic, index); /* Use the current MAC address. */ bcopy(if_getlladdr(softc->ifp), softc->lladdr, ETHER_ADDR_LEN); enic_set_lladdr(softc); ENIC_LOCK(softc); vnic_dev_enable_wait(enic->vdev); ENIC_UNLOCK(softc); + softc->stopped = 0; + enic_link_status(softc); } static void enic_del_mcast(struct enic_softc *softc) { struct enic *enic; int i; enic = &softc->enic; for (i=0; i < softc->mc_count; i++) { vnic_dev_del_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]); } softc->multicast = 0; softc->mc_count = 0; } static void enic_add_mcast(struct enic_softc *softc) { struct enic *enic; int i; enic = &softc->enic; for (i=0; i < softc->mc_count; i++) { vnic_dev_add_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]); } softc->multicast = 1; } static u_int enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx) { uint8_t *mta = arg; if (idx == ENIC_MAX_MULTICAST_ADDRESSES) return (0); bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN); return (1); } static void enic_multi_set(if_ctx_t ctx) { if_t ifp; struct enic_softc *softc; u_int count; softc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); ENIC_LOCK(softc); enic_del_mcast(softc); count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta); softc->mc_count = count; enic_add_mcast(softc); ENIC_UNLOCK(softc); if (if_getflags(ifp) & IFF_PROMISC) { softc->promisc = 1; } else { softc->promisc = 0; } if (if_getflags(ifp) & IFF_ALLMULTI) { softc->allmulti = 1; } else { softc->allmulti = 0; } enic_update_packet_filter(&softc->enic); } static int enic_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx); softc = iflib_get_softc(ctx); enic = &softc->enic; + enic_stop(softc->ctx); if (mtu > enic->port_mtu){ return (EINVAL); } enic->config.mtu = mtu; scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + enic_init(softc->ctx); return (0); } static void enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct enic_softc *softc; struct ifmedia_entry *next; uint32_t speed; uint64_t target_baudrate; softc = iflib_get_softc(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (enic_link_is_up(softc) != 0) { ENIC_LOCK(softc); speed = vnic_dev_port_speed(&softc->vdev); ENIC_UNLOCK(softc); target_baudrate = 1000ull * speed; LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) { if (ifmedia_baudrate(next->ifm_media) == target_baudrate) { ifmr->ifm_active |= next->ifm_media; } } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_AUTO; } else ifmr->ifm_active |= IFM_NONE; } static int enic_media_change(if_ctx_t ctx) { return (ENODEV); } static int enic_promisc_set(if_ctx_t ctx, int flags) { if_t ifp; struct enic_softc *softc; softc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); if (if_getflags(ifp) & IFF_PROMISC) { softc->promisc = 1; } else { softc->promisc = 0; } if (if_getflags(ifp) & IFF_ALLMULTI) { softc->allmulti = 1; } else { softc->allmulti = 0; } 
enic_update_packet_filter(&softc->enic); return (0); } static uint64_t enic_get_counter(if_ctx_t ctx, ift_counter cnt) { if_t ifp = iflib_get_ifp(ctx); if (cnt < IFCOUNTERS) return if_get_counter_default(ifp, cnt); return (0); } static void enic_update_admin_status(if_ctx_t ctx) { struct enic_softc *softc; - softc = iflib_get_softc(ctx); enic_link_status(softc); } static void enic_txq_timer(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; struct enic *enic; struct vnic_stats *stats; int ret; softc = iflib_get_softc(ctx); enic = &softc->enic; ENIC_LOCK(softc); ret = vnic_dev_stats_dump(enic->vdev, &stats); ENIC_UNLOCK(softc); if (ret) { dev_err(enic, "Error in getting stats\n"); } } static int enic_link_is_up(struct enic_softc *softc) { return (vnic_dev_link_status(&softc->vdev) == 1); } static void enic_link_status(struct enic_softc *softc) { if_ctx_t ctx; uint64_t speed; int link; ctx = softc->ctx; link = enic_link_is_up(softc); speed = IF_Gbps(10); ENIC_LOCK(softc); speed = vnic_dev_port_speed(&softc->vdev); ENIC_UNLOCK(softc); if (link != 0 && softc->link_active == 0) { softc->link_active = 1; iflib_link_state_change(ctx, LINK_STATE_UP, speed); } else if (link == 0 && softc->link_active != 0) { softc->link_active = 0; iflib_link_state_change(ctx, LINK_STATE_DOWN, speed); } } static void enic_set_lladdr(struct enic_softc *softc) { struct enic *enic; enic = &softc->enic; ENIC_LOCK(softc); vnic_dev_add_addr(enic->vdev, softc->lladdr); ENIC_UNLOCK(softc); } static void enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *txsnode; struct sysctl_oid_list *txslist; struct vnic_stats *stats = wq[i].vdev->stats; txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics"); txslist = SYSCTL_CHILDREN(txsnode); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD, &stats->tx.tx_frames_ok, "TX Frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD, &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD, &stats->tx.tx_multicast_frames_ok, "TX multicast frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD, &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD, &stats->tx.tx_bytes_ok, "TX bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD, &stats->tx.tx_drops, "TX drops"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD, &stats->tx.tx_errors, "TX errors"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD, &stats->tx.tx_tso, "TX TSO"); } static void enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *rxsnode; struct sysctl_oid_list *rxslist; struct vnic_stats *stats = rq[i].vdev->stats; rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
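/* Each counter below maps onto a field of the vnic_stats block that the adapter DMAs to the host; see vnic_dev_stats_dump(). */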
rxslist = SYSCTL_CHILDREN(rxsnode); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD, &stats->rx.rx_frames_ok, "RX Frames OK"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD, &stats->rx.rx_frames_total, "RX frames total"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD, &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD, &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD, &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD, &stats->rx.rx_bytes_ok, "RX bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD, &stats->rx.rx_drop, "RX drop"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD, &stats->rx.rx_errors, "RX errors"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD, &stats->rx.rx_rss, "RX rss"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &stats->rx.rx_crc_errors, "RX crc errors"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &stats->rx.rx_frames_64, "RX frames 64"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD, &stats->rx.rx_frames_127, "RX frames 127"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD, &stats->rx.rx_frames_255, "RX frames 255"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD, &stats->rx.rx_frames_511, "RX frames 511"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD, &stats->rx.rx_frames_1023, "RX frames 1023"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD, &stats->rx.rx_frames_1518, "RX frames 1518"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD, &stats->rx.rx_frames_to_max, "RX frames to max"); } static void enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child); enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child); } static void enic_setup_sysctl(struct enic_softc *softc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = softc->dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); enic_setup_queue_sysctl(softc, ctx, child); } static void enic_enable_intr(struct enic_softc *softc, int irq) { struct enic *enic = &softc->enic; vnic_intr_unmask(&enic->intr[irq]); vnic_intr_return_all_credits(&enic->intr[irq]); } static void enic_disable_intr(struct enic_softc *softc, int irq) { struct enic *enic = &softc->enic; vnic_intr_mask(&enic->intr[irq]); vnic_intr_masked(&enic->intr[irq]); /* flush write */ } static int enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; if_softc_ctx_t scctx; softc = iflib_get_softc(ctx); scctx = softc->scctx; enic_enable_intr(softc, qid + 
scctx->isc_nrxqsets); return 0; } static int enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; softc = iflib_get_softc(ctx); enic_enable_intr(softc, qid); return 0; } static void enic_intr_enable_all(if_ctx_t ctx) { struct enic_softc *softc; if_softc_ctx_t scctx; int i; softc = iflib_get_softc(ctx); scctx = softc->scctx; for (i = 0; i < scctx->isc_vectors; i++) { enic_enable_intr(softc, i); } } static void enic_intr_disable_all(if_ctx_t ctx) { struct enic_softc *softc; if_softc_ctx_t scctx; int i; softc = iflib_get_softc(ctx); scctx = softc->scctx; /* * iflib may invoke this routine before enic_attach_post() has run, * which is before the top level shared data area is initialized and * the device made aware of it. */ for (i = 0; i < scctx->isc_vectors; i++) { enic_disable_intr(softc, i); } } static int enic_dev_open(struct enic *enic) { int err; int flags = CMD_OPENF_IG_DESCCACHE; err = enic_dev_wait(enic->vdev, vnic_dev_open, vnic_dev_open_done, flags); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); return err; } static int enic_dev_init(struct enic *enic) { int err; vnic_dev_intr_coal_timer_info_default(enic->vdev); /* * Get vNIC configuration */ err = enic_get_vnic_config(enic); if (err) { dev_err(enic, "Get vNIC configuration failed, aborting\n"); return err; } /* Get available resource counts */ enic_get_res_counts(enic); /* Queue counts may be zero here. */ enic->intr_queues = malloc(sizeof(*enic->intr_queues) * enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO); vnic_dev_set_reset_flag(enic->vdev, 0); enic->max_flow_counter = -1; /* set up link status checking */ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ enic->overlay_offload = false; if (enic->disable_overlay && enic->vxlan) { /* * Explicitly disable overlay offload as the setting is * sticky, and resetting vNIC does not disable it. */ if (vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) { dev_err(enic, "failed to disable overlay offload\n"); } else { dev_info(enic, "Overlay offload is disabled\n"); } } if (!enic->disable_overlay && enic->vxlan && /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */ vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) { enic->overlay_offload = true; enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT; dev_info(enic, "Overlay offload is enabled\n"); /* * Reset the vxlan port to the default, as the NIC firmware * does not reset it automatically and keeps the old setting.
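* (ENIC_DEFAULT_VXLAN_PORT is the IANA-assigned VXLAN port, 4789.)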
*/ if (vnic_dev_overlay_offload_cfg(enic->vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) { dev_err(enic, "failed to update vxlan port\n"); - return -EINVAL; + return (EINVAL); } } return 0; } static void * enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle, struct iflib_dma_info *res, u8 * name) { void *vaddr; *dma_handle = 0; struct enic *enic = (struct enic *)priv; int rz; rz = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT); if (rz) { pr_err("%s : Failed to allocate memory requested for %s\n", __func__, name); return NULL; } vaddr = res->idi_vaddr; *dma_handle = res->idi_paddr; return vaddr; } static void enic_free_consistent(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle, struct iflib_dma_info *res) { iflib_dma_free(res); } static int enic_pci_mapping(struct enic_softc *softc) { int rc; rc = enic_map_bar(softc, &softc->mem, 0, true); if (rc) return rc; rc = enic_map_bar(softc, &softc->io, 2, false); return rc; } static void enic_pci_mapping_free(struct enic_softc *softc) { if (softc->mem.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->mem.rid, softc->mem.res); softc->mem.res = NULL; if (softc->io.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->io.rid, softc->io.res); softc->io.res = NULL; } static int enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int), int (*finished) (struct vnic_dev *, int *), int arg) { int done; int err; int i; err = start(vdev, arg); if (err) return err; /* Wait for func to complete...2 seconds max */ for (i = 0; i < 2000; i++) { err = finished(vdev, &done); if (err) return err; if (done) return 0; usleep(1000); } - return -ETIMEDOUT; + return (ETIMEDOUT); } static int enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num, bool shareable) { uint32_t flag; if (bar->res != NULL) { device_printf(softc->dev, "Bar %d already mapped\n", bar_num); - return EDOOFUS; + return (EDOOFUS); } bar->rid = PCIR_BAR(bar_num); flag = RF_ACTIVE; if (shareable) flag |= RF_SHAREABLE; if ((bar->res = bus_alloc_resource_any(softc->dev, SYS_RES_MEMORY, &bar->rid, flag)) == NULL) { device_printf(softc->dev, "PCI BAR%d mapping failure\n", bar_num); return (ENXIO); } bar->tag = rman_get_bustag(bar->res); bar->handle = rman_get_bushandle(bar->res); bar->size = rman_get_size(bar->res); return 0; } void enic_init_vnic_resources(struct enic *enic) { unsigned int error_interrupt_enable = 1; unsigned int error_interrupt_offset = 0; unsigned int rxq_interrupt_enable = 0; unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; unsigned int txq_interrupt_enable = 0; - unsigned int txq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; + unsigned int txq_interrupt_offset; unsigned int index = 0; unsigned int cq_idx; if_softc_ctx_t scctx; scctx = enic->softc->scctx; - rxq_interrupt_enable = 1; - txq_interrupt_enable = 1; + txq_interrupt_enable = 0; rxq_interrupt_offset = 0; - txq_interrupt_offset = enic->intr_count - 2; - txq_interrupt_offset = 1; + txq_interrupt_offset = scctx->isc_nrxqsets; for (index = 0; index < enic->intr_count; index++) { vnic_intr_alloc(enic->vdev, &enic->intr[index], index); } for (index = 0; index < scctx->isc_nrxqsets; index++) { cq_idx = enic_cq_rq(enic, index); vnic_rq_clean(&enic->rq[index]); vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable, error_interrupt_offset); vnic_cq_clean(&enic->cq[cq_idx]); vnic_cq_init(&enic->cq[cq_idx], 0 /* flow_control_enable */ , 1 /* color_enable */ , 0 /* cq_head */ , 0 /* 
cq_tail */ , 1 /* cq_tail_color */ , rxq_interrupt_enable, 1 /* cq_entry_enable */ , 0 /* cq_message_enable */ , rxq_interrupt_offset, 0 /* cq_message_addr */ ); if (rxq_interrupt_enable) rxq_interrupt_offset++; } for (index = 0; index < scctx->isc_ntxqsets; index++) { cq_idx = enic_cq_wq(enic, index); vnic_wq_clean(&enic->wq[index]); vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable, error_interrupt_offset); /* Compute unsupported ol flags for enic_prep_pkts() */ enic->wq[index].tx_offload_notsup_mask = 0; vnic_cq_clean(&enic->cq[cq_idx]); vnic_cq_init(&enic->cq[cq_idx], 0 /* flow_control_enable */ , 1 /* color_enable */ , 0 /* cq_head */ , 0 /* cq_tail */ , 1 /* cq_tail_color */ , txq_interrupt_enable, 1, 0, txq_interrupt_offset, 0 /* (u64)enic->wq[index].cqmsg_rz->iova */ ); } for (index = 0; index < enic->intr_count; index++) { vnic_intr_init(&enic->intr[index], 125, enic->config.intr_timer_type, /* mask_on_assertion */ 1); } } static void enic_update_packet_filter(struct enic *enic) { struct enic_softc *softc = enic->softc; ENIC_LOCK(softc); vnic_dev_packet_filter(enic->vdev, softc->directed, softc->multicast, softc->broadcast, softc->promisc, softc->allmulti); ENIC_UNLOCK(softc); } static bool -enic_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) +enic_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (false); } } int enic_setup_finish(struct enic *enic) { struct enic_softc *softc = enic->softc; /* Default conf */ softc->directed = 1; softc->multicast = 0; softc->broadcast = 1; softc->promisc = 0; softc->allmulti = 1; enic_update_packet_filter(enic); return 0; } diff --git a/sys/dev/enic/vnic_cq.h b/sys/dev/enic/vnic_cq.h index 26f9009612c5..b4549ee58c64 100644 --- a/sys/dev/enic/vnic_cq.h +++ b/sys/dev/enic/vnic_cq.h @@ -1,164 +1,163 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_CQ_H_ #define _VNIC_CQ_H_ #include "cq_desc.h" #include "vnic_dev.h" /* Completion queue control */ struct vnic_cq_ctrl { u64 ring_base; /* 0x00 */ #define CQ_RING_BASE 0x00 u32 ring_size; /* 0x08 */ #define CQ_RING_SIZE 0x08 u32 pad0; u32 flow_control_enable; /* 0x10 */ #define CQ_FLOW_CONTROL_ENABLE 0x10 u32 pad1; u32 color_enable; /* 0x18 */ #define CQ_COLOR_ENABLE 0x18 u32 pad2; u32 cq_head; /* 0x20 */ #define CQ_HEAD 0x20 u32 pad3; u32 cq_tail; /* 0x28 */ #define CQ_TAIL 0x28 u32 pad4; u32 cq_tail_color; /* 0x30 */ #define CQ_TAIL_COLOR 0x30 u32 pad5; u32 interrupt_enable; /* 0x38 */ #define CQ_INTR_ENABLE 0x38 u32 pad6; u32 cq_entry_enable; /* 0x40 */ #define CQ_ENTRY_ENABLE 0x40 u32 pad7; u32 cq_message_enable; /* 0x48 */ #define CQ_MESSAGE_ENABLE 0x48 u32 pad8; u32 interrupt_offset; /* 0x50 */ #define CQ_INTR_OFFSET 0x50 u32 pad9; u64 cq_message_addr; /* 0x58 */ #define CQ_MESSAGE_ADDR 0x58 u32 pad10; }; #ifdef ENIC_AIC struct vnic_rx_bytes_counter { unsigned int small_pkt_bytes_cnt; unsigned int large_pkt_bytes_cnt; }; #endif struct vnic_cq { unsigned int index; struct vnic_dev *vdev; struct vnic_res *ctrl; struct vnic_dev_ring ring; unsigned int to_clean; unsigned int last_color; unsigned int interrupt_offset; + unsigned int cur_rx_coal_timeval; + unsigned int tobe_rx_coal_timeval; #ifdef ENIC_AIC struct vnic_rx_bytes_counter pkt_size_counter; unsigned int cur_rx_coal_timeval; unsigned int tobe_rx_coal_timeval; ktime_t prev_ts; #endif int ntxqsets; int nrxqsets; int ntxqsets_start; int nrxqsets_start; }; -void vnic_cq_free(struct vnic_cq *cq); void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int message_enable, unsigned int interrupt_offset, u64 message_addr); void vnic_cq_clean(struct vnic_cq *cq); -int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count, - unsigned int desc_size); static inline unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do, int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque), void *opaque) { struct cq_desc *cq_desc; unsigned int work_done = 0; u16 q_number, completed_index; u8 type, color; cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); while (color != cq->last_color) { if ((*q_service)(cq->vdev, cq_desc, type, q_number, completed_index, opaque)) break; cq->to_clean++; if (cq->to_clean == cq->ring.desc_count) { cq->to_clean = 0; cq->last_color = cq->last_color ? 0 : 1; } cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); work_done++; if (work_done >= work_to_do) break; } return work_done; } static inline unsigned int vnic_cq_work(struct vnic_cq *cq, unsigned int work_to_do) { struct cq_desc *cq_desc; unsigned int work_avail = 0; u16 q_number, completed_index; u8 type, color; u32 to_clean, last_color; to_clean = cq->to_clean; last_color = cq->last_color; cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); while (color != last_color) { to_clean++; if (to_clean == cq->ring.desc_count) { to_clean = 0; last_color = last_color ? 
0 : 1; } cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); work_avail++; if (work_avail >= work_to_do) break; } return work_avail; } #endif /* _VNIC_CQ_H_ */ diff --git a/sys/dev/enic/vnic_dev.c b/sys/dev/enic/vnic_dev.c index 3425d7372e56..2d555cb2b34d 100644 --- a/sys/dev/enic/vnic_dev.c +++ b/sys/dev/enic/vnic_dev.c @@ -1,1039 +1,1214 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #include "enic.h" #include "vnic_dev.h" #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_nic.h" #include "vnic_stats.h" #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ sizeof(struct vnic_resource) * RES_TYPE_MAX) #define VNIC_RES_STRIDE 128 #define VNIC_MAX_FLOW_COUNTERS 2048 void *vnic_dev_priv(struct vnic_dev *vdev) { return vdev->priv; } void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, bus_addr_t *dma_handle, struct iflib_dma_info *res,u8 *name), void (*free_consistent)(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle,struct iflib_dma_info *res)) { vdev->alloc_consistent = alloc_consistent; vdev->free_consistent = free_consistent; } static int vnic_dev_discover_res(struct vnic_dev *vdev, struct vnic_dev_bar *bar, unsigned int num_bars) { struct enic_softc *softc = vdev->softc; struct vnic_resource_header __iomem *rh; struct mgmt_barmap_hdr __iomem *mrh; struct vnic_resource __iomem *r; int r_offset; u8 type; if (num_bars == 0) - return -EINVAL; + return (EINVAL); rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO); mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO); if (!rh) { pr_err("vNIC BAR0 res hdr not mem-mapped\n"); free(rh, M_DEVBUF); free(mrh, M_DEVBUF); - return -EINVAL; + return (EINVAL); } /* Check for mgmt vnic in addition to normal vnic */ ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4); ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4); if ((rh->magic != VNIC_RES_MAGIC) || (rh->version != VNIC_RES_VERSION)) { if ((mrh->magic != MGMTVNIC_MAGIC) || mrh->version != MGMTVNIC_VERSION) { pr_err("vNIC BAR0 res magic/version error " \ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, MGMTVNIC_MAGIC, MGMTVNIC_VERSION, rh->magic, rh->version); free(rh, M_DEVBUF); free(mrh, M_DEVBUF); - return -EINVAL; + return (EINVAL); } } if (mrh->magic == MGMTVNIC_MAGIC) r_offset = sizeof(*mrh); else r_offset = sizeof(*rh); r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO); ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); while ((type = r->type) != RES_TYPE_EOL) { u8 bar_num = r->bar; u32 bar_offset =r->bar_offset; u32 count = r->count; r_offset += sizeof(*r); if (bar_num >= num_bars) continue; switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: break; default: ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); continue; } vdev->res[type].count = count; bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem)); vdev->res[type].bar.offset = bar_offset; ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); } free(rh, M_DEVBUF); free(mrh, M_DEVBUF); free(r, M_DEVBUF); return 0; } unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, 
enum vnic_res_type type) { return vdev->res[type].count; } void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index) { struct vnic_res *res; if (!vdev->res[type].bar.tag) return NULL; res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO); bcopy(&vdev->res[type], res, sizeof(*res)); switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: res->bar.offset += index * VNIC_RES_STRIDE; default: res->bar.offset += 0; } return res; } unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { /* The base address of the desc rings must be 512 byte aligned. * Descriptor count is aligned to groups of 32 descriptors. A * count of 0 means the maximum 4096 descriptors. Descriptor * size is aligned to 16 bytes. */ unsigned int count_align = 32; unsigned int desc_align = 16; ring->base_align = 512; if (desc_count == 0) desc_count = 4096; ring->desc_count = VNIC_ALIGN(desc_count, count_align); ring->desc_size = VNIC_ALIGN(desc_size, desc_align); ring->size = ring->desc_count * ring->desc_size; ring->size_unaligned = ring->size + ring->base_align; return ring->size_unaligned; } void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) { memset(ring->descs, 0, ring->size); } static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_res __iomem *devcmd = vdev->devcmd; int delay; u32 status; int err; status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ - return -ENODEV; + return (ENODEV); } if (status & STAT_BUSY) { pr_err("Busy devcmd %d\n", _CMD_N(cmd)); - return -EBUSY; + return (EBUSY); } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2); } ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) { return 0; } for (delay = 0; delay < wait; delay++) { udelay(100); status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ - return -ENODEV; + return (ENODEV); } if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = -(int)ENIC_BUS_READ_8(devcmd, DEVCMD_ARGS(0)); if (cmd != CMD_CAPABILITY) pr_err("Devcmd %d failed " \ "with error code %d\n", _CMD_N(cmd), err); - return err; + return (err); } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { ENIC_BUS_READ_REGION_4(devcmd, bar, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2); } return 0; } } pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); - return -ETIMEDOUT; + return (ETIMEDOUT); +} + +static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result; + u8 color; + unsigned int i; + u32 fetch_index, new_posted; + int delay, err; + u32 posted = dc2c->posted; + + fetch_index = ENIC_BUS_READ_4(dc2c->wq_ctrl, TX_FETCH_INDEX); + if (fetch_index == 0xFFFFFFFF) + return (ENODEV); + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + device_printf(dev_from_vnic_dev(vdev), + "devcmd2 %d: wq is full. 
fetch index: %u, posted index: %u\n", + _CMD_N(cmd), fetch_index, posted); + return (EBUSY); + } + + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + ENIC_BUS_WRITE_4(dc2c->wq_ctrl, TX_POSTED_INDEX, new_posted); + dc2c->posted = new_posted; + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return (0); + + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + + for (delay = 0; delay < wait; delay++) { + if (result->color == color) { + if (result->error) { + err = result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + device_printf(dev_from_vnic_dev(vdev), + "Error %d devcmd %d\n", err, + _CMD_N(cmd)); + return (err); + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) + for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) + vdev->args[i] = result->results[i]; + + return 0; + } + udelay(100); + } + + device_printf(dev_from_vnic_dev(vdev), + "devcmd %d timed out\n", _CMD_N(cmd)); + + + return (ETIMEDOUT); } static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { u32 status; int err; /* * Proxy command consumes 2 arguments. One for proxy index, * the other is for command to be proxied */ if (nargs > VNIC_DEVCMD_NARGS - 2) { pr_err("number of args %d exceeds the maximum\n", nargs); - return -EINVAL; + return (EINVAL); } memset(vdev->args, 0, sizeof(vdev->args)); vdev->args[0] = vdev->proxy_index; vdev->args[1] = cmd; memcpy(&vdev->args[2], args, nargs * sizeof(args[0])); - err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); if (err) - return err; + return (err); status = (u32)vdev->args[0]; if (status & STAT_ERROR) { err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); - return err; + return (err); } memcpy(args, &vdev->args[1], nargs * sizeof(args[0])); return 0; } static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { int err; if (nargs > VNIC_DEVCMD_NARGS) { pr_err("number of args %d exceeds the maximum\n", nargs); - return -EINVAL; + return (EINVAL); } memset(vdev->args, 0, sizeof(vdev->args)); memcpy(vdev->args, args, nargs * sizeof(args[0])); - err = _vnic_dev_cmd(vdev, cmd, wait); + err = vdev->devcmd_rtn(vdev, cmd, wait); memcpy(args, vdev->args, nargs * sizeof(args[0])); - return err; + return (err); } int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) { u64 args[2]; int err; args[0] = *a0; args[1] = *a1; memset(vdev->args, 0, sizeof(vdev->args)); switch (vdev->proxy) { case PROXY_BY_INDEX: err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, args, ARRAY_SIZE(args), wait); break; case PROXY_BY_BDF: err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, args, ARRAY_SIZE(args), wait); break; case PROXY_NONE: default: err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait); break; } if (err == 0) { *a0 = args[0]; *a1 = args[1]; } - return err; + return (err); } int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { switch 
(vdev->proxy) { case PROXY_BY_INDEX: return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, args, nargs, wait); case PROXY_BY_BDF: return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, args, nargs, wait); case PROXY_NONE: default: return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait); } } static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args, int nargs) { memset(args, 0, nargs * sizeof(*args)); args[0] = CMD_ADD_ADV_FILTER; args[1] = FILTER_CAP_MODE_V1_FLAG; return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000); } int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) { u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); if (err) return 0; return (a1 >= (u32)FILTER_DPDK_1); } /* Determine the "best" filtering mode VIC is capable of. Returns one of 3 * values or 0 on error: * FILTER_DPDK_1 - advanced filters available * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that * the IP layer must be explicitly specified. I.e. cannot have a UDP * filter that matches both IPv4 and IPv6. * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available. * All other filter types are not available. * Return true in filter_tags if supported */ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, u8 *filter_actions) { u64 args[4]; int err; u32 max_level = 0; err = vnic_dev_advanced_filters_cap(vdev, args, 4); /* determine supported filter actions */ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */ if (args[2] == FILTER_CAP_MODE_V1) *filter_actions = args[3]; if (err || ((args[0] == 1) && (args[1] == 0))) { /* Adv filter command not supported or adv filters available but * not enabled. Try the normal filter capability command.
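* (With CMD_CAPABILITY, args[0] carries the sub-command being queried and the supported capability level comes back in args[1].)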
*/ args[0] = CMD_ADD_FILTER; args[1] = 0; err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000); if (err) - return err; + return (err); max_level = args[1]; goto parse_max_level; } else if (args[2] == FILTER_CAP_MODE_V1) { /* parse filter capability mask in args[1] */ if (args[1] & FILTER_DPDK_1_FLAG) *mode = FILTER_DPDK_1; else if (args[1] & FILTER_USNIC_IP_FLAG) *mode = FILTER_USNIC_IP; else if (args[1] & FILTER_IPV4_5TUPLE_FLAG) *mode = FILTER_IPV4_5TUPLE; return 0; } max_level = args[1]; parse_max_level: if (max_level >= (u32)FILTER_USNIC_IP) *mode = FILTER_USNIC_IP; else *mode = FILTER_IPV4_5TUPLE; return 0; } void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, bool *weak) { u64 a0 = CMD_NIC_CFG, a1 = 0; int wait = 1000; int err; *cfg_chk = false; *weak = false; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); if (err == 0 && a0 != 0 && a1 != 0) { *cfg_chk = true; *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK); } } int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) { u64 a0 = (u32)cmd, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); return !(err || a0); } int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value) { u64 a0, a1; int wait = 1000; int err; a0 = offset; a1 = size; err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); switch (size) { case 1: *(u8 *)value = (u8)a0; break; case 2: *(u16 *)value = (u16)a0; break; case 4: *(u32 *)value = (u32)a0; break; case 8: *(u64 *)value = a0; break; default: BUG(); break; } - return err; + return (err); } int vnic_dev_stats_clear(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); } int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; int rc; if (!vdev->stats) - return -ENOMEM; + return (ENOMEM); *stats = vdev->stats; a0 = vdev->stats_res.idi_paddr; a1 = sizeof(struct vnic_stats); bus_dmamap_sync(vdev->stats_res.idi_tag, vdev->stats_res.idi_map, BUS_DMASYNC_POSTREAD); rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); bus_dmamap_sync(vdev->stats_res.idi_tag, vdev->stats_res.idi_map, BUS_DMASYNC_PREREAD); return (rc); } /* * Configure counter DMA */ int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, u32 num_counters) { u64 args[3]; int wait = 1000; int err; if (num_counters > VNIC_MAX_FLOW_COUNTERS) - return -ENOMEM; + return (ENOMEM); if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD || num_counters == 0)) - return -EINVAL; + return (EINVAL); args[0] = num_counters; args[1] = vdev->flow_counters_res.idi_paddr; args[2] = period; bus_dmamap_sync(vdev->flow_counters_res.idi_tag, vdev->flow_counters_res.idi_map, BUS_DMASYNC_POSTREAD); err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait); bus_dmamap_sync(vdev->flow_counters_res.idi_tag, vdev->flow_counters_res.idi_map, BUS_DMASYNC_PREREAD); /* record if DMAs need to be stopped on close */ if (!err) vdev->flow_counters_dma_active = (num_counters != 0 && period != 0); - return err; + return (err); } int vnic_dev_close(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); } int vnic_dev_enable_wait(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); else return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); } int 
vnic_dev_disable(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); } int vnic_dev_open(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); } int vnic_dev_open_done(struct vnic_dev *vdev, int *done) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; *done = 0; err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); if (err) - return err; + return (err); *done = (a0 == 0); return 0; } int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); if (err) - return err; + return (err); for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = ((u8 *)&a0)[i]; return 0; } int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti) { u64 a0, a1 = 0; int wait = 1000; int err; a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | (multicast ? CMD_PFILTER_MULTICAST : 0) | (broadcast ? CMD_PFILTER_BROADCAST : 0) | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) pr_err("Can't set packet filter\n"); - return err; + return (err); } int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); - return err; + return (err); } int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); - return err; + return (err); } int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, u8 ig_vlan_rewrite_mode) { u64 a0 = ig_vlan_rewrite_mode, a1 = 0; int wait = 1000; if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait); else return 0; } void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state) { vdev->in_reset = state; } static inline int vnic_dev_in_reset(struct vnic_dev *vdev) { return vdev->in_reset; } int vnic_dev_notify_setcmd(struct vnic_dev *vdev, void *notify_addr, bus_addr_t notify_pa, u16 intr) { u64 a0, a1; int wait = 1000; int r; bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map, BUS_DMASYNC_PREWRITE); memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map, BUS_DMASYNC_POSTWRITE); if (!vnic_dev_in_reset(vdev)) { vdev->notify = notify_addr; vdev->notify_pa = notify_pa; } a0 = (u64)notify_pa; a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; a1 += sizeof(struct vnic_devcmd_notify); r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); if (!vnic_dev_in_reset(vdev)) vdev->notify_sz = (r == 0) ? 
(u32)a1 : 0; return r; } int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) { void *notify_addr = NULL; bus_addr_t notify_pa = 0; char name[NAME_MAX]; static u32 instance; if (vdev->notify || vdev->notify_pa) { return vnic_dev_notify_setcmd(vdev, vdev->notify, vdev->notify_pa, intr); } if (!vnic_dev_in_reset(vdev)) { snprintf((char *)name, sizeof(name), "vnic_notify-%u", instance++); iflib_dma_alloc(vdev->softc->ctx, sizeof(struct vnic_devcmd_notify), &vdev->notify_res, BUS_DMA_NOWAIT); notify_pa = vdev->notify_res.idi_paddr; notify_addr = vdev->notify_res.idi_vaddr; } return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); } int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) { u64 a0, a1; int wait = 1000; int err; a0 = 0; /* paddr = 0 to unset notify buffer */ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ a1 += sizeof(struct vnic_devcmd_notify); err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); if (!vnic_dev_in_reset(vdev)) { vdev->notify = NULL; vdev->notify_pa = 0; vdev->notify_sz = 0; } - return err; + return (err); } int vnic_dev_notify_unset(struct vnic_dev *vdev) { if (vdev->notify && !vnic_dev_in_reset(vdev)) { iflib_dma_free(&vdev->notify_res); } return vnic_dev_notify_unsetcmd(vdev); } static int vnic_dev_notify_ready(struct vnic_dev *vdev) { u32 *words; unsigned int nwords = vdev->notify_sz / 4; unsigned int i; u32 csum; if (!vdev->notify || !vdev->notify_sz) return 0; do { csum = 0; bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map, BUS_DMASYNC_PREREAD); memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz); bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map, BUS_DMASYNC_POSTREAD); words = (u32 *)&vdev->notify_copy; for (i = 1; i < nwords; i++) csum += words[i]; } while (csum != words[0]); - return 1; + + return (1); } int vnic_dev_init(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; int r = 0; if (vnic_dev_capable(vdev, CMD_INIT)) r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); else { vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); if (a0 & CMD_INITF_DEFAULT_MAC) { /* Emulate these for old CMD_INIT_v1 which * didn't pass a0 so no CMD_INITF_*. 
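* (The two devcmds below emulate it: fetch the factory MAC address with CMD_GET_MAC_ADDR, then program it back as a filter address with CMD_ADDR_ADD.)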
*/ vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); } } return r; } void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) { /* Default: hardware intr coal timer is in units of 1.5 usecs */ vdev->intr_coal_timer_info.mul = 2; vdev->intr_coal_timer_info.div = 3; vdev->intr_coal_timer_info.max_usec = vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff); } int vnic_dev_link_status(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.link_state; } u32 vnic_dev_port_speed(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.port_speed; } u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec) { return (usec * vdev->intr_coal_timer_info.mul) / vdev->intr_coal_timer_info.div; } u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles) { return (hw_cycles * vdev->intr_coal_timer_info.div) / vdev->intr_coal_timer_info.mul; } u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) { return vdev->intr_coal_timer_info.max_usec; } u32 vnic_dev_mtu(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.mtu; } void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode) { vdev->intr_mode = intr_mode; } enum vnic_dev_intr_mode vnic_dev_get_intr_mode( struct vnic_dev *vdev) { return vdev->intr_mode; } int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) { char name[NAME_MAX]; static u32 instance; struct enic_softc *softc; softc = vdev->softc; snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++); iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats), &vdev->stats_res, 0); vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr; return vdev->stats == NULL ? -ENOMEM : 0; } /* * Initialize for up to VNIC_MAX_FLOW_COUNTERS */ int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev) { char name[NAME_MAX]; static u32 instance; struct enic_softc *softc; softc = vdev->softc; snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++); iflib_dma_alloc(softc->ctx, sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS, &vdev->flow_counters_res, 0); vdev->flow_counters = (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr; vdev->flow_counters_dma_active = 0; - return vdev->flow_counters == NULL ? -ENOMEM : 0; + return (vdev->flow_counters == NULL ? 
ENOMEM : 0); } struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, struct enic_bar_info *mem, unsigned int num_bars) { if (vnic_dev_discover_res(vdev, NULL, num_bars)) goto err_out; vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); if (!vdev->devcmd) goto err_out; return vdev; err_out: return NULL; } +static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return (ENODEV); + vdev->devcmd_rtn = _vnic_dev_cmd; + + return 0; +} + +static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + + err = 0; + + if (vdev->devcmd2) + return (0); + + vdev->devcmd2 = malloc(sizeof(*vdev->devcmd2), M_DEVBUF, + M_NOWAIT | M_ZERO); + + if (!vdev->devcmd2) { + return (ENOMEM); + } + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + + err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + + if (err) { + goto err_free_devcmd2; + } + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; + + fetch_index = ENIC_BUS_READ_4(vdev->devcmd2->wq.ctrl, TX_FETCH_INDEX); + if (fetch_index == 0xFFFFFFFF) + return (ENODEV); + + enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, + 0); + vdev->devcmd2->posted = fetch_index; + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; + vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_devcmd2; + + vdev->devcmd_rtn = _vnic_dev_cmd2; + + return (err); + +err_free_devcmd2: + err = ENOMEM; + if (vdev->devcmd2->wq_ctrl) + vnic_wq_free(&vdev->devcmd2->wq); + if (vdev->devcmd2->result) + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + free(vdev->devcmd2, M_DEVBUF); + vdev->devcmd2 = NULL; + + return (err); +} + /* * vnic_dev_classifier: Add/Delete classifier entries * @vdev: vdev of the device * @cmd: CLSF_ADD for Add filter * CLSF_DEL for Delete filter * @entry: In case of ADD filter, the caller passes the RQ number in this * variable. * This function stores the filter_id returned by the * firmware in the same variable before return; * * In case of DEL filter, the caller passes the RQ number. Return * value is irrelevant. 
* @data: filter data * @action: action data */ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) { u64 a0 = overlay; u64 a1 = config; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); } int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number) { u64 a1 = vxlan_udp_port_number; u64 a0 = overlay; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); } int vnic_dev_capable_vxlan(struct vnic_dev *vdev) { u64 a0 = VIC_FEATURE_VXLAN; u64 a1 = 0; int wait = 1000; int ret; ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */ return ret == 0 && (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); } bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx) { u64 a0 = 0; u64 a1 = 0; int wait = 1000; if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait)) return false; *idx = (uint32_t)a0; return true; } bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx) { u64 a0 = idx; u64 a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1, wait) == 0; } bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, bool reset, uint64_t *packets, uint64_t *bytes) { u64 a0 = idx; u64 a1 = reset ? 1 : 0; int wait = 1000; if (reset) { /* query/reset returns updated counters */ if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait)) return false; *packets = a0; *bytes = a1; } else { /* Get values DMA'd from the adapter */ *packets = vdev->flow_counters[idx].vcc_packets; *bytes = vdev->flow_counters[idx].vcc_bytes; } return true; } device_t dev_from_vnic_dev(struct vnic_dev *vdev) { return (vdev->softc->dev); } + +int vnic_dev_cmd_init(struct vnic_dev *vdev) { + int err; + void __iomem *res; + + res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (res) { + err = vnic_dev_init_devcmd2(vdev); + if (err) + device_printf(dev_from_vnic_dev(vdev), + "DEVCMD2 init failed, Using DEVCMD1\n"); + else + return 0; + } + + err = vnic_dev_init_devcmd1(vdev); + + return (err); +} diff --git a/sys/dev/enic/vnic_dev.h b/sys/dev/enic/vnic_dev.h index f8ca29f4e175..5e2d01d985f3 100644 --- a/sys/dev/enic/vnic_dev.h +++ b/sys/dev/enic/vnic_dev.h @@ -1,170 +1,176 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_DEV_H_ #define _VNIC_DEV_H_ #include "enic_compat.h" #include "vnic_resource.h" #include "vnic_devcmd.h" #ifndef VNIC_PADDR_TARGET #define VNIC_PADDR_TARGET 0x0000000000000000ULL #endif enum vnic_dev_intr_mode { VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_INTX, VNIC_DEV_INTR_MODE_MSI, VNIC_DEV_INTR_MODE_MSIX, }; struct vnic_dev_bar { void __iomem *vaddr; unsigned long len; }; struct vnic_dev_ring { void *descs; /* vaddr */ size_t size; bus_addr_t base_addr; /* paddr */ size_t base_align; void *descs_unaligned; size_t size_unaligned; bus_addr_t base_addr_unaligned; unsigned int desc_size; unsigned int desc_count; unsigned int desc_avail; unsigned int last_count; + iflib_dma_info_t ifdip; }; struct vnic_dev_iomap_info { bus_addr_t bus_addr; unsigned long len; void __iomem *vaddr; }; struct vnic_dev; struct vnic_stats; void *vnic_dev_priv(struct vnic_dev *vdev); unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, enum vnic_res_type type); void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name), void (*free_consistent)(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle, struct iflib_dma_info *res)); void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index); uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev, enum vnic_res_type type); uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index); unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev, enum vnic_res_type type); unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size); void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait); void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index); void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf); void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info); int vnic_dev_capable_adv_filters(struct vnic_dev *vdev); int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd); int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, u8 *filter_actions); void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, bool *weak); int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev); int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value); int vnic_dev_stats_clear(struct vnic_dev *vdev); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, u32 num_counters); int vnic_dev_hang_notify(struct vnic_dev *vdev); int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_del_addr(struct 
vnic_dev *vdev, u8 *addr); int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state); int vnic_dev_notify_unset(struct vnic_dev *vdev); int vnic_dev_notify_setcmd(struct vnic_dev *vdev, void *notify_addr, bus_addr_t notify_pa, u16 intr); int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); int vnic_dev_link_status(struct vnic_dev *vdev); u32 vnic_dev_port_speed(struct vnic_dev *vdev); u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); u32 vnic_dev_mtu(struct vnic_dev *vdev); u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); u32 vnic_dev_notify_status(struct vnic_dev *vdev); u32 vnic_dev_uif(struct vnic_dev *vdev); int vnic_dev_close(struct vnic_dev *vdev); int vnic_dev_enable(struct vnic_dev *vdev); int vnic_dev_enable_wait(struct vnic_dev *vdev); int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err); int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_deinit(struct vnic_dev *vdev); void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode); enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec); u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles); u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev); int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, u8 ig_vlan_rewrite_mode); struct enic; struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, struct enic_bar_info *mem, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev); -int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); +int vnic_dev_cmd_init(struct vnic_dev *vdev); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op); u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_enable2(struct vnic_dev *vdev, int active); int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter_v2 *data, struct filter_action_v2 *action_v2); int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); int vnic_dev_capable_vxlan(struct vnic_dev *vdev); bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx); bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx); 
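/* Query a flow counter: with reset, the adapter returns and clears the current counts via devcmd; otherwise the values previously DMA'd into the flow counter array are used. */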
bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, bool reset, uint64_t *packets, uint64_t *bytes); +void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev); device_t dev_from_vnic_dev(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/sys/dev/enic/vnic_intr.c b/sys/dev/enic/vnic_intr.c index 38e2ea6e066b..8a6494efd5f3 100644 --- a/sys/dev/enic/vnic_intr.c +++ b/sys/dev/enic/vnic_intr.c @@ -1,49 +1,49 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #include "enic.h" #include "vnic_dev.h" #include "vnic_intr.h" void vnic_intr_free(struct vnic_intr *intr) { intr->ctrl = NULL; } int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, unsigned int index) { intr->index = index; intr->vdev = vdev; intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { pr_err("Failed to hook INTR[%d].ctrl resource\n", index); - return -EINVAL; + return (EINVAL); } return 0; } void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, unsigned int coalescing_type, unsigned int mask_on_assertion) { vnic_intr_coalescing_timer_set(intr, coalescing_timer); ENIC_BUS_WRITE_4(intr->ctrl, INTR_COALESCING_TYPE, coalescing_type); ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK_ON_ASSERTION, mask_on_assertion); ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDITS, 0); } void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, u32 coalescing_timer) { ENIC_BUS_WRITE_4(intr->ctrl, INTR_COALESCING_TIMER, vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev, coalescing_timer)); } void vnic_intr_clean(struct vnic_intr *intr) { ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDITS, 0); } diff --git a/sys/dev/enic/vnic_intr.h b/sys/dev/enic/vnic_intr.h index 22db66096aae..6d1e8e1cf050 100644 --- a/sys/dev/enic/vnic_intr.h +++ b/sys/dev/enic/vnic_intr.h @@ -1,100 +1,100 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #ifndef _VNIC_INTR_H_ #define _VNIC_INTR_H_ #include "vnic_dev.h" #define VNIC_INTR_TIMER_TYPE_ABS 0 #define VNIC_INTR_TIMER_TYPE_QUIET 1 /* Interrupt control */ struct vnic_intr_ctrl { u32 coalescing_timer; /* 0x00 */ #define INTR_COALESCING_TIMER 0x00 u32 pad0; u32 coalescing_value; /* 0x08 */ #define INTR_COALESCING_VALUE 0x08 u32 pad1; u32 coalescing_type; /* 0x10 */ #define INTR_COALESCING_TYPE 0x10 u32 pad2; u32 mask_on_assertion; /* 0x18 */ #define INTR_MASK_ON_ASSERTION 0x18 u32 pad3; u32 mask; /* 0x20 */ #define INTR_MASK 0x20 u32 pad4; u32 int_credits; /* 0x28 */ #define INTR_CREDITS 0x28 u32 pad5; u32 int_credit_return; /* 0x30 */ #define INTR_CREDIT_RETURN 0x30 u32 pad6; }; struct vnic_intr { unsigned int index; struct vnic_dev *vdev; struct vnic_res *ctrl; }; static inline void vnic_intr_mask(struct vnic_intr *intr) { ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK, 1); } static inline int vnic_intr_masked(struct vnic_intr *intr) { int ret; ret = ENIC_BUS_READ_4(intr->ctrl, INTR_MASK); return ret; } static inline void vnic_intr_unmask(struct vnic_intr *intr) { ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK, 0); } static inline void vnic_intr_return_credits(struct vnic_intr *intr, unsigned int credits, int unmask, int reset_timer) { #define VNIC_INTR_UNMASK_SHIFT 16 #define VNIC_INTR_RESET_TIMER_SHIFT 17 u32 int_credit_return = (credits & 0xffff) | (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | (reset_timer ? 
(1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDIT_RETURN, int_credit_return); } static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) { - return ENIC_BUS_READ_4(intr->ctrl, INTR_CREDITS); + return (ENIC_BUS_READ_4(intr->ctrl, INTR_CREDITS)); } static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) { unsigned int credits = vnic_intr_credits(intr); int unmask = 1; int reset_timer = 1; vnic_intr_return_credits(intr, credits, unmask, reset_timer); } void vnic_intr_free(struct vnic_intr *intr); int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, unsigned int index); void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, unsigned int coalescing_type, unsigned int mask_on_assertion); void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, u32 coalescing_timer); void vnic_intr_clean(struct vnic_intr *intr); #endif /* _VNIC_INTR_H_ */ diff --git a/sys/dev/enic/vnic_resource.h b/sys/dev/enic/vnic_resource.h index 184bfa7401df..d365b8d914ba 100644 --- a/sys/dev/enic/vnic_resource.h +++ b/sys/dev/enic/vnic_resource.h @@ -1,67 +1,68 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #ifndef _VNIC_RESOURCE_H_ #define _VNIC_RESOURCE_H_ #define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ #define VNIC_RES_VERSION 0x00000000L #define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */ #define MGMTVNIC_VERSION 0x00000000L /* The MAC address assigned to the CFG vNIC is fixed. */ #define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d } /* vNIC resource types */ enum vnic_res_type { RES_TYPE_EOL, /* End-of-list */ RES_TYPE_WQ, /* Work queues */ RES_TYPE_RQ, /* Receive queues */ RES_TYPE_CQ, /* Completion queues */ RES_TYPE_MEM, /* Window to dev memory */ RES_TYPE_NIC_CFG, /* Enet NIC config registers */ RES_TYPE_RSS_KEY, /* Enet RSS secret key */ RES_TYPE_RSS_CPU, /* Enet RSS indirection table */ RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */ RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ RES_TYPE_DEBUG, /* Debug-only info */ RES_TYPE_DEV, /* Device-specific region */ RES_TYPE_DEVCMD, /* Device command region */ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ RES_TYPE_SUBVNIC, /* subvnic resource type */ RES_TYPE_MQ_WQ, /* MQ Work queues */ RES_TYPE_MQ_RQ, /* MQ Receive queues */ RES_TYPE_MQ_CQ, /* MQ Completion queues */ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ RES_TYPE_DEVCMD2, /* Device control region */ RES_TYPE_MAX, /* Count of resource types */ }; struct vnic_resource_header { u32 magic; u32 version; }; struct mgmt_barmap_hdr { u32 magic; /* magic number */ u32 version; /* header format version */ u16 lif; /* loopback lif for mgmt frames */ u16 pci_slot; /* installed pci slot */ char serial[16]; /* card serial number */ }; struct vnic_resource { u8 type; u8 bar; u8 pad[2]; u32 bar_offset; u32 count; }; #endif /* _VNIC_RESOURCE_H_ */ diff --git a/sys/dev/enic/vnic_rq.c b/sys/dev/enic/vnic_rq.c index 3720da5f9aa6..ef30563fa2f3 100644 --- a/sys/dev/enic/vnic_rq.c +++ b/sys/dev/enic/vnic_rq.c @@ -1,97 +1,98 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. 
All rights reserved. */ #include "enic.h" #include "vnic_dev.h" #include "vnic_rq.h" void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = rq->ring.desc_count; paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; ENIC_BUS_WRITE_8(rq->ctrl, RX_RING_BASE, paddr); ENIC_BUS_WRITE_4(rq->ctrl, RX_RING_SIZE, count); ENIC_BUS_WRITE_4(rq->ctrl, RX_CQ_INDEX, cq_index); ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_ENABLE, error_interrupt_enable); ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_OFFSET, error_interrupt_offset); ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_STATUS, 0); ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, fetch_index); ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, posted_index); } void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u32 fetch_index = 0; /* Use current fetch_index as the ring starting point */ fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ /* Hardware surprise removal: reset fetch_index */ fetch_index = 0; } + /* The ring always starts at index 0; the value read above is discarded. */ + fetch_index = 0; vnic_rq_init_start(rq, cq_index, fetch_index, fetch_index, error_interrupt_enable, error_interrupt_offset); rq->rxst_idx = 0; rq->tot_pkts = 0; } unsigned int vnic_rq_error_status(struct vnic_rq *rq) { - return ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS); + return (ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS)); } void vnic_rq_enable(struct vnic_rq *rq) { ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 1); } int vnic_rq_disable(struct vnic_rq *rq) { unsigned int wait; ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 0); /* Wait for HW to ACK disable request */ for (wait = 0; wait < 1000; wait++) { if (!(ENIC_BUS_READ_4(rq->ctrl, RX_RUNNING))) return 0; udelay(10); } pr_err("Failed to disable RQ[%d]\n", rq->index); - return -ETIMEDOUT; + return (ETIMEDOUT); } void vnic_rq_clean(struct vnic_rq *rq) { u32 fetch_index; unsigned int count = rq->ring.desc_count; rq->ring.desc_avail = count - 1; rq->rx_nb_hold = 0; /* Use current fetch_index as the ring starting point */ fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ /* Hardware surprise removal: reset fetch_index */ fetch_index = 0; } ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, fetch_index); vnic_dev_clear_desc_ring(&rq->ring); } diff --git a/sys/dev/enic/vnic_rq.h b/sys/dev/enic/vnic_rq.h index ae8c1fdc39bd..9e3d239809c4 100644 --- a/sys/dev/enic/vnic_rq.h +++ b/sys/dev/enic/vnic_rq.h @@ -1,142 +1,141 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_RQ_H_ #define _VNIC_RQ_H_ #include "vnic_dev.h" #include "vnic_cq.h" /* Receive queue control */ struct vnic_rq_ctrl { u64 ring_base; /* 0x00 */ #define RX_RING_BASE 0x00 u32 ring_size; /* 0x08 */ #define RX_RING_SIZE 0x08 u32 pad0; u32 posted_index; /* 0x10 */ #define RX_POSTED_INDEX 0x10 u32 pad1; u32 cq_index; /* 0x18 */ #define RX_CQ_INDEX 0x18 u32 pad2; u32 enable; /* 0x20 */ #define RX_ENABLE 0x20 u32 pad3; u32 running; /* 0x28 */ #define RX_RUNNING 0x28 u32 pad4; u32 fetch_index; /* 0x30 */ #define RX_FETCH_INDEX 0x30 u32 pad5; u32 error_interrupt_enable; /* 0x38 */ #define RX_ERROR_INTR_ENABLE 0x38 u32 pad6; u32 error_interrupt_offset; /* 0x40 */ #define RX_ERROR_INTR_OFFSET 0x40 u32 pad7; u32 error_status; /* 0x48 */ #define RX_ERROR_STATUS 0x48 u32 pad8; u32 tcp_sn; /* 0x50 */ #define RX_TCP_SN 0x50 u32 pad9; u32 unused; /* 0x58 */ u32 pad10; u32 dca_select; /* 0x60 */ #define RX_DCA_SELECT 0x60 u32 pad11; u32 dca_value; /* 0x68 */ #define RX_DCA_VALUE 0x68 u32 pad12; u32 data_ring; /* 0x70 */ }; struct vnic_rq { unsigned int index; unsigned int posted_index; struct vnic_dev *vdev; struct vnic_res *ctrl; struct vnic_dev_ring ring; int num_free_mbufs; struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ unsigned int mbuf_next_idx; /* next mb to consume */ void *os_buf_head; unsigned int pkts_outstanding; uint16_t rx_nb_hold; uint16_t rx_free_thresh; unsigned int socket_id; struct rte_mempool *mp; uint16_t rxst_idx; uint32_t tot_pkts; uint8_t in_use; unsigned int max_mbufs_per_pkt; uint16_t tot_nb_desc; bool need_initial_post; struct iflib_dma_info data; }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) { /* how many does SW own? */ return rq->ring.desc_avail; } static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) { /* how many does HW own? */ return rq->ring.desc_count - rq->ring.desc_avail - 1; } enum desc_return_options { VNIC_RQ_RETURN_DESC, VNIC_RQ_DEFER_RETURN_DESC, }; static inline int vnic_rq_fill(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq)) { int err; while (vnic_rq_desc_avail(rq) > 0) { err = (*buf_fill)(rq); if (err) return err; } return 0; } static inline int vnic_rq_fill_count(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq), unsigned int count) { int err; while ((vnic_rq_desc_avail(rq) > 0) && (count--)) { err = (*buf_fill)(rq); if (err) return err; } return 0; } void vnic_rq_free(struct vnic_rq *rq); void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset); void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset); -void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error); unsigned int vnic_rq_error_status(struct vnic_rq *rq); void vnic_rq_enable(struct vnic_rq *rq); int vnic_rq_disable(struct vnic_rq *rq); void vnic_rq_clean(struct vnic_rq *rq); #endif /* _VNIC_RQ_H_ */ diff --git a/sys/dev/enic/vnic_rss.h b/sys/dev/enic/vnic_rss.h index abd7b9f131aa..039041ece5b2 100644 --- a/sys/dev/enic/vnic_rss.h +++ b/sys/dev/enic/vnic_rss.h @@ -1,32 +1,27 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_RSS_H_ #define _VNIC_RSS_H_ /* RSS key array */ union vnic_rss_key { struct { u8 b[10]; u8 b_pad[6]; } key[4]; u64 raw[8]; }; /* RSS cpu array */ union vnic_rss_cpu { struct { u8 b[4]; u8 b_pad[4]; } cpu[32]; u64 raw[32]; }; -void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key); -void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); -void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key); -void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); - #endif /* _VNIC_RSS_H_ */ diff --git a/sys/dev/enic/vnic_wq.c b/sys/dev/enic/vnic_wq.c index b032df3392b2..995af3270a21 100644 --- a/sys/dev/enic/vnic_wq.c +++ b/sys/dev/enic/vnic_wq.c @@ -1,89 +1,185 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #include "enic.h" #include "vnic_dev.h" #include "vnic_wq.h" -void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) +{ + iflib_dma_info_t ifdip; + int err; + + if ((ifdip = malloc(sizeof(struct iflib_dma_info), + M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { + device_printf(dev_from_vnic_dev(vdev), + "Unable to allocate DMA info memory\n"); + return (ENOMEM); + } + + err = iflib_dma_alloc(vdev->softc->ctx, desc_count * desc_size, + ifdip, 0); + if (err) { + device_printf(dev_from_vnic_dev(vdev), + "Unable to allocate DEVCMD2 descriptors\n"); + err = ENOMEM; + goto err_out_alloc; + } + + ring->base_addr = ifdip->idi_paddr; + ring->descs = ifdip->idi_vaddr; + ring->ifdip = ifdip; + ring->desc_size = desc_size; + ring->desc_count = desc_count; + ring->last_count = 0; + ring->desc_avail = ring->desc_count - 1; + + ring->size = ring->desc_count * ring->desc_size; + ring->base_align = 512; + ring->size_unaligned = ring->size + ring->base_align; + + return (0); + +err_out_alloc: + free(ifdip, M_DEVBUF); + return (err); +} + +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring && ring->descs) { + iflib_dma_free(ring->ifdip); + free(ring->ifdip, M_DEVBUF); + ring->descs = NULL; + } +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + vnic_dev_free_desc_ring(wq->vdev, &wq->ring); + wq->ctrl = NULL; +} + +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!wq->ctrl) + return (EINVAL); + vnic_wq_disable(wq); + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + + return (err); +} + +void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + if (vdev->devcmd2) { + vnic_wq_disable(&vdev->devcmd2->wq); + if (vdev->devcmd2->wq_ctrl) + vnic_wq_free(&vdev->devcmd2->wq); + if (vdev->devcmd2->result) + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + free(vdev->devcmd2, M_DEVBUF); + vdev->devcmd2 = NULL; + } +} + +int vnic_dev_deinit(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return (vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait)); +} + +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; paddr = 
(u64)wq->ring.base_addr | VNIC_PADDR_TARGET; ENIC_BUS_WRITE_8(wq->ctrl, TX_RING_BASE, paddr); ENIC_BUS_WRITE_4(wq->ctrl, TX_RING_SIZE, count); ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, fetch_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, posted_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_CQ_INDEX, cq_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_ENABLE, error_interrupt_enable); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_OFFSET, error_interrupt_offset); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0); wq->head_idx = fetch_index; wq->tail_idx = wq->head_idx; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - vnic_wq_init_start(wq, cq_index, 0, 0, + enic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); wq->cq_pend = 0; wq->last_completed_index = 0; } unsigned int vnic_wq_error_status(struct vnic_wq *wq) { - return ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS); + return (ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS)); } void vnic_wq_enable(struct vnic_wq *wq) { ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 1); } int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 0); /* Wait for HW to ACK disable request */ for (wait = 0; wait < 1000; wait++) { if (!(ENIC_BUS_READ_4(wq->ctrl, TX_RUNNING))) return 0; udelay(10); } pr_err("Failed to disable WQ[%d]\n", wq->index); - return -ETIMEDOUT; + return (ETIMEDOUT); } void vnic_wq_clean(struct vnic_wq *wq) { unsigned int to_clean = wq->tail_idx; while (vnic_wq_desc_used(wq) > 0) { to_clean = buf_idx_incr(wq->ring.desc_count, to_clean); wq->ring.desc_avail++; } wq->head_idx = 0; wq->tail_idx = 0; wq->last_completed_index = 0; ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, 0); ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, 0); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0); vnic_dev_clear_desc_ring(&wq->ring); } diff --git a/sys/dev/enic/vnic_wq.h b/sys/dev/enic/vnic_wq.h index c4f551de8441..9ef492adba24 100644 --- a/sys/dev/enic/vnic_wq.h +++ b/sys/dev/enic/vnic_wq.h @@ -1,108 +1,124 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_WQ_H_ #define _VNIC_WQ_H_ #include "vnic_dev.h" #include "vnic_cq.h" /* Work queue control */ struct vnic_wq_ctrl { u64 ring_base; /* 0x00 */ #define TX_RING_BASE 0x00 u32 ring_size; /* 0x08 */ #define TX_RING_SIZE 0x08 u32 pad0; u32 posted_index; /* 0x10 */ #define TX_POSTED_INDEX 0x10 u32 pad1; u32 cq_index; /* 0x18 */ #define TX_CQ_INDEX 0x18 u32 pad2; u32 enable; /* 0x20 */ #define TX_ENABLE 0x20 u32 pad3; u32 running; /* 0x28 */ #define TX_RUNNING 0x28 u32 pad4; u32 fetch_index; /* 0x30 */ #define TX_FETCH_INDEX 0x30 u32 pad5; u32 dca_value; /* 0x38 */ #define TX_DCA_VALUE 0x38 u32 pad6; u32 error_interrupt_enable; /* 0x40 */ #define TX_ERROR_INTR_ENABLE 0x40 u32 pad7; u32 error_interrupt_offset; /* 0x48 */ #define TX_ERROR_INTR_OFFSET 0x48 u32 pad8; u32 error_status; /* 0x50 */ #define TX_ERROR_STATUS 0x50 u32 pad9; }; struct vnic_wq { unsigned int index; uint64_t tx_offload_notsup_mask; struct vnic_dev *vdev; struct vnic_res *ctrl; struct vnic_dev_ring ring; unsigned int head_idx; unsigned int cq_pend; unsigned int tail_idx; unsigned int socket_id; unsigned int processed; const struct rte_memzone *cqmsg_rz; uint16_t last_completed_index; uint64_t offloads; }; +struct devcmd2_controller { + struct vnic_res *wq_ctrl; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; + struct vnic_dev_ring results_ring; + struct vnic_res *results_ctrl; + struct vnic_wq wq; + u32 posted; +}; + + static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) { /* how many does SW own? */ return wq->ring.desc_avail; } static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) { /* how many does HW own? */ return wq->ring.desc_count - wq->ring.desc_avail - 1; } #define PI_LOG2_CACHE_LINE_SIZE 5 #define PI_INDEX_BITS 12 #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) #define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1) #define PI_PREFETCH_LEN_OFF 16 #define PI_PREFETCH_ADDR_BITS 43 #define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1) #define PI_PREFETCH_ADDR_OFF 21 static inline uint32_t buf_idx_incr(uint32_t n_descriptors, uint32_t idx) { idx++; if (unlikely(idx == n_descriptors)) idx = 0; return idx; } void vnic_wq_free(struct vnic_wq *wq); -void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset); void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset); void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error); unsigned int vnic_wq_error_status(struct vnic_wq *wq); void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq); +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); #endif /* _VNIC_WQ_H_ */
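/*
 * Example (illustrative sketch built only from the helpers declared
 * above; post_one() is a hypothetical stand-in for writing a single
 * descriptor): the ring accounting keeps one descriptor unused so a
 * full ring can be distinguished from an empty one, which is why
 * vnic_wq_desc_used() returns desc_count - desc_avail - 1 and at most
 * desc_count - 1 descriptors can be outstanding.  A post loop would
 * advance head_idx with buf_idx_incr() and charge desc_avail:
 *
 *	static int
 *	example_wq_post(struct vnic_wq *wq, unsigned int n)
 *	{
 *		while (n-- > 0) {
 *			if (vnic_wq_desc_avail(wq) == 0)
 *				return (ENOSPC);	/- ring full -/
 *			post_one(wq);			/- hypothetical -/
 *			wq->head_idx = buf_idx_incr(wq->ring.desc_count,
 *			    wq->head_idx);
 *			wq->ring.desc_avail--;
 *		}
 *		return (0);
 *	}
 */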