diff --git a/sys/dev/enic/if_enic.c b/sys/dev/enic/if_enic.c index 26776244778e..35620fece6bf 100644 --- a/sys/dev/enic/if_enic.c +++ b/sys/dev/enic/if_enic.c @@ -1,1733 +1,1730 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "enic.h" #include "opt_inet.h" #include "opt_inet6.h" static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "ENIC"); static const pci_vendor_info_t enic_vendor_info_array[] = { PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET, DRV_DESCRIPTION), PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF, DRV_DESCRIPTION " VF"), /* required last entry */ PVID_END }; static void *enic_register(device_t); static int enic_attach_pre(if_ctx_t); static int enic_msix_intr_assign(if_ctx_t, int); static int enic_attach_post(if_ctx_t); static int enic_detach(if_ctx_t); static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static void enic_queues_free(if_ctx_t); static int enic_rxq_intr(void *); static int enic_event_intr(void *); static int enic_err_intr(void *); static void enic_stop(if_ctx_t); static void enic_init(if_ctx_t); static void enic_multi_set(if_ctx_t); static int enic_mtu_set(if_ctx_t, uint32_t); static void enic_media_status(if_ctx_t, struct ifmediareq *); static int enic_media_change(if_ctx_t); static int enic_promisc_set(if_ctx_t, int); static uint64_t enic_get_counter(if_ctx_t, ift_counter); static void 
enic_update_admin_status(if_ctx_t); static void enic_txq_timer(if_ctx_t, uint16_t); static int enic_link_is_up(struct enic_softc *); static void enic_link_status(struct enic_softc *); static void enic_set_lladdr(struct enic_softc *); static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void enic_setup_rxq_sysctl(struct vnic_rq *, int, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void enic_setup_sysctl(struct enic_softc *); static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t); static int enic_rx_queue_intr_enable(if_ctx_t, uint16_t); static void enic_enable_intr(struct enic_softc *, int); static void enic_disable_intr(struct enic_softc *, int); static void enic_intr_enable_all(if_ctx_t); static void enic_intr_disable_all(if_ctx_t); static int enic_dev_open(struct enic *); static int enic_dev_init(struct enic *); static void *enic_alloc_consistent(void *, size_t, bus_addr_t *, struct iflib_dma_info *, u8 *); static void enic_free_consistent(void *, size_t, void *, bus_addr_t, struct iflib_dma_info *); static int enic_pci_mapping(struct enic_softc *); static void enic_pci_mapping_free(struct enic_softc *); static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int), int (*) (struct vnic_dev *, int *), int arg); static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool); static void enic_update_packet_filter(struct enic *enic); static bool enic_if_needs_restart(if_ctx_t, enum iflib_restart_event); typedef enum { ENIC_BARRIER_RD, ENIC_BARRIER_WR, ENIC_BARRIER_RDWR, } enic_barrier_t; static device_method_t enic_methods[] = { /* Device interface */ DEVMETHOD(device_register, enic_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, 
iflib_device_resume), DEVMETHOD_END }; static driver_t enic_driver = { "enic", enic_methods, sizeof(struct enic_softc) }; DRIVER_MODULE(enic, pci, enic_driver, 0, 0); IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array); MODULE_VERSION(enic, 2); MODULE_DEPEND(enic, pci, 1, 1, 1); MODULE_DEPEND(enic, ether, 1, 1, 1); MODULE_DEPEND(enic, iflib, 1, 1, 1); static device_method_t enic_iflib_methods[] = { DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, enic_queues_free), DEVMETHOD(ifdi_attach_pre, enic_attach_pre), DEVMETHOD(ifdi_attach_post, enic_attach_post), DEVMETHOD(ifdi_detach, enic_detach), DEVMETHOD(ifdi_init, enic_init), DEVMETHOD(ifdi_stop, enic_stop), DEVMETHOD(ifdi_multi_set, enic_multi_set), DEVMETHOD(ifdi_mtu_set, enic_mtu_set), DEVMETHOD(ifdi_media_status, enic_media_status), DEVMETHOD(ifdi_media_change, enic_media_change), DEVMETHOD(ifdi_promisc_set, enic_promisc_set), DEVMETHOD(ifdi_get_counter, enic_get_counter), DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status), DEVMETHOD(ifdi_timer, enic_txq_timer), DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all), DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all), DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign), DEVMETHOD(ifdi_needs_restart, enic_if_needs_restart), DEVMETHOD_END }; static driver_t enic_iflib_driver = { "enic", enic_iflib_methods, sizeof(struct enic_softc) }; extern struct if_txrx enic_txrx; static struct if_shared_ctx enic_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = 512, .isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE, .isc_tx_maxsegsize = PAGE_SIZE, /* * These values are used to configure the busdma tag used for receive * descriptors. Each receive descriptor only points to one buffer. 
*/ .isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE, /* One buf per * descriptor */ .isc_rx_nsegments = 1, /* One mapping per descriptor */ .isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE, .isc_admin_intrcnt = 2, .isc_vendor_info = enic_vendor_info_array, .isc_driver_version = "1", .isc_driver = &enic_iflib_driver, .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_SKIP_MSIX, /* * Number of receive queues per receive queue set, with associated * descriptor settings for each. */ .isc_nrxqs = 2, .isc_nfl = 1, /* one free list for each receive command * queue */ .isc_nrxd_min = {16, 16}, .isc_nrxd_max = {2048, 2048}, .isc_nrxd_default = {64, 64}, /* * Number of transmit queues per transmit queue set, with associated * descriptor settings for each. */ .isc_ntxqs = 2, .isc_ntxd_min = {16, 16}, .isc_ntxd_max = {2048, 2048}, .isc_ntxd_default = {64, 64}, }; static void * enic_register(device_t dev) { return (&enic_sctx_init); } static int enic_allocate_msix(struct enic_softc *softc) { if_ctx_t ctx; if_softc_ctx_t scctx; if_shared_ctx_t sctx; device_t dev; cpuset_t cpus; int queues, vectors, requested; int err = 0; dev = softc->dev; ctx = softc->ctx; scctx = softc->scctx; sctx = iflib_get_sctx(ctx); if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0) { device_printf(dev, "Unable to fetch CPU list\n"); CPU_COPY(&all_cpus, &cpus); } queues = CPU_COUNT(&cpus); queues = imin(queues, scctx->isc_nrxqsets); queues = imin(queues, scctx->isc_ntxqsets); requested = queues * 2 + sctx->isc_admin_intrcnt; scctx->isc_nrxqsets = queues; scctx->isc_ntxqsets = queues; vectors = requested; if ((err = pci_alloc_msix(dev, &vectors)) != 0) { device_printf(dev, "failed to allocate %d MSI-X vectors, err: %d\n", requested, err); err = 1; goto enic_allocate_msix_out; } else { if (vectors != requested) { device_printf(dev, "Unable to allocate sufficient MSI-X vectors " "(got %d, need %d)\n", requested, vectors); pci_release_msi(dev); err = 1; goto enic_allocate_msix_out; } } 
device_printf(dev, "Using MSI-X interrupts with %d vectors\n", vectors); scctx->isc_intr = IFLIB_INTR_MSIX; scctx->isc_vectors = vectors; enic_allocate_msix_out: return (err); } static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { {0, 0}, /* 0 - 4 Gbps */ {0, 3}, /* 4 - 10 Gbps */ {3, 6}, /* 10 - 40 Gbps */ }; static void enic_set_rx_coal_setting(struct enic *enic) { unsigned int speed; int index = -1; struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; /* 1. Read the link speed from fw * 2. Pick the default range for the speed * 3. Update it in enic->rx_coalesce_setting */ speed = vnic_dev_port_speed(enic->vdev); if (ENIC_LINK_SPEED_10G < speed) index = ENIC_LINK_40G_INDEX; else if (ENIC_LINK_SPEED_4G < speed) index = ENIC_LINK_10G_INDEX; else index = ENIC_LINK_4G_INDEX; rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; /* Start with the value provided by UCSM */ for (index = 0; index < enic->rq_count; index++) enic->cq[index].cur_rx_coal_timeval = enic->config.intr_timer_usec; rx_coal->use_adaptive_rx_coalesce = 1; } static int enic_attach_pre(if_ctx_t ctx) { if_softc_ctx_t scctx; struct enic_softc *softc; struct vnic_dev *vdev; struct enic *enic; device_t dev; int err = -1; int rc = 0; int i; u64 a0 = 0, a1 = 0; int wait = 1000; struct vnic_stats *stats; int ret; dev = iflib_get_dev(ctx); softc = iflib_get_softc(ctx); softc->dev = dev; softc->ctx = ctx; softc->sctx = iflib_get_sctx(ctx); softc->scctx = iflib_get_softc_ctx(ctx); softc->ifp = iflib_get_ifp(ctx); softc->media = iflib_get_media(ctx); softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN * ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->mta == NULL) return (ENOMEM); scctx = softc->scctx; mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF); pci_enable_busmaster(softc->dev); if (enic_pci_mapping(softc)) return 
(ENXIO); enic = &softc->enic; enic->softc = softc; vdev = &softc->vdev; vdev->softc = softc; enic->vdev = vdev; vdev->priv = enic; ENIC_LOCK(softc); vnic_dev_register(vdev, &softc->mem, 1); enic->vdev = vdev; vnic_dev_cmd_init(enic->vdev); vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN); iflib_set_mac(ctx, softc->mac_addr); vnic_register_cbacks(enic->vdev, enic_alloc_consistent, enic_free_consistent); /* * Allocate the consistent memory for stats and counters upfront so * both primary and secondary processes can access them. */ ENIC_UNLOCK(softc); err = vnic_dev_alloc_stats_mem(enic->vdev); ENIC_LOCK(softc); if (err) { dev_err(enic, "Failed to allocate cmd memory, aborting\n"); goto err_out_unregister; } vnic_dev_stats_clear(enic->vdev); ret = vnic_dev_stats_dump(enic->vdev, &stats); if (ret) { dev_err(enic, "Error in getting stats\n"); goto err_out_unregister; } err = vnic_dev_alloc_counter_mem(enic->vdev); if (err) { dev_err(enic, "Failed to allocate counter memory, aborting\n"); goto err_out_unregister; } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { dev_err(enic, "vNIC dev open failed, aborting\n"); goto err_out_unregister; } /* Set ingress vlan rewrite mode before vnic initialization */ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, enic->ig_vlan_rewrite_mode); if (err) { dev_err(enic, "Failed to set ingress vlan rewrite mode, aborting.\n"); goto err_out_dev_close; } /* * Issue device init to initialize the vnic-to-switch link. We'll * start with carrier off and wait for link UP notification later to * turn on carrier. 
We don't need to wait here for the * vnic-to-switch link initialization to complete; link UP * notification is the indication that the process is complete. */ err = vnic_dev_init(enic->vdev, 0); if (err) { dev_err(enic, "vNIC dev init failed, aborting\n"); goto err_out_dev_close; } err = enic_dev_init(enic); if (err) { dev_err(enic, "Device initialization failed, aborting\n"); goto err_out_dev_close; } ENIC_UNLOCK(softc); enic->port_mtu = vnic_dev_mtu(enic->vdev); softc->scctx = iflib_get_softc_ctx(ctx); scctx = softc->scctx; scctx->isc_txrx = &enic_txrx; scctx->isc_capabilities = scctx->isc_capenable = \ IFCAP_HWCSUM; scctx->isc_tx_csum_flags = 0; if_setmtu(softc->ifp, enic->config.mtu); scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \ ETHER_CRC_LEN; scctx->isc_nrxqsets_max = enic->conf_rq_count; scctx->isc_ntxqsets_max = enic->conf_wq_count; scctx->isc_nrxqsets = enic->conf_rq_count; scctx->isc_ntxqsets = enic->conf_wq_count; for (i = 0; i < enic->conf_wq_count; i++) { scctx->isc_ntxd[i] = enic->config.wq_desc_count; scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc) * scctx->isc_ntxd[i]; scctx->isc_ntxd[i + enic->conf_wq_count] = enic->config.wq_desc_count; scctx->isc_txqsizes[i + enic->conf_wq_count] = sizeof(struct cq_desc) * scctx->isc_ntxd[i + enic->conf_wq_count]; } for (i = 0; i < enic->conf_rq_count; i++) { scctx->isc_nrxd[i] = enic->config.rq_desc_count; scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) * scctx->isc_nrxd[i]; scctx->isc_nrxd[i + enic->conf_rq_count] = enic->config.rq_desc_count; scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count]; } scctx->isc_tx_nsegments = 31; scctx->isc_msix_bar = -1; ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL); /* - * Allocate the CQ here since TX is called first before RX for now - * assume RX and TX 
are the same + * Allocate the CQ here since TX is called first before RX. */ if (softc->enic.cq == NULL) softc->enic.cq = malloc(sizeof(struct vnic_cq) * softc->enic.wq_count + softc->enic.rq_count, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.cq == NULL) return (ENOMEM); - softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count; - /* * Allocate the consistent memory for stats and counters upfront so * both primary and secondary processes can access them. */ err = vnic_dev_alloc_stats_mem(enic->vdev); if (err) { dev_err(enic, "Failed to allocate cmd memory, aborting\n"); goto err_out_dev_close; } err = enic_allocate_msix(softc); if (err) { dev_err(enic, "Failed to allocate MSIX, aborting\n"); goto err_out_dev_close; } return (rc); err_out_dev_close: vnic_dev_close(enic->vdev); vnic_dev_deinit_devcmd2(enic->vdev); err_out_unregister: free(softc->vdev.devcmd, M_DEVBUF); free(softc->enic.intr_queues, M_DEVBUF); free(softc->enic.cq, M_DEVBUF); free(softc->mta, M_DEVBUF); rc = -1; pci_disable_busmaster(softc->dev); enic_pci_mapping_free(softc); mtx_destroy(&softc->enic_lock); return (rc); } static int enic_msix_intr_assign(if_ctx_t ctx, int msix) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; int error; int i; char irq_name[16]; softc = iflib_get_softc(ctx); enic = &softc->enic; scctx = softc->scctx; ENIC_LOCK(softc); vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); ENIC_UNLOCK(softc); enic->intr_queues = malloc(sizeof(*enic->intr_queues) * enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO); enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT | M_ZERO); for (i = 0; i < scctx->isc_nrxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i, device_get_unit(softc->dev)); error = iflib_irq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX, enic_rxq_intr, &enic->rq[i], i, irq_name); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register rxq %d interrupt handler\n", 
i); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); } for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i - scctx->isc_nrxqsets, device_get_unit(softc->dev)); iflib_softirq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX, &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets, irq_name); enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); } i = scctx->isc_nrxqsets + scctx->isc_ntxqsets; error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq, i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register event interrupt handler\n"); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); i++; error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq, i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register event interrupt handler\n"); return (error); } enic->intr[i].index = i; enic->intr[i].vdev = enic->vdev; ENIC_LOCK(softc); enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL, i); vnic_intr_mask(&enic->intr[i]); ENIC_UNLOCK(softc); enic->intr_count = msix; return (0); } static void enic_free_irqs(struct enic_softc *softc) { if_softc_ctx_t scctx; struct enic *enic; int i; scctx = softc->scctx; enic = &softc->enic; for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) { iflib_irq_free(softc->ctx, 
&enic->intr_queues[i].intr_irq); } iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq); iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq); free(enic->intr_queues, M_DEVBUF); free(enic->intr, M_DEVBUF); } static int enic_attach_post(if_ctx_t ctx) { struct enic *enic; struct enic_softc *softc; int error = 0; softc = iflib_get_softc(ctx); enic = &softc->enic; enic_setup_sysctl(softc); enic_init_vnic_resources(enic); enic_set_rx_coal_setting(enic); enic_setup_finish(enic); ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); return (error); } static int enic_detach(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; softc = iflib_get_softc(ctx); enic = &softc->enic; vnic_dev_notify_unset(enic->vdev); enic_free_irqs(softc); ENIC_LOCK(softc); vnic_dev_deinit(enic->vdev); vnic_dev_close(enic->vdev); vnic_dev_deinit_devcmd2(enic->vdev); free(softc->vdev.devcmd, M_DEVBUF); pci_disable_busmaster(softc->dev); enic_pci_mapping_free(softc); ENIC_UNLOCK(softc); return 0; } static int enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs, int ntxqs, int ntxqsets) { struct enic_softc *softc; int q; softc = iflib_get_softc(ctx); /* Allocate the array of transmit queues */ softc->enic.wq = malloc(sizeof(struct vnic_wq) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.wq == NULL) return (ENOMEM); /* Initialize driver state for each transmit queue */ /* * Allocate queue state that is shared with the device. This check * and call is performed in both enic_tx_queues_alloc() and * enic_rx_queues_alloc() so that we don't have to care which order * iflib invokes those routines in. 
*/ /* Record descriptor ring vaddrs and paddrs */ ENIC_LOCK(softc); for (q = 0; q < ntxqsets; q++) { struct vnic_wq *wq; struct vnic_cq *cq; - unsigned int cq_wq; + unsigned int cq_wq; wq = &softc->enic.wq[q]; cq_wq = enic_cq_wq(&softc->enic, q); cq = &softc->enic.cq[cq_wq]; /* Completion ring */ wq->vdev = softc->enic.vdev; wq->index = q; wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ, wq->index); vnic_wq_disable(wq); wq->ring.desc_size = sizeof(struct wq_enet_desc); wq->ring.desc_count = softc->scctx->isc_ntxd[q]; wq->ring.desc_avail = wq->ring.desc_count - 1; wq->ring.last_count = wq->ring.desc_count; wq->head_idx = 0; wq->tail_idx = 0; - wq->ring.size = wq->ring.desc_count * wq->ring.desc_size; wq->ring.descs = vaddrs[q * ntxqs + 0]; wq->ring.base_addr = paddrs[q * ntxqs + 0]; /* Command ring */ cq->vdev = softc->enic.vdev; cq->index = cq_wq; cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ, cq->index); cq->ring.desc_size = sizeof(struct cq_enet_wq_desc); cq->ring.desc_count = softc->scctx->isc_ntxd[q]; cq->ring.desc_avail = cq->ring.desc_count - 1; - cq->ring.size = cq->ring.desc_count * cq->ring.desc_size; cq->ring.descs = vaddrs[q * ntxqs + 1]; cq->ring.base_addr = paddrs[q * ntxqs + 1]; } ENIC_UNLOCK(softc); return (0); } static int enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs, int nrxqs, int nrxqsets) { struct enic_softc *softc; int q; softc = iflib_get_softc(ctx); /* Allocate the array of receive queues */ softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->enic.rq == NULL) return (ENOMEM); /* Initialize driver state for each receive queue */ /* * Allocate queue state that is shared with the device. This check * and call is performed in both enic_tx_queues_alloc() and * enic_rx_queues_alloc() so that we don't have to care which order * iflib invokes those routines in. 
*/ /* Record descriptor ring vaddrs and paddrs */ ENIC_LOCK(softc); for (q = 0; q < nrxqsets; q++) { struct vnic_rq *rq; struct vnic_cq *cq; unsigned int cq_rq; rq = &softc->enic.rq[q]; cq_rq = enic_cq_rq(&softc->enic, q); cq = &softc->enic.cq[cq_rq]; /* Completion ring */ cq->vdev = softc->enic.vdev; cq->index = cq_rq; cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ, cq->index); cq->ring.desc_size = sizeof(struct cq_enet_wq_desc); cq->ring.desc_count = softc->scctx->isc_nrxd[1]; cq->ring.desc_avail = cq->ring.desc_count - 1; - cq->ring.size = cq->ring.desc_count * cq->ring.desc_size; cq->ring.descs = vaddrs[q * nrxqs + 0]; cq->ring.base_addr = paddrs[q * nrxqs + 0]; /* Command ring(s) */ rq->vdev = softc->enic.vdev; rq->index = q; rq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_RQ, rq->index); vnic_rq_disable(rq); rq->ring.desc_size = sizeof(struct rq_enet_desc); rq->ring.desc_count = softc->scctx->isc_nrxd[0]; rq->ring.desc_avail = rq->ring.desc_count - 1; - rq->ring.size = rq->ring.desc_count * rq->ring.desc_size; rq->ring.descs = vaddrs[q * nrxqs + 1]; rq->ring.base_addr = paddrs[q * nrxqs + 1]; rq->need_initial_post = true; } ENIC_UNLOCK(softc); return (0); } static void enic_queues_free(if_ctx_t ctx) { struct enic_softc *softc; softc = iflib_get_softc(ctx); free(softc->enic.rq, M_DEVBUF); free(softc->enic.wq, M_DEVBUF); free(softc->enic.cq, M_DEVBUF); } static int enic_rxq_intr(void *rxq) { struct vnic_rq *rq; if_t ifp; rq = (struct vnic_rq *)rxq; ifp = iflib_get_ifp(rq->vdev->softc->ctx); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return (FILTER_HANDLED); return (FILTER_SCHEDULE_THREAD); } static int enic_event_intr(void *vsc) { struct enic_softc *softc; struct enic *enic; uint32_t mtu; softc = vsc; enic = &softc->enic; mtu = vnic_dev_mtu(enic->vdev); if (mtu && mtu != enic->port_mtu) { enic->port_mtu = mtu; } enic_link_status(softc); return (FILTER_HANDLED); } static int enic_err_intr(void *vsc) { struct enic_softc *softc; softc = 
vsc; enic_stop(softc->ctx); enic_init(softc->ctx); return (FILTER_HANDLED); } static void enic_stop(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; unsigned int index; struct vnic_wq *wq; struct vnic_rq *rq; struct vnic_cq *cq; unsigned int cq_wq, cq_rq; softc = iflib_get_softc(ctx); scctx = softc->scctx; enic = &softc->enic; if (softc->stopped) return; softc->link_active = 0; softc->stopped = 1; enic_dev_disable(enic); for (index = 0; index < scctx->isc_ntxqsets; index++) { enic_stop_wq(enic, index); vnic_wq_clean(&enic->wq[index]); vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]); wq = &softc->enic.wq[index]; wq->ring.desc_avail = wq->ring.desc_count - 1; wq->ring.last_count = wq->ring.desc_count; wq->head_idx = 0; wq->tail_idx = 0; cq_wq = enic_cq_wq(&softc->enic, index); cq = &softc->enic.cq[cq_wq]; cq->ring.desc_avail = cq->ring.desc_count - 1; } for (index = 0; index < scctx->isc_nrxqsets; index++) { enic_stop_rq(enic, index); vnic_rq_clean(&enic->rq[index]); vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]); rq = &softc->enic.rq[index]; cq_rq = enic_cq_rq(&softc->enic, index); cq = &softc->enic.cq[cq_rq]; cq->ring.desc_avail = cq->ring.desc_count - 1; rq->ring.desc_avail = rq->ring.desc_count - 1; rq->need_initial_post = true; } for (index = 0; index < scctx->isc_vectors; index++) { vnic_intr_clean(&enic->intr[index]); } } static void enic_init(if_ctx_t ctx) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx; unsigned int index; softc = iflib_get_softc(ctx); scctx = softc->scctx; enic = &softc->enic; enic_init_vnic_resources(enic); for (index = 0; index < scctx->isc_ntxqsets; index++) enic_prep_wq_for_simple_tx(&softc->enic, index); for (index = 0; index < scctx->isc_ntxqsets; index++) enic_start_wq(enic, index); for (index = 0; index < scctx->isc_nrxqsets; index++) enic_start_rq(enic, index); /* Use the current MAC address. 
*/ bcopy(if_getlladdr(softc->ifp), softc->lladdr, ETHER_ADDR_LEN); enic_set_lladdr(softc); ENIC_LOCK(softc); vnic_dev_enable_wait(enic->vdev); ENIC_UNLOCK(softc); softc->stopped = 0; enic_link_status(softc); } static void enic_del_mcast(struct enic_softc *softc) { struct enic *enic; int i; enic = &softc->enic; for (i=0; i < softc->mc_count; i++) { vnic_dev_del_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]); } softc->multicast = 0; softc->mc_count = 0; } static void enic_add_mcast(struct enic_softc *softc) { struct enic *enic; int i; enic = &softc->enic; for (i=0; i < softc->mc_count; i++) { vnic_dev_add_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]); } softc->multicast = 1; } static u_int enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx) { uint8_t *mta = arg; if (idx == ENIC_MAX_MULTICAST_ADDRESSES) return (0); bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN); return (1); } static void enic_multi_set(if_ctx_t ctx) { if_t ifp; struct enic_softc *softc; u_int count; softc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); ENIC_LOCK(softc); enic_del_mcast(softc); count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta); softc->mc_count = count; enic_add_mcast(softc); ENIC_UNLOCK(softc); if (if_getflags(ifp) & IFF_PROMISC) { softc->promisc = 1; } else { softc->promisc = 0; } if (if_getflags(ifp) & IFF_ALLMULTI) { softc->allmulti = 1; } else { softc->allmulti = 0; } enic_update_packet_filter(&softc->enic); } static int enic_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct enic_softc *softc; struct enic *enic; if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx); softc = iflib_get_softc(ctx); enic = &softc->enic; enic_stop(softc->ctx); if (mtu > enic->port_mtu){ return (EINVAL); } enic->config.mtu = mtu; scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; enic_init(softc->ctx); return (0); } static void enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct enic_softc *softc; struct ifmedia_entry *next; uint32_t speed; 
uint64_t target_baudrate; softc = iflib_get_softc(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (enic_link_is_up(softc) != 0) { ENIC_LOCK(softc); speed = vnic_dev_port_speed(&softc->vdev); ENIC_UNLOCK(softc); target_baudrate = 1000ull * speed; LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) { if (ifmedia_baudrate(next->ifm_media) == target_baudrate) { ifmr->ifm_active |= next->ifm_media; } } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_AUTO; } else ifmr->ifm_active |= IFM_NONE; } static int enic_media_change(if_ctx_t ctx) { return (ENODEV); } static int enic_promisc_set(if_ctx_t ctx, int flags) { if_t ifp; struct enic_softc *softc; softc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); if (if_getflags(ifp) & IFF_PROMISC) { softc->promisc = 1; } else { softc->promisc = 0; } if (if_getflags(ifp) & IFF_ALLMULTI) { softc->allmulti = 1; } else { softc->allmulti = 0; } enic_update_packet_filter(&softc->enic); return (0); } static uint64_t enic_get_counter(if_ctx_t ctx, ift_counter cnt) { if_t ifp = iflib_get_ifp(ctx); if (cnt < IFCOUNTERS) return if_get_counter_default(ifp, cnt); return (0); } static void enic_update_admin_status(if_ctx_t ctx) { struct enic_softc *softc; softc = iflib_get_softc(ctx); enic_link_status(softc); } static void enic_txq_timer(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; struct enic *enic; struct vnic_stats *stats; int ret; softc = iflib_get_softc(ctx); enic = &softc->enic; ENIC_LOCK(softc); ret = vnic_dev_stats_dump(enic->vdev, &stats); ENIC_UNLOCK(softc); if (ret) { dev_err(enic, "Error in getting stats\n"); } } static int enic_link_is_up(struct enic_softc *softc) { return (vnic_dev_link_status(&softc->vdev) == 1); } static void enic_link_status(struct enic_softc *softc) { if_ctx_t ctx; uint64_t speed; int link; ctx = softc->ctx; link = enic_link_is_up(softc); speed = IF_Gbps(10); ENIC_LOCK(softc); speed = vnic_dev_port_speed(&softc->vdev); ENIC_UNLOCK(softc); if (link != 0 && 
softc->link_active == 0) { softc->link_active = 1; iflib_link_state_change(ctx, LINK_STATE_UP, speed); } else if (link == 0 && softc->link_active != 0) { softc->link_active = 0; iflib_link_state_change(ctx, LINK_STATE_DOWN, speed); } } static void enic_set_lladdr(struct enic_softc *softc) { struct enic *enic; enic = &softc->enic; ENIC_LOCK(softc); vnic_dev_add_addr(enic->vdev, softc->lladdr); ENIC_UNLOCK(softc); } static void enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *txsnode; struct sysctl_oid_list *txslist; - struct vnic_stats *stats = wq[i].vdev->stats; + struct vnic_stats *stats; + + stats = wq[i].vdev->stats; txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics"); txslist = SYSCTL_CHILDREN(txsnode); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD, &stats->tx.tx_frames_ok, "TX Frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD, &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD, &stats->tx.tx_multicast_frames_ok, "TX multicast framse OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD, &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD, &stats->tx.tx_bytes_ok, "TX bytes OK "); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD, &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD, &stats->tx.tx_drops, "TX 
drops"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD, &stats->tx.tx_errors, "TX errors"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD, &stats->tx.tx_tso, "TX TSO"); } static void enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *rxsnode; struct sysctl_oid_list *rxslist; - struct vnic_stats *stats = rq[i].vdev->stats; + struct vnic_stats *stats; + + stats = rq[i].vdev->stats; rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics"); rxslist = SYSCTL_CHILDREN(rxsnode); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD, &stats->rx.rx_frames_ok, "RX Frames OK"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD, &stats->rx.rx_frames_total, "RX frames total"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD, &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD, &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD, &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD, &stats->rx.rx_bytes_ok, "RX bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD, &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD, &stats->rx.rx_drop, "RX drop"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD, &stats->rx.rx_errors, "RX 
errors"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD, &stats->rx.rx_rss, "RX rss"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &stats->rx.rx_crc_errors, "RX crc errors"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &stats->rx.rx_frames_64, "RX frames 64"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD, &stats->rx.rx_frames_127, "RX frames 127"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD, &stats->rx.rx_frames_255, "RX frames 255"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD, &stats->rx.rx_frames_511, "RX frames 511"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD, &stats->rx.rx_frames_1023, "RX frames 1023"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD, &stats->rx.rx_frames_1518, "RX frames 1518"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD, &stats->rx.rx_frames_to_max, "RX frames to max"); } static void enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child); enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child); } static void enic_setup_sysctl(struct enic_softc *softc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = softc->dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); enic_setup_queue_sysctl(softc, ctx, child); } static void enic_enable_intr(struct enic_softc *softc, int irq) { struct enic *enic = &softc->enic; vnic_intr_unmask(&enic->intr[irq]); vnic_intr_return_all_credits(&enic->intr[irq]); } static void enic_disable_intr(struct enic_softc *softc, int irq) { struct enic *enic = &softc->enic; vnic_intr_mask(&enic->intr[irq]); vnic_intr_masked(&enic->intr[irq]); /* flush write */ } static int 
enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; if_softc_ctx_t scctx; softc = iflib_get_softc(ctx); scctx = softc->scctx; enic_enable_intr(softc, qid + scctx->isc_nrxqsets); return 0; } static int enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enic_softc *softc; softc = iflib_get_softc(ctx); enic_enable_intr(softc, qid); return 0; } static void enic_intr_enable_all(if_ctx_t ctx) { struct enic_softc *softc; if_softc_ctx_t scctx; int i; softc = iflib_get_softc(ctx); scctx = softc->scctx; for (i = 0; i < scctx->isc_vectors; i++) { enic_enable_intr(softc, i); } } static void enic_intr_disable_all(if_ctx_t ctx) { struct enic_softc *softc; if_softc_ctx_t scctx; int i; softc = iflib_get_softc(ctx); scctx = softc->scctx; /* * iflib may invoke this routine before enic_attach_post() has run, * which is before the top level shared data area is initialized and * the device made aware of it. */ for (i = 0; i < scctx->isc_vectors; i++) { enic_disable_intr(softc, i); } } static int enic_dev_open(struct enic *enic) { int err; int flags = CMD_OPENF_IG_DESCCACHE; err = enic_dev_wait(enic->vdev, vnic_dev_open, vnic_dev_open_done, flags); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); return err; } static int enic_dev_init(struct enic *enic) { int err; vnic_dev_intr_coal_timer_info_default(enic->vdev); /* * Get vNIC configuration */ err = enic_get_vnic_config(enic); if (err) { dev_err(dev, "Get vNIC configuration failed, aborting\n"); return err; } /* Get available resource counts */ enic_get_res_counts(enic); /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. 
*/ enic->intr_queues = malloc(sizeof(*enic->intr_queues) * enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO); vnic_dev_set_reset_flag(enic->vdev, 0); enic->max_flow_counter = -1; /* set up link status checking */ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ enic->overlay_offload = false; if (enic->disable_overlay && enic->vxlan) { /* * Explicitly disable overlay offload as the setting is * sticky, and resetting vNIC does not disable it. */ if (vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) { dev_err(enic, "failed to disable overlay offload\n"); } else { dev_info(enic, "Overlay offload is disabled\n"); } } if (!enic->disable_overlay && enic->vxlan && /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */ vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) { enic->overlay_offload = true; enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT; dev_info(enic, "Overlay offload is enabled\n"); /* * Reset the vxlan port to the default, as the NIC firmware * does not reset it automatically and keeps the old setting. 
*/ if (vnic_dev_overlay_offload_cfg(enic->vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) { dev_err(enic, "failed to update vxlan port\n"); return (EINVAL); } } return 0; } static void * enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle, struct iflib_dma_info *res, u8 * name) { void *vaddr; *dma_handle = 0; struct enic *enic = (struct enic *)priv; int rz; rz = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT); if (rz) { pr_err("%s : Failed to allocate memory requested for %s\n", __func__, name); return NULL; } vaddr = res->idi_vaddr; *dma_handle = res->idi_paddr; return vaddr; } static void enic_free_consistent(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle, struct iflib_dma_info *res) { iflib_dma_free(res); } static int enic_pci_mapping(struct enic_softc *softc) { int rc; rc = enic_map_bar(softc, &softc->mem, 0, true); if (rc) return rc; rc = enic_map_bar(softc, &softc->io, 2, false); return rc; } static void enic_pci_mapping_free(struct enic_softc *softc) { if (softc->mem.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->mem.rid, softc->mem.res); softc->mem.res = NULL; if (softc->io.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->io.rid, softc->io.res); softc->io.res = NULL; } static int enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int), int (*finished) (struct vnic_dev *, int *), int arg) { int done; int err; int i; err = start(vdev, arg); if (err) return err; /* Wait for func to complete...2 seconds max */ for (i = 0; i < 2000; i++) { err = finished(vdev, &done); if (err) return err; if (done) return 0; usleep(1000); } return (ETIMEDOUT); } static int enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num, bool shareable) { uint32_t flag; if (bar->res != NULL) { device_printf(softc->dev, "Bar %d already mapped\n", bar_num); return (EDOOFUS); } bar->rid = PCIR_BAR(bar_num); flag = RF_ACTIVE; if (shareable) flag |= 
RF_SHAREABLE; if ((bar->res = bus_alloc_resource_any(softc->dev, SYS_RES_MEMORY, &bar->rid, flag)) == NULL) { device_printf(softc->dev, "PCI BAR%d mapping failure\n", bar_num); return (ENXIO); } bar->tag = rman_get_bustag(bar->res); bar->handle = rman_get_bushandle(bar->res); bar->size = rman_get_size(bar->res); return 0; } void enic_init_vnic_resources(struct enic *enic) { unsigned int error_interrupt_enable = 1; unsigned int error_interrupt_offset = 0; unsigned int rxq_interrupt_enable = 0; unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; unsigned int txq_interrupt_enable = 0; unsigned int txq_interrupt_offset; unsigned int index = 0; unsigned int cq_idx; if_softc_ctx_t scctx; scctx = enic->softc->scctx; rxq_interrupt_enable = 1; txq_interrupt_enable = 0; rxq_interrupt_offset = 0; txq_interrupt_offset = scctx->isc_nrxqsets; for (index = 0; index < enic->intr_count; index++) { vnic_intr_alloc(enic->vdev, &enic->intr[index], index); } for (index = 0; index < scctx->isc_nrxqsets; index++) { cq_idx = enic_cq_rq(enic, index); vnic_rq_clean(&enic->rq[index]); vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable, error_interrupt_offset); vnic_cq_clean(&enic->cq[cq_idx]); vnic_cq_init(&enic->cq[cq_idx], 0 /* flow_control_enable */ , 1 /* color_enable */ , 0 /* cq_head */ , 0 /* cq_tail */ , 1 /* cq_tail_color */ , rxq_interrupt_enable, 1 /* cq_entry_enable */ , 0 /* cq_message_enable */ , rxq_interrupt_offset, 0 /* cq_message_addr */ ); if (rxq_interrupt_enable) rxq_interrupt_offset++; } for (index = 0; index < scctx->isc_ntxqsets; index++) { cq_idx = enic_cq_wq(enic, index); vnic_wq_clean(&enic->wq[index]); vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable, error_interrupt_offset); /* Compute unsupported ol flags for enic_prep_pkts() */ enic->wq[index].tx_offload_notsup_mask = 0; vnic_cq_clean(&enic->cq[cq_idx]); vnic_cq_init(&enic->cq[cq_idx], 0 /* flow_control_enable */ , 1 /* color_enable */ , 0 /* cq_head */ , 0 /* cq_tail */ , 1 /* 
cq_tail_color */ , txq_interrupt_enable, 1, 0, txq_interrupt_offset, 0 /* (u64)enic->wq[index].cqmsg_rz->iova */ ); } for (index = 0; index < enic->intr_count; index++) { vnic_intr_init(&enic->intr[index], 125, enic->config.intr_timer_type, /* mask_on_assertion */ 1); } } static void enic_update_packet_filter(struct enic *enic) { struct enic_softc *softc = enic->softc; ENIC_LOCK(softc); vnic_dev_packet_filter(enic->vdev, softc->directed, softc->multicast, softc->broadcast, softc->promisc, softc->allmulti); ENIC_UNLOCK(softc); } static bool enic_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (false); } } int enic_setup_finish(struct enic *enic) { struct enic_softc *softc = enic->softc; /* Default conf */ softc->directed = 1; softc->multicast = 0; softc->broadcast = 1; softc->promisc = 0; softc->allmulti = 1; enic_update_packet_filter(enic); return 0; } diff --git a/sys/dev/enic/vnic_cq.c b/sys/dev/enic/vnic_cq.c index 72de29e5a381..bd3629530a61 100644 --- a/sys/dev/enic/vnic_cq.c +++ b/sys/dev/enic/vnic_cq.c @@ -1,45 +1,43 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #include "enic.h" #include "vnic_dev.h" #include "vnic_cq.h" void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int cq_message_enable, unsigned int interrupt_offset, u64 cq_message_addr) { u64 paddr; paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; ENIC_BUS_WRITE_8(cq->ctrl, CQ_RING_BASE, paddr); ENIC_BUS_WRITE_4(cq->ctrl, CQ_RING_SIZE, cq->ring.desc_count); ENIC_BUS_WRITE_4(cq->ctrl, CQ_FLOW_CONTROL_ENABLE, flow_control_enable); ENIC_BUS_WRITE_4(cq->ctrl, CQ_COLOR_ENABLE, color_enable); ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, cq_head); ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, cq_tail); ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, cq_tail_color); ENIC_BUS_WRITE_4(cq->ctrl, CQ_INTR_ENABLE, interrupt_enable); ENIC_BUS_WRITE_4(cq->ctrl, CQ_ENTRY_ENABLE, cq_entry_enable); ENIC_BUS_WRITE_4(cq->ctrl, CQ_MESSAGE_ENABLE, cq_message_enable); ENIC_BUS_WRITE_4(cq->ctrl, CQ_INTR_OFFSET, interrupt_offset); ENIC_BUS_WRITE_8(cq->ctrl, CQ_MESSAGE_ADDR, cq_message_addr); cq->interrupt_offset = interrupt_offset; } void vnic_cq_clean(struct vnic_cq *cq) { cq->to_clean = 0; cq->last_color = 0; ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, 0); ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, 0); ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, 1); - - vnic_dev_clear_desc_ring(&cq->ring); } diff --git a/sys/dev/enic/vnic_cq.h b/sys/dev/enic/vnic_cq.h index b4549ee58c64..7f875d57ed74 100644 --- a/sys/dev/enic/vnic_cq.h +++ b/sys/dev/enic/vnic_cq.h @@ -1,163 +1,159 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/ #ifndef _VNIC_CQ_H_ #define _VNIC_CQ_H_ #include "cq_desc.h" #include "vnic_dev.h" /* Completion queue control */ struct vnic_cq_ctrl { u64 ring_base; /* 0x00 */ #define CQ_RING_BASE 0x00 u32 ring_size; /* 0x08 */ #define CQ_RING_SIZE 0x08 u32 pad0; u32 flow_control_enable; /* 0x10 */ #define CQ_FLOW_CONTROL_ENABLE 0x10 u32 pad1; u32 color_enable; /* 0x18 */ #define CQ_COLOR_ENABLE 0x18 u32 pad2; u32 cq_head; /* 0x20 */ #define CQ_HEAD 0x20 u32 pad3; u32 cq_tail; /* 0x28 */ #define CQ_TAIL 0x28 u32 pad4; u32 cq_tail_color; /* 0x30 */ #define CQ_TAIL_COLOR 0x30 u32 pad5; u32 interrupt_enable; /* 0x38 */ #define CQ_INTR_ENABLE 0x38 u32 pad6; u32 cq_entry_enable; /* 0x40 */ #define CQ_ENTRY_ENABLE 0x40 u32 pad7; u32 cq_message_enable; /* 0x48 */ #define CQ_MESSAGE_ENABLE 0x48 u32 pad8; u32 interrupt_offset; /* 0x50 */ #define CQ_INTR_OFFSET 0x50 u32 pad9; u64 cq_message_addr; /* 0x58 */ #define CQ_MESSAGE_ADDR 0x58 u32 pad10; }; #ifdef ENIC_AIC struct vnic_rx_bytes_counter { unsigned int small_pkt_bytes_cnt; unsigned int large_pkt_bytes_cnt; }; #endif struct vnic_cq { unsigned int index; struct vnic_dev *vdev; struct vnic_res *ctrl; struct vnic_dev_ring ring; unsigned int to_clean; unsigned int last_color; unsigned int interrupt_offset; unsigned int cur_rx_coal_timeval; unsigned int tobe_rx_coal_timeval; #ifdef ENIC_AIC struct vnic_rx_bytes_counter pkt_size_counter; unsigned int cur_rx_coal_timeval; unsigned int tobe_rx_coal_timeval; ktime_t prev_ts; #endif - int ntxqsets; - int nrxqsets; - int ntxqsets_start; - int nrxqsets_start; }; void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int message_enable, unsigned int interrupt_offset, u64 message_addr); void vnic_cq_clean(struct vnic_cq *cq); static inline unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do, 
int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque), void *opaque) { struct cq_desc *cq_desc; unsigned int work_done = 0; u16 q_number, completed_index; u8 type, color; cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); while (color != cq->last_color) { if ((*q_service)(cq->vdev, cq_desc, type, q_number, completed_index, opaque)) break; cq->to_clean++; if (cq->to_clean == cq->ring.desc_count) { cq->to_clean = 0; cq->last_color = cq->last_color ? 0 : 1; } cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); work_done++; if (work_done >= work_to_do) break; } return work_done; } static inline unsigned int vnic_cq_work(struct vnic_cq *cq, unsigned int work_to_do) { struct cq_desc *cq_desc; unsigned int work_avail = 0; u16 q_number, completed_index; u8 type, color; u32 to_clean, last_color; to_clean = cq->to_clean; last_color = cq->last_color; cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); while (color != last_color) { to_clean++; if (to_clean == cq->ring.desc_count) { to_clean = 0; last_color = last_color ? 0 : 1; } cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + cq->ring.desc_size * to_clean); cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); work_avail++; if (work_avail >= work_to_do) break; } return work_avail; } #endif /* _VNIC_CQ_H_ */ diff --git a/sys/dev/enic/vnic_dev.c b/sys/dev/enic/vnic_dev.c index 2d555cb2b34d..a8228aed69aa 100644 --- a/sys/dev/enic/vnic_dev.c +++ b/sys/dev/enic/vnic_dev.c @@ -1,1214 +1,1209 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. 
All rights reserved. */ #include "enic.h" #include "vnic_dev.h" #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_nic.h" #include "vnic_stats.h" #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ sizeof(struct vnic_resource) * RES_TYPE_MAX) #define VNIC_RES_STRIDE 128 #define VNIC_MAX_FLOW_COUNTERS 2048 void *vnic_dev_priv(struct vnic_dev *vdev) { return vdev->priv; } void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, bus_addr_t *dma_handle, struct iflib_dma_info *res,u8 *name), void (*free_consistent)(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle,struct iflib_dma_info *res)) { vdev->alloc_consistent = alloc_consistent; vdev->free_consistent = free_consistent; } static int vnic_dev_discover_res(struct vnic_dev *vdev, struct vnic_dev_bar *bar, unsigned int num_bars) { struct enic_softc *softc = vdev->softc; struct vnic_resource_header __iomem *rh; struct mgmt_barmap_hdr __iomem *mrh; struct vnic_resource __iomem *r; int r_offset; u8 type; if (num_bars == 0) return (EINVAL); rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO); mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO); if (!rh) { pr_err("vNIC BAR0 res hdr not mem-mapped\n"); free(rh, M_DEVBUF); free(mrh, M_DEVBUF); return (EINVAL); } /* Check for mgmt vnic in addition to normal vnic */ ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4); ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4); if ((rh->magic != VNIC_RES_MAGIC) || (rh->version != VNIC_RES_VERSION)) { if ((mrh->magic != MGMTVNIC_MAGIC) || mrh->version != MGMTVNIC_VERSION) { pr_err("vNIC BAR0 res magic/version error " \ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, MGMTVNIC_MAGIC, MGMTVNIC_VERSION, rh->magic, rh->version); free(rh, M_DEVBUF); free(mrh, M_DEVBUF); return (EINVAL); } } if (mrh->magic == MGMTVNIC_MAGIC) r_offset = sizeof(*mrh); else r_offset = sizeof(*rh); r = 
malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO); ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); while ((type = r->type) != RES_TYPE_EOL) { u8 bar_num = r->bar; u32 bar_offset =r->bar_offset; u32 count = r->count; r_offset += sizeof(*r); if (bar_num >= num_bars) continue; switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: case RES_TYPE_DEVCMD2: break; default: ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); continue; } vdev->res[type].count = count; bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem)); vdev->res[type].bar.offset = bar_offset; ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4); } free(rh, M_DEVBUF); free(mrh, M_DEVBUF); free(r, M_DEVBUF); return 0; } unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, enum vnic_res_type type) { return vdev->res[type].count; } void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index) { struct vnic_res *res; if (!vdev->res[type].bar.tag) return NULL; res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO); bcopy(&vdev->res[type], res, sizeof(*res)); switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: res->bar.offset += index * VNIC_RES_STRIDE; default: res->bar.offset += 0; } return res; } unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { /* The base address of the desc rings must be 512 byte aligned. * Descriptor count is aligned to groups of 32 descriptors. A * count of 0 means the maximum 4096 descriptors. Descriptor * size is aligned to 16 bytes. 
*/ unsigned int count_align = 32; unsigned int desc_align = 16; ring->base_align = 512; if (desc_count == 0) desc_count = 4096; ring->desc_count = VNIC_ALIGN(desc_count, count_align); ring->desc_size = VNIC_ALIGN(desc_size, desc_align); - ring->size = ring->desc_count * ring->desc_size; - ring->size_unaligned = ring->size + ring->base_align; + ring->size_unaligned = ring->desc_count * ring->desc_size \ + + ring->base_align; return ring->size_unaligned; } -void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) -{ - memset(ring->descs, 0, ring->size); -} - static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_res __iomem *devcmd = vdev->devcmd; int delay; u32 status; int err; status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return (ENODEV); } if (status & STAT_BUSY) { pr_err("Busy devcmd %d\n", _CMD_N(cmd)); return (EBUSY); } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2); } ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) { return 0; } for (delay = 0; delay < wait; delay++) { udelay(100); status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return (ENODEV); } if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = -(int)ENIC_BUS_READ_8(devcmd, DEVCMD_ARGS(0)); if (cmd != CMD_CAPABILITY) pr_err("Devcmd %d failed " \ "with error code %d\n", _CMD_N(cmd), err); return (err); } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { ENIC_BUS_READ_REGION_4(devcmd, bar, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2); } return 0; } } pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); return (ETIMEDOUT); } static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; struct devcmd2_result *result; u8 color; unsigned int 
i; u32 fetch_index, new_posted; int delay, err; u32 posted = dc2c->posted; fetch_index = ENIC_BUS_READ_4(dc2c->wq_ctrl, TX_FETCH_INDEX); if (fetch_index == 0xFFFFFFFF) return (ENODEV); new_posted = (posted + 1) % DEVCMD2_RING_SIZE; if (new_posted == fetch_index) { device_printf(dev_from_vnic_dev(vdev), "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", _CMD_N(cmd), fetch_index, posted); return (EBUSY); } dc2c->cmd_ring[posted].cmd = cmd; dc2c->cmd_ring[posted].flags = 0; if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) for (i = 0; i < VNIC_DEVCMD_NARGS; i++) dc2c->cmd_ring[posted].args[i] = vdev->args[i]; ENIC_BUS_WRITE_4(dc2c->wq_ctrl, TX_POSTED_INDEX, new_posted); dc2c->posted = new_posted; if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return (0); result = dc2c->result + dc2c->next_result; color = dc2c->color; dc2c->next_result++; if (dc2c->next_result == dc2c->result_size) { dc2c->next_result = 0; dc2c->color = dc2c->color ? 0 : 1; } for (delay = 0; delay < wait; delay++) { if (result->color == color) { if (result->error) { err = result->error; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) device_printf(dev_from_vnic_dev(vdev), "Error %d devcmd %d\n", err, _CMD_N(cmd)); return (err); } if (_CMD_DIR(cmd) & _CMD_DIR_READ) for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) vdev->args[i] = result->results[i]; return 0; } udelay(100); } device_printf(dev_from_vnic_dev(vdev), "devcmd %d timed out\n", _CMD_N(cmd)); return (ETIMEDOUT); } static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { u32 status; int err; /* * Proxy command consumes 2 arguments. 
One for proxy index, * the other is for command to be proxied */ if (nargs > VNIC_DEVCMD_NARGS - 2) { pr_err("number of args %d exceeds the maximum\n", nargs); return (EINVAL); } memset(vdev->args, 0, sizeof(vdev->args)); vdev->args[0] = vdev->proxy_index; vdev->args[1] = cmd; memcpy(&vdev->args[2], args, nargs * sizeof(args[0])); err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); if (err) return (err); status = (u32)vdev->args[0]; if (status & STAT_ERROR) { err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); return (err); } memcpy(args, &vdev->args[1], nargs * sizeof(args[0])); return 0; } static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { int err; if (nargs > VNIC_DEVCMD_NARGS) { pr_err("number of args %d exceeds the maximum\n", nargs); return (EINVAL); } memset(vdev->args, 0, sizeof(vdev->args)); memcpy(vdev->args, args, nargs * sizeof(args[0])); err = vdev->devcmd_rtn(vdev, cmd, wait); memcpy(args, vdev->args, nargs * sizeof(args[0])); return (err); } int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) { u64 args[2]; int err; args[0] = *a0; args[1] = *a1; memset(vdev->args, 0, sizeof(vdev->args)); switch (vdev->proxy) { case PROXY_BY_INDEX: err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, args, ARRAY_SIZE(args), wait); break; case PROXY_BY_BDF: err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, args, ARRAY_SIZE(args), wait); break; case PROXY_NONE: default: err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait); break; } if (err == 0) { *a0 = args[0]; *a1 = args[1]; } return (err); } int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait) { switch (vdev->proxy) { case PROXY_BY_INDEX: return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, args, nargs, wait); case PROXY_BY_BDF: return vnic_dev_cmd_proxy(vdev, 
CMD_PROXY_BY_BDF, cmd, args, nargs, wait); case PROXY_NONE: default: return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait); } } static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args, int nargs) { memset(args, 0, nargs * sizeof(*args)); args[0] = CMD_ADD_ADV_FILTER; args[1] = FILTER_CAP_MODE_V1_FLAG; return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000); } int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) { u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); if (err) return 0; return (a1 >= (u32)FILTER_DPDK_1); } /* Determine the "best" filtering mode VIC is capaible of. Returns one of 3 * value or 0 on error: * FILTER_DPDK_1- advanced filters availabile * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that * the IP layer must explicitly specified. I.e. cannot have a UDP * filter that matches both IPv4 and IPv6. * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available. * all other filter types are not available. * Retrun true in filter_tags if supported */ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, u8 *filter_actions) { u64 args[4]; int err; u32 max_level = 0; err = vnic_dev_advanced_filters_cap(vdev, args, 4); /* determine supported filter actions */ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */ if (args[2] == FILTER_CAP_MODE_V1) *filter_actions = args[3]; if (err || ((args[0] == 1) && (args[1] == 0))) { /* Adv filter Command not supported or adv filters available but * not enabled. Try the normal filter capability command. 
*/ args[0] = CMD_ADD_FILTER; args[1] = 0; err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000); if (err) return (err); max_level = args[1]; goto parse_max_level; } else if (args[2] == FILTER_CAP_MODE_V1) { /* parse filter capability mask in args[1] */ if (args[1] & FILTER_DPDK_1_FLAG) *mode = FILTER_DPDK_1; else if (args[1] & FILTER_USNIC_IP_FLAG) *mode = FILTER_USNIC_IP; else if (args[1] & FILTER_IPV4_5TUPLE_FLAG) *mode = FILTER_IPV4_5TUPLE; return 0; } max_level = args[1]; parse_max_level: if (max_level >= (u32)FILTER_USNIC_IP) *mode = FILTER_USNIC_IP; else *mode = FILTER_IPV4_5TUPLE; return 0; } void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, bool *weak) { u64 a0 = CMD_NIC_CFG, a1 = 0; int wait = 1000; int err; *cfg_chk = false; *weak = false; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); if (err == 0 && a0 != 0 && a1 != 0) { *cfg_chk = true; *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK); } } int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) { u64 a0 = (u32)cmd, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); return !(err || a0); } int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value) { u64 a0, a1; int wait = 1000; int err; a0 = offset; a1 = size; err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); switch (size) { case 1: *(u8 *)value = (u8)a0; break; case 2: *(u16 *)value = (u16)a0; break; case 4: *(u32 *)value = (u32)a0; break; case 8: *(u64 *)value = a0; break; default: BUG(); break; } return (err); } int vnic_dev_stats_clear(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); } int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; int rc; if (!vdev->stats) return (ENOMEM); *stats = vdev->stats; a0 = vdev->stats_res.idi_paddr; a1 = sizeof(struct vnic_stats); 
bus_dmamap_sync(vdev->stats_res.idi_tag, vdev->stats_res.idi_map, BUS_DMASYNC_POSTREAD); rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); bus_dmamap_sync(vdev->stats_res.idi_tag, vdev->stats_res.idi_map, BUS_DMASYNC_PREREAD); return (rc); } /* * Configure counter DMA */ int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, u32 num_counters) { u64 args[3]; int wait = 1000; int err; if (num_counters > VNIC_MAX_FLOW_COUNTERS) return (ENOMEM); if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD || num_counters == 0)) return (EINVAL); args[0] = num_counters; args[1] = vdev->flow_counters_res.idi_paddr; args[2] = period; bus_dmamap_sync(vdev->flow_counters_res.idi_tag, vdev->flow_counters_res.idi_map, BUS_DMASYNC_POSTREAD); err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait); bus_dmamap_sync(vdev->flow_counters_res.idi_tag, vdev->flow_counters_res.idi_map, BUS_DMASYNC_PREREAD); /* record if DMAs need to be stopped on close */ if (!err) vdev->flow_counters_dma_active = (num_counters != 0 && period != 0); return (err); } int vnic_dev_close(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); } int vnic_dev_enable_wait(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); else return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); } int vnic_dev_disable(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); } int vnic_dev_open(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); } int vnic_dev_open_done(struct vnic_dev *vdev, int *done) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; *done = 0; err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); if (err) return (err); *done = (a0 == 0); return 0; } int 
vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); if (err) return (err); for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = ((u8 *)&a0)[i]; return 0; } int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti) { u64 a0, a1 = 0; int wait = 1000; int err; a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | (multicast ? CMD_PFILTER_MULTICAST : 0) | (broadcast ? CMD_PFILTER_BROADCAST : 0) | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) pr_err("Can't set packet filter\n"); return (err); } int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); return (err); } int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); return (err); } int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, u8 ig_vlan_rewrite_mode) { u64 a0 = ig_vlan_rewrite_mode, a1 = 0; int wait = 1000; if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait); else return 0; } void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state) { vdev->in_reset = state; } static inline int vnic_dev_in_reset(struct vnic_dev *vdev) { 
	return vdev->in_reset;
}

/*
 * Register a notify buffer with firmware: zero it, record it in vdev
 * (unless a reset is in flight), and issue CMD_NOTIFY with its physical
 * address and the interrupt index packed into a1.  On success firmware
 * returns the notify structure size in a1, cached as notify_sz.
 */
int vnic_dev_notify_setcmd(struct vnic_dev *vdev, void *notify_addr,
    bus_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map,
	    BUS_DMASYNC_PREWRITE);
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	bus_dmamap_sync(vdev->notify_res.idi_tag, vdev->notify_res.idi_map,
	    BUS_DMASYNC_POSTWRITE);

	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	/* High half of a1 carries the interrupt index; low half the size. */
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

/*
 * Allocate (once) a DMA-able notify buffer and register it.  Reuses the
 * existing buffer when already set, and skips allocation during reset.
 */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	bus_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
		    vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
		    "vnic_notify-%u", instance++);
		iflib_dma_alloc(vdev->softc->ctx,
		    sizeof(struct vnic_devcmd_notify),
		    &vdev->notify_res, BUS_DMA_NOWAIT);
		notify_pa = vdev->notify_res.idi_paddr;
		notify_addr = vdev->notify_res.idi_vaddr;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

/*
 * Tell firmware to stop using the notify buffer and clear the cached
 * pointers (unless in reset, where state is preserved for re-arming).
 */
int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return (err);
}

/* Free the notify DMA buffer (outside reset) and deregister it. */
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		iflib_dma_free(&vdev->notify_res);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct
vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	/*
	 * Snapshot the firmware-updated notify area and retry until the
	 * checksum in word 0 matches the sum of the remaining words,
	 * i.e. we caught a consistent copy.
	 */
	do {
		csum = 0;
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map, BUS_DMASYNC_PREREAD);
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map, BUS_DMASYNC_POSTREAD);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return (1);
}

/*
 * Initialize the vNIC via CMD_INIT, or fall back to CMD_INIT_v1 plus
 * manual default-MAC programming on older firmware.
 */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
	    vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

/* Link state from the notify area; 0 when notify isn't ready yet. */
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

/* Port speed from the notify area; 0 when notify isn't ready yet. */
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

/* Convert microseconds to HW coalescing-timer ticks. */
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
	    vdev->intr_coal_timer_info.div;
}

/* Convert HW coalescing-timer ticks to microseconds. */
u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
	    vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return
	vdev->intr_coal_timer_info.max_usec;
}

/* Current MTU from the notify area; 0 when notify isn't ready yet. */
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

/* Record which interrupt mode (INTx/MSI/MSI-X) the driver selected. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
    enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

/*
 * Allocate the DMA-able buffer firmware dumps vNIC stats into.
 * NOTE(review): returns -ENOMEM here while the counter variant below
 * returns positive ENOMEM — callers appear to only test for non-zero,
 * but the inconsistency is worth confirming.
 */
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats),
	    &vdev->stats_res, 0);
	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
	return vdev->stats == NULL ? -ENOMEM : 0;
}

/*
 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
 */
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_counter_counts) *
	    VNIC_MAX_FLOW_COUNTERS, &vdev->flow_counters_res, 0);
	vdev->flow_counters = (struct vnic_counter_counts *)
	    vdev->flow_counters_res.idi_vaddr;
	vdev->flow_counters_dma_active = 0;
	return (vdev->flow_counters == NULL ?
ENOMEM : 0); } struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, struct enic_bar_info *mem, unsigned int num_bars) { if (vnic_dev_discover_res(vdev, NULL, num_bars)) goto err_out; vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); if (!vdev->devcmd) goto err_out; return vdev; err_out: return NULL; } static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) { vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); if (!vdev->devcmd) return (ENODEV); vdev->devcmd_rtn = _vnic_dev_cmd; return 0; } static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) { int err; unsigned int fetch_index; err = 0; if (vdev->devcmd2) return (0); vdev->devcmd2 = malloc(sizeof(*vdev->devcmd2), M_DEVBUF, M_NOWAIT | M_ZERO); if (!vdev->devcmd2) { return (ENOMEM); } vdev->devcmd2->color = 1; vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); if (err) { goto err_free_devcmd2; } vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; fetch_index = ENIC_BUS_READ_4(vdev->devcmd2->wq.ctrl, TX_FETCH_INDEX); if (fetch_index == 0xFFFFFFFF) return (ENODEV); enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, 0); vdev->devcmd2->posted = fetch_index; vnic_wq_enable(&vdev->devcmd2->wq); err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); if (err) goto err_free_devcmd2; vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | VNIC_PADDR_TARGET; vdev->args[1] = DEVCMD2_RING_SIZE; err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); if (err) goto err_free_devcmd2; vdev->devcmd_rtn = _vnic_dev_cmd2; return (err); err_free_devcmd2: err = ENOMEM; if (vdev->devcmd2->wq_ctrl) vnic_wq_free(&vdev->devcmd2->wq); if (vdev->devcmd2->result) vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); free(vdev->devcmd2, 
M_DEVBUF); vdev->devcmd2 = NULL; return (err); } /* * vnic_dev_classifier: Add/Delete classifier entries * @vdev: vdev of the device * @cmd: CLSF_ADD for Add filter * CLSF_DEL for Delete filter * @entry: In case of ADD filter, the caller passes the RQ number in this * variable. * This function stores the filter_id returned by the * firmware in the same variable before return; * * In case of DEL filter, the caller passes the RQ number. Return * value is irrelevant. * @data: filter data * @action: action data */ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) { u64 a0 = overlay; u64 a1 = config; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); } int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number) { u64 a1 = vxlan_udp_port_number; u64 a0 = overlay; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); } int vnic_dev_capable_vxlan(struct vnic_dev *vdev) { u64 a0 = VIC_FEATURE_VXLAN; u64 a1 = 0; int wait = 1000; int ret; ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */ return ret == 0 && (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); } bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx) { u64 a0 = 0; u64 a1 = 0; int wait = 1000; if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait)) return false; *idx = (uint32_t)a0; return true; } bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx) { u64 a0 = idx; u64 a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1, wait) == 0; } bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, bool reset, uint64_t *packets, uint64_t *bytes) { u64 a0 = idx; u64 a1 = reset ? 
	    1 : 0;
	int wait = 1000;

	if (reset) {
		/* query/reset returns updated counters */
		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
			return false;
		*packets = a0;
		*bytes = a1;
	} else {
		/* Get values DMA'd from the adapter */
		*packets = vdev->flow_counters[idx].vcc_packets;
		*bytes = vdev->flow_counters[idx].vcc_bytes;
	}

	return true;
}

/* Accessor: the device_t backing this vNIC. */
device_t dev_from_vnic_dev(struct vnic_dev *vdev)
{
	return (vdev->softc->dev);
}

/*
 * Select a devcmd transport: prefer the queue-based devcmd2 when the
 * RES_TYPE_DEVCMD2 resource exists and initializes, otherwise fall back
 * to the legacy register-based devcmd1.
 */
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void __iomem *res;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			device_printf(dev_from_vnic_dev(vdev),
			    "DEVCMD2 init failed, Using DEVCMD1\n");
		else
			return 0;
	}

	err = vnic_dev_init_devcmd1(vdev);

	return (err);
}
diff --git a/sys/dev/enic/vnic_dev.h b/sys/dev/enic/vnic_dev.h
index 5e2d01d985f3..66583f4d278d 100644
--- a/sys/dev/enic/vnic_dev.h
+++ b/sys/dev/enic/vnic_dev.h
@@ -1,176 +1,175 @@
/* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/ #ifndef _VNIC_DEV_H_ #define _VNIC_DEV_H_ #include "enic_compat.h" #include "vnic_resource.h" #include "vnic_devcmd.h" #ifndef VNIC_PADDR_TARGET #define VNIC_PADDR_TARGET 0x0000000000000000ULL #endif enum vnic_dev_intr_mode { VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_INTX, VNIC_DEV_INTR_MODE_MSI, VNIC_DEV_INTR_MODE_MSIX, }; struct vnic_dev_bar { void __iomem *vaddr; unsigned long len; }; struct vnic_dev_ring { void *descs; /* vaddr */ size_t size; bus_addr_t base_addr; /* paddr */ size_t base_align; void *descs_unaligned; size_t size_unaligned; bus_addr_t base_addr_unaligned; unsigned int desc_size; unsigned int desc_count; unsigned int desc_avail; unsigned int last_count; iflib_dma_info_t ifdip; }; struct vnic_dev_iomap_info { bus_addr_t bus_addr; unsigned long len; void __iomem *vaddr; }; struct vnic_dev; struct vnic_stats; void *vnic_dev_priv(struct vnic_dev *vdev); unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, enum vnic_res_type type); void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name), void (*free_consistent)(void *priv, size_t size, void *vaddr, bus_addr_t dma_handle, struct iflib_dma_info *res)); void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index); uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev, enum vnic_res_type type); uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index); unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev, enum vnic_res_type type); unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size); -void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size); void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring 
*ring); int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait); void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index); void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf); void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info); int vnic_dev_capable_adv_filters(struct vnic_dev *vdev); int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd); int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, u8 *filter_actions); void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, bool *weak); int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev); int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value); int vnic_dev_stats_clear(struct vnic_dev *vdev); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, u32 num_counters); int vnic_dev_hang_notify(struct vnic_dev *vdev); int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state); int vnic_dev_notify_unset(struct vnic_dev *vdev); int vnic_dev_notify_setcmd(struct vnic_dev *vdev, void *notify_addr, bus_addr_t notify_pa, u16 intr); int 
vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); int vnic_dev_link_status(struct vnic_dev *vdev); u32 vnic_dev_port_speed(struct vnic_dev *vdev); u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); u32 vnic_dev_mtu(struct vnic_dev *vdev); u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); u32 vnic_dev_notify_status(struct vnic_dev *vdev); u32 vnic_dev_uif(struct vnic_dev *vdev); int vnic_dev_close(struct vnic_dev *vdev); int vnic_dev_enable(struct vnic_dev *vdev); int vnic_dev_enable_wait(struct vnic_dev *vdev); int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err); int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_deinit(struct vnic_dev *vdev); void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode); enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec); u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles); u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev); int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, u8 ig_vlan_rewrite_mode); struct enic; struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, struct enic_bar_info *mem, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); int vnic_dev_alloc_counter_mem(struct 
vnic_dev *vdev); int vnic_dev_cmd_init(struct vnic_dev *vdev); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op); u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_enable2(struct vnic_dev *vdev, int active); int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter_v2 *data, struct filter_action_v2 *action_v2); int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); int vnic_dev_capable_vxlan(struct vnic_dev *vdev); bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx); bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx); bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, bool reset, uint64_t *packets, uint64_t *bytes); void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev); device_t dev_from_vnic_dev(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/sys/dev/enic/vnic_rq.c b/sys/dev/enic/vnic_rq.c index ef30563fa2f3..4c02347579b1 100644 --- a/sys/dev/enic/vnic_rq.c +++ b/sys/dev/enic/vnic_rq.c @@ -1,98 +1,96 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
*/

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_rq.h"

/*
 * Program the RQ control registers: ring base/size, completion queue
 * binding, error-interrupt wiring, and the starting fetch/posted indexes.
 */
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
    unsigned int fetch_index, unsigned int posted_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = rq->ring.desc_count;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	ENIC_BUS_WRITE_8(rq->ctrl, RX_RING_BASE, paddr);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_RING_SIZE, count);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_CQ_INDEX, cq_index);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_ENABLE,
	    error_interrupt_enable);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_OFFSET,
	    error_interrupt_offset);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_STATUS, 0);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, fetch_index);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, posted_index);
}

/*
 * Initialize an RQ from index 0 and reset its software counters.
 * NOTE(review): the unconditional "fetch_index = 0" below makes the
 * preceding hardware read and surprise-removal check dead code — the
 * ring always starts at 0 regardless.  Presumably intentional for this
 * driver; confirm before removing the read.
 */
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset)
{
	u32 fetch_index = 0;

	/* Use current fetch_index as the ring starting point */
	fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX);

	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}

	fetch_index = 0;
	vnic_rq_init_start(rq, cq_index, fetch_index, fetch_index,
	    error_interrupt_enable, error_interrupt_offset);
	rq->rxst_idx = 0;
	rq->tot_pkts = 0;
}

/* Read (and thereby report) the RQ error status register. */
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return (ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS));
}

void vnic_rq_enable(struct vnic_rq *rq)
{
	ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 1);
}

/*
 * Disable the RQ and poll up to ~10ms for the hardware to stop running.
 * Returns ETIMEDOUT if the hardware never acknowledges.
 */
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 0);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 1000; wait++) {
		if (!(ENIC_BUS_READ_4(rq->ctrl, RX_RUNNING)))
			return 0;
		udelay(10);
	}

	pr_err("Failed to disable RQ[%d]\n", rq->index);

	return (ETIMEDOUT);
}

/* Reset RQ software state and re-post at the hardware's fetch index. */
void vnic_rq_clean(struct vnic_rq *rq)
{
	u32 fetch_index;
	unsigned int count = rq->ring.desc_count;

	rq->ring.desc_avail = count - 1;
	rq->rx_nb_hold = 0;

	/* Use current fetch_index as the ring starting point */
	fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX);

	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}

	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, fetch_index);
-
-	vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/sys/dev/enic/vnic_wq.c b/sys/dev/enic/vnic_wq.c
index 995af3270a21..1d3120798798 100644
--- a/sys/dev/enic/vnic_wq.c
+++ b/sys/dev/enic/vnic_wq.c
@@ -1,185 +1,183 @@
/* SPDX-License-Identifier: BSD-3-Clause * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_wq.h"

/*
 * Allocate a DMA-coherent descriptor ring and fill in the bookkeeping
 * fields of *ring.
 * NOTE(review): the '-'/'+' lines below are patch hunk markers preserved
 * from this collapsed diff; also, the iflib_dma_free() after the success
 * "return (err)" is unreachable dead code — TODO confirm intent.
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
    struct vnic_dev_ring *ring, unsigned int desc_count,
    unsigned int desc_size)
{
	iflib_dma_info_t ifdip;
	int err;

	if ((ifdip = malloc(sizeof(struct iflib_dma_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev_from_vnic_dev(vdev),
		    "Unable to allocate DMA info memory\n");
		return (ENOMEM);
	}

	err = iflib_dma_alloc(vdev->softc->ctx, desc_count * desc_size,
	    ifdip, 0);
	if (err) {
		device_printf(dev_from_vnic_dev(vdev),
		    "Unable to allocate DEVCMD2 descriptors\n");
		err = ENOMEM;
		goto err_out_alloc;
	}

	ring->base_addr = ifdip->idi_paddr;
	ring->descs = ifdip->idi_vaddr;
	ring->ifdip = ifdip;
	ring->desc_size = desc_size;
	ring->desc_count = desc_count;
	ring->last_count = 0;
	ring->desc_avail = ring->desc_count - 1;
-	ring->size = ring->desc_count * ring->desc_size;
	ring->base_align = 512;
-	ring->size_unaligned = ring->size + ring->base_align;
+	ring->size_unaligned = ring->desc_count * ring->desc_size \
+	    + ring->base_align;

	return (err);

	iflib_dma_free(ifdip);
err_out_alloc:
	free(ifdip, M_DEVBUF);
	return (err);
}

/* Free a descriptor ring allocated by vnic_dev_alloc_desc_ring(). */
void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
    struct vnic_dev_ring *ring)
{
	if (ring &&
ring->descs) { iflib_dma_free(ring->ifdip); free(ring->ifdip, M_DEVBUF); ring->descs = NULL; } } void vnic_wq_free(struct vnic_wq *wq) { vnic_dev_free_desc_ring(wq->vdev, &wq->ring); wq->ctrl = NULL; } int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int desc_count, unsigned int desc_size) { int err; wq->index = 0; wq->vdev = vdev; wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); if (!wq->ctrl) return (EINVAL); vnic_wq_disable(wq); err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); return (err); } void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) { if (vdev->devcmd2) { vnic_wq_disable(&vdev->devcmd2->wq); if (vdev->devcmd2->wq_ctrl) vnic_wq_free(&vdev->devcmd2->wq); if (vdev->devcmd2->result) vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); free(vdev->devcmd2, M_DEVBUF); vdev->devcmd2 = NULL; } } int vnic_dev_deinit(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return (vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait)); return (0); } void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; ENIC_BUS_WRITE_8(wq->ctrl, TX_RING_BASE, paddr); ENIC_BUS_WRITE_4(wq->ctrl, TX_RING_SIZE, count); ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, fetch_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, posted_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_CQ_INDEX, cq_index); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_ENABLE, error_interrupt_enable); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_OFFSET, error_interrupt_offset); ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0); wq->head_idx = fetch_index; wq->tail_idx = wq->head_idx; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { 
	/* Body of vnic_wq_init(): start the WQ at index 0, reset counters. */
	enic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable,
	    error_interrupt_offset);
	wq->cq_pend = 0;
	wq->last_completed_index = 0;
}

/* Read (and thereby report) the WQ error status register. */
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return (ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS));
}

void vnic_wq_enable(struct vnic_wq *wq)
{
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 1);
}

/*
 * Disable the WQ and poll up to ~10ms for the hardware to stop running.
 * Returns ETIMEDOUT if the hardware never acknowledges.
 */
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 0);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 1000; wait++) {
		if (!(ENIC_BUS_READ_4(wq->ctrl, TX_RUNNING)))
			return 0;
		udelay(10);
	}

	pr_err("Failed to disable WQ[%d]\n", wq->index);

	return (ETIMEDOUT);
}

/*
 * Reclaim all in-flight descriptors and reset both the software indexes
 * and the hardware fetch/posted/error registers to a pristine state.
 * (The trailing '-' lines are patch deletion markers preserved from this
 * collapsed diff.)
 */
void vnic_wq_clean(struct vnic_wq *wq)
{
	unsigned int to_clean = wq->tail_idx;

	while (vnic_wq_desc_used(wq) > 0) {
		to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
		wq->ring.desc_avail++;
	}

	wq->head_idx = 0;
	wq->tail_idx = 0;
	wq->last_completed_index = 0;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, 0);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, 0);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);
-
-	vnic_dev_clear_desc_ring(&wq->ring);
}