Index: head/sys/dev/vmware/vmxnet3/if_vmx.c =================================================================== --- head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 343290) +++ head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 343291) @@ -1,3950 +1,2418 @@ /*- * Copyright (c) 2013 Tsubai Masanari * Copyright (c) 2013 Bryan Venteicher + * Copyright (c) 2018 Patrick Kelsey * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $ */ /* Driver for VMware vmxnet3 virtual ethernet devices. */ #include __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include +#include -#include - #include #include #include #include #include #include #include -#include - #include #include #include #include #include #include +#include "ifdi_if.h" + #include "if_vmxreg.h" #include "if_vmxvar.h" #include "opt_inet.h" #include "opt_inet6.h" -#ifdef VMXNET3_FAILPOINTS -#include -static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0, - "vmxnet3 fail points"); -#define VMXNET3_FP _debug_fail_point_vmxnet3 -#endif -static int vmxnet3_probe(device_t); -static int vmxnet3_attach(device_t); -static int vmxnet3_detach(device_t); -static int vmxnet3_shutdown(device_t); +#define VMXNET3_VMWARE_VENDOR_ID 0x15AD +#define VMXNET3_VMWARE_DEVICE_ID 0x07B0 +static pci_vendor_info_t vmxnet3_vendor_info_array[] = +{ + PVID(VMXNET3_VMWARE_VENDOR_ID, VMXNET3_VMWARE_DEVICE_ID, "VMware VMXNET3 Ethernet Adapter"), + /* required last entry */ + PVID_END +}; + +static void *vmxnet3_register(device_t); +static int vmxnet3_attach_pre(if_ctx_t); +static int vmxnet3_msix_intr_assign(if_ctx_t, int); +static void vmxnet3_free_irqs(struct vmxnet3_softc *); +static int vmxnet3_attach_post(if_ctx_t); +static int vmxnet3_detach(if_ctx_t); +static int vmxnet3_shutdown(if_ctx_t); +static int vmxnet3_suspend(if_ctx_t); +static int vmxnet3_resume(if_ctx_t); + static int vmxnet3_alloc_resources(struct vmxnet3_softc *); static void vmxnet3_free_resources(struct vmxnet3_softc *); static int vmxnet3_check_version(struct vmxnet3_softc *); -static void vmxnet3_initial_config(struct vmxnet3_softc *); -static void vmxnet3_check_multiqueue(struct vmxnet3_softc *); +static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *); -static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int, - struct vmxnet3_interrupt *); -static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *); -static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *); -static 
int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *); -static int vmxnet3_setup_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *); +static int vmxnet3_queues_shared_alloc(struct vmxnet3_softc *); +static void vmxnet3_init_txq(struct vmxnet3_softc *, int); +static int vmxnet3_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static void vmxnet3_init_rxq(struct vmxnet3_softc *, int, int); +static int vmxnet3_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static void vmxnet3_queues_free(if_ctx_t); -static void vmxnet3_free_interrupt(struct vmxnet3_softc *, - struct vmxnet3_interrupt *); -static void vmxnet3_free_interrupts(struct vmxnet3_softc *); - -#ifndef VMXNET3_LEGACY_TX -static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *); -static void vmxnet3_start_taskqueue(struct vmxnet3_softc *); -static void vmxnet3_drain_taskqueue(struct vmxnet3_softc *); -static void vmxnet3_free_taskqueue(struct vmxnet3_softc *); -#endif - -static int vmxnet3_init_rxq(struct vmxnet3_softc *, int); -static int vmxnet3_init_txq(struct vmxnet3_softc *, int); -static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *); -static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *); -static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *); -static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *); - static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *); static void vmxnet3_free_shared_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *); -static void vmxnet3_free_txq_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *); -static void vmxnet3_free_rxq_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); -static void vmxnet3_free_queue_data(struct vmxnet3_softc *); static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); +static void vmxnet3_free_mcast_table(struct vmxnet3_softc *); static void vmxnet3_init_shared_data(struct vmxnet3_softc *); -static void vmxnet3_init_hwassist(struct vmxnet3_softc *); -static void vmxnet3_reinit_interface(struct vmxnet3_softc *); static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); static int vmxnet3_alloc_data(struct vmxnet3_softc *); static void vmxnet3_free_data(struct vmxnet3_softc *); -static int vmxnet3_setup_interface(struct vmxnet3_softc *); static void vmxnet3_evintr(struct vmxnet3_softc *); -static void vmxnet3_txq_eof(struct vmxnet3_txqueue *); -static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); -static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *); -static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, - struct vmxnet3_rxring *, int); -static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *); -static void vmxnet3_legacy_intr(void *); -static void vmxnet3_txq_intr(void *); -static void vmxnet3_rxq_intr(void *); -static void vmxnet3_event_intr(void *); +static int vmxnet3_isc_txd_encap(void *, if_pkt_info_t); +static void vmxnet3_isc_txd_flush(void *, uint16_t, qidx_t); +static int vmxnet3_isc_txd_credits_update(void *, uint16_t, bool); +static int vmxnet3_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t); +static int vmxnet3_isc_rxd_pkt_get(void *, if_rxd_info_t); +static void vmxnet3_isc_rxd_refill(void *, if_rxd_update_t); +static void vmxnet3_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t); +static int vmxnet3_legacy_intr(void *); 
+static int vmxnet3_rxq_intr(void *); +static int vmxnet3_event_intr(void *); -static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *); -static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); -static void vmxnet3_stop(struct vmxnet3_softc *); +static void vmxnet3_stop(if_ctx_t); static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *); -static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); -static int vmxnet3_reinit_queues(struct vmxnet3_softc *); +static void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); +static void vmxnet3_reinit_queues(struct vmxnet3_softc *); static int vmxnet3_enable_device(struct vmxnet3_softc *); static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *); -static int vmxnet3_reinit(struct vmxnet3_softc *); -static void vmxnet3_init_locked(struct vmxnet3_softc *); -static void vmxnet3_init(void *); +static void vmxnet3_init(if_ctx_t); +static void vmxnet3_multi_set(if_ctx_t); +static int vmxnet3_mtu_set(if_ctx_t, uint32_t); +static void vmxnet3_media_status(if_ctx_t, struct ifmediareq *); +static int vmxnet3_media_change(if_ctx_t); +static int vmxnet3_promisc_set(if_ctx_t, int); +static uint64_t vmxnet3_get_counter(if_ctx_t, ift_counter); +static void vmxnet3_update_admin_status(if_ctx_t); +static void vmxnet3_txq_timer(if_ctx_t, uint16_t); -static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *, - int *, int *, int *); -static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, - bus_dmamap_t, bus_dma_segment_t [], int *); -static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t); -static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **); -#ifdef VMXNET3_LEGACY_TX -static void vmxnet3_start_locked(struct ifnet *); -static void vmxnet3_start(struct ifnet *); -#else -static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *, - struct mbuf *); -static int vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *); -static void vmxnet3_txq_tq_deferred(void *, int); -#endif -static void vmxnet3_txq_start(struct vmxnet3_txqueue *); -static void vmxnet3_tx_start_all(struct vmxnet3_softc *); - static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int, uint16_t); -static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t); -static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t); -static void vmxnet3_set_rxfilter(struct vmxnet3_softc *); -static int vmxnet3_change_mtu(struct vmxnet3_softc *, int); -static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t); -static uint64_t vmxnet3_get_counter(struct ifnet *, ift_counter); +static void vmxnet3_vlan_register(if_ctx_t, uint16_t); +static void vmxnet3_vlan_unregister(if_ctx_t, uint16_t); +static void vmxnet3_set_rxfilter(struct vmxnet3_softc *, int); -#ifndef VMXNET3_LEGACY_TX -static void vmxnet3_qflush(struct ifnet *); -#endif - -static int vmxnet3_watchdog(struct vmxnet3_txqueue *); static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *); -static void vmxnet3_tick(void *); +static int vmxnet3_link_is_up(struct vmxnet3_softc *); static void vmxnet3_link_status(struct vmxnet3_softc *); -static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *); -static int vmxnet3_media_change(struct ifnet *); static void vmxnet3_set_lladdr(struct vmxnet3_softc *); static void vmxnet3_get_lladdr(struct vmxnet3_softc *); static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *, struct sysctl_ctx_list *, struct 
sysctl_oid_list *); static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void vmxnet3_setup_sysctl(struct vmxnet3_softc *); static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t, uint32_t); static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t); static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t, uint32_t); static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t); static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t); +static int vmxnet3_tx_queue_intr_enable(if_ctx_t, uint16_t); +static int vmxnet3_rx_queue_intr_enable(if_ctx_t, uint16_t); +static void vmxnet3_link_intr_enable(if_ctx_t); static void vmxnet3_enable_intr(struct vmxnet3_softc *, int); static void vmxnet3_disable_intr(struct vmxnet3_softc *, int); -static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *); -static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *); +static void vmxnet3_intr_enable_all(if_ctx_t); +static void vmxnet3_intr_disable_all(if_ctx_t); -static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, - bus_size_t, struct vmxnet3_dma_alloc *); -static void vmxnet3_dma_free(struct vmxnet3_softc *, - struct vmxnet3_dma_alloc *); -static int vmxnet3_tunable_int(struct vmxnet3_softc *, - const char *, int); - typedef enum { VMXNET3_BARRIER_RD, VMXNET3_BARRIER_WR, VMXNET3_BARRIER_RDWR, } vmxnet3_barrier_t; static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t); -/* Tunables. */ -static int vmxnet3_mq_disable = 0; -TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable); -static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES; -TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue); -static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES; -TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue); -static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC; -TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc); -static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC; -TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc); static device_method_t vmxnet3_methods[] = { - /* Device interface. 
*/ - DEVMETHOD(device_probe, vmxnet3_probe), - DEVMETHOD(device_attach, vmxnet3_attach), - DEVMETHOD(device_detach, vmxnet3_detach), - DEVMETHOD(device_shutdown, vmxnet3_shutdown), - + /* Device interface */ + DEVMETHOD(device_register, vmxnet3_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), + DEVMETHOD(device_suspend, iflib_device_suspend), + DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t vmxnet3_driver = { "vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc) }; static devclass_t vmxnet3_devclass; DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0); +IFLIB_PNP_INFO(pci, vmx, vmxnet3_vendor_info_array); +MODULE_VERSION(vmx, 2); MODULE_DEPEND(vmx, pci, 1, 1, 1); MODULE_DEPEND(vmx, ether, 1, 1, 1); +MODULE_DEPEND(vmx, iflib, 1, 1, 1); -#define VMXNET3_VMWARE_VENDOR_ID 0x15AD -#define VMXNET3_VMWARE_DEVICE_ID 0x07B0 +static device_method_t vmxnet3_iflib_methods[] = { + DEVMETHOD(ifdi_tx_queues_alloc, vmxnet3_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, vmxnet3_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, vmxnet3_queues_free), -static int -vmxnet3_probe(device_t dev) -{ + DEVMETHOD(ifdi_attach_pre, vmxnet3_attach_pre), + DEVMETHOD(ifdi_attach_post, vmxnet3_attach_post), + DEVMETHOD(ifdi_detach, vmxnet3_detach), - if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID && - pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) { - device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter"); - return (BUS_PROBE_DEFAULT); - } + DEVMETHOD(ifdi_init, vmxnet3_init), + DEVMETHOD(ifdi_stop, vmxnet3_stop), + DEVMETHOD(ifdi_multi_set, vmxnet3_multi_set), + DEVMETHOD(ifdi_mtu_set, vmxnet3_mtu_set), + DEVMETHOD(ifdi_media_status, vmxnet3_media_status), + DEVMETHOD(ifdi_media_change, vmxnet3_media_change), + DEVMETHOD(ifdi_promisc_set, vmxnet3_promisc_set), + DEVMETHOD(ifdi_get_counter, vmxnet3_get_counter), + DEVMETHOD(ifdi_update_admin_status, vmxnet3_update_admin_status), + DEVMETHOD(ifdi_timer, vmxnet3_txq_timer), - return (ENXIO); + DEVMETHOD(ifdi_tx_queue_intr_enable, vmxnet3_tx_queue_intr_enable), + DEVMETHOD(ifdi_rx_queue_intr_enable, vmxnet3_rx_queue_intr_enable), + DEVMETHOD(ifdi_link_intr_enable, vmxnet3_link_intr_enable), + DEVMETHOD(ifdi_intr_enable, vmxnet3_intr_enable_all), + DEVMETHOD(ifdi_intr_disable, vmxnet3_intr_disable_all), + DEVMETHOD(ifdi_msix_intr_assign, vmxnet3_msix_intr_assign), + + DEVMETHOD(ifdi_vlan_register, vmxnet3_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, vmxnet3_vlan_unregister), + + DEVMETHOD(ifdi_shutdown, vmxnet3_shutdown), + DEVMETHOD(ifdi_suspend, vmxnet3_suspend), + DEVMETHOD(ifdi_resume, vmxnet3_resume), + + DEVMETHOD_END +}; + +static driver_t vmxnet3_iflib_driver = { + "vmx", vmxnet3_iflib_methods, sizeof(struct vmxnet3_softc) +}; + +struct if_txrx vmxnet3_txrx = { + .ift_txd_encap = vmxnet3_isc_txd_encap, + .ift_txd_flush = vmxnet3_isc_txd_flush, + .ift_txd_credits_update = vmxnet3_isc_txd_credits_update, + .ift_rxd_available = vmxnet3_isc_rxd_available, + .ift_rxd_pkt_get = vmxnet3_isc_rxd_pkt_get, + .ift_rxd_refill = vmxnet3_isc_rxd_refill, + .ift_rxd_flush = vmxnet3_isc_rxd_flush, + .ift_legacy_intr = vmxnet3_legacy_intr +}; + +static struct if_shared_ctx vmxnet3_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = 512, + + .isc_tx_maxsize = VMXNET3_TX_MAXSIZE, + .isc_tx_maxsegsize = VMXNET3_TX_MAXSEGSIZE, + .isc_tso_maxsize = VMXNET3_TSO_MAXSIZE 
+ sizeof(struct ether_vlan_header), + .isc_tso_maxsegsize = VMXNET3_TX_MAXSEGSIZE, + + /* + * These values are used to configure the busdma tag used for + * receive descriptors. Each receive descriptor only points to one + * buffer. + */ + .isc_rx_maxsize = VMXNET3_RX_MAXSEGSIZE, /* One buf per descriptor */ + .isc_rx_nsegments = 1, /* One mapping per descriptor */ + .isc_rx_maxsegsize = VMXNET3_RX_MAXSEGSIZE, + + .isc_admin_intrcnt = 1, + .isc_vendor_info = vmxnet3_vendor_info_array, + .isc_driver_version = "2", + .isc_driver = &vmxnet3_iflib_driver, + .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ, + + /* + * Number of receive queues per receive queue set, with associated + * descriptor settings for each. + */ + .isc_nrxqs = 3, + .isc_nfl = 2, /* one free list for each receive command queue */ + .isc_nrxd_min = {VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC}, + .isc_nrxd_max = {VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC}, + .isc_nrxd_default = {VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC}, + + /* + * Number of transmit queues per transmit queue set, with associated + * descriptor settings for each. + */ + .isc_ntxqs = 2, + .isc_ntxd_min = {VMXNET3_MIN_TX_NDESC, VMXNET3_MIN_TX_NDESC}, + .isc_ntxd_max = {VMXNET3_MAX_TX_NDESC, VMXNET3_MAX_TX_NDESC}, + .isc_ntxd_default = {VMXNET3_DEF_TX_NDESC, VMXNET3_DEF_TX_NDESC}, +}; + +static void * +vmxnet3_register(device_t dev) +{ + return (&vmxnet3_sctx_init); } static int -vmxnet3_attach(device_t dev) +vmxnet3_attach_pre(if_ctx_t ctx) { + device_t dev; + if_softc_ctx_t scctx; struct vmxnet3_softc *sc; + uint32_t intr_config; int error; - sc = device_get_softc(dev); + dev = iflib_get_dev(ctx); + sc = iflib_get_softc(ctx); sc->vmx_dev = dev; + sc->vmx_ctx = ctx; + sc->vmx_sctx = iflib_get_sctx(ctx); + sc->vmx_scctx = iflib_get_softc_ctx(ctx); + sc->vmx_ifp = iflib_get_ifp(ctx); + sc->vmx_media = iflib_get_media(ctx); + scctx = sc->vmx_scctx; - pci_enable_busmaster(dev); + scctx->isc_tx_nsegments = VMXNET3_TX_MAXSEGS; + scctx->isc_tx_tso_segments_max = VMXNET3_TX_MAXSEGS; + /* isc_tx_tso_size_max doesn't include possible vlan header */ + scctx->isc_tx_tso_size_max = VMXNET3_TSO_MAXSIZE; + scctx->isc_tx_tso_segsize_max = VMXNET3_TX_MAXSEGSIZE; + scctx->isc_txrx = &vmxnet3_txrx; - VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev)); - callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0); + /* If 0, the iflib tunable was not set, so set to the default */ + if (scctx->isc_nrxqsets == 0) + scctx->isc_nrxqsets = VMXNET3_DEF_RX_QUEUES; + scctx->isc_nrxqsets_max = min(VMXNET3_MAX_RX_QUEUES, mp_ncpus); - vmxnet3_initial_config(sc); + /* If 0, the iflib tunable was not set, so set to the default */ + if (scctx->isc_ntxqsets == 0) + scctx->isc_ntxqsets = VMXNET3_DEF_TX_QUEUES; + scctx->isc_ntxqsets_max = min(VMXNET3_MAX_TX_QUEUES, mp_ncpus); - error = vmxnet3_alloc_resources(sc); - if (error) - goto fail; + /* + * Enforce that the transmit completion queue descriptor count is + * the same as the transmit command queue descriptor count. 
+ */ + scctx->isc_ntxd[0] = scctx->isc_ntxd[1]; + scctx->isc_txqsizes[0] = + sizeof(struct vmxnet3_txcompdesc) * scctx->isc_ntxd[0]; + scctx->isc_txqsizes[1] = + sizeof(struct vmxnet3_txdesc) * scctx->isc_ntxd[1]; - error = vmxnet3_check_version(sc); - if (error) - goto fail; + /* + * Enforce that the receive completion queue descriptor count is the + * sum of the receive command queue descriptor counts, and that the + * second receive command queue descriptor count is the same as the + * first one. + */ + scctx->isc_nrxd[2] = scctx->isc_nrxd[1]; + scctx->isc_nrxd[0] = scctx->isc_nrxd[1] + scctx->isc_nrxd[2]; + scctx->isc_rxqsizes[0] = + sizeof(struct vmxnet3_rxcompdesc) * scctx->isc_nrxd[0]; + scctx->isc_rxqsizes[1] = + sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[1]; + scctx->isc_rxqsizes[2] = + sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[2]; - error = vmxnet3_alloc_rxtx_queues(sc); - if (error) - goto fail; + scctx->isc_rss_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; -#ifndef VMXNET3_LEGACY_TX - error = vmxnet3_alloc_taskqueue(sc); + /* Map PCI BARs */ + error = vmxnet3_alloc_resources(sc); if (error) goto fail; -#endif - error = vmxnet3_alloc_interrupts(sc); + /* Check device versions */ + error = vmxnet3_check_version(sc); if (error) goto fail; - vmxnet3_check_multiqueue(sc); + /* + * The interrupt mode can be set in the hypervisor configuration via + * the parameter ethernet.intrMode. + */ + intr_config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); + sc->vmx_intr_mask_mode = (intr_config >> 2) & 0x03; - error = vmxnet3_alloc_data(sc); - if (error) - goto fail; + /* + * Configure the softc context to attempt to configure the interrupt + * mode now indicated by intr_config. iflib will follow the usual + * fallback path MSIX -> MSI -> LEGACY, starting at the configured + * starting mode. + */ + switch (intr_config & 0x03) { + case VMXNET3_IT_AUTO: + case VMXNET3_IT_MSIX: + scctx->isc_msix_bar = pci_msix_table_bar(dev); + break; + case VMXNET3_IT_MSI: + scctx->isc_msix_bar = -1; + scctx->isc_disable_msix = 1; + break; + case VMXNET3_IT_LEGACY: + scctx->isc_msix_bar = 0; + break; + } - error = vmxnet3_setup_interface(sc); - if (error) - goto fail; + scctx->isc_tx_csum_flags = VMXNET3_CSUM_ALL_OFFLOAD; + scctx->isc_capabilities = scctx->isc_capenable = + IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | + IFCAP_TSO4 | IFCAP_TSO6 | + IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | + IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | + IFCAP_JUMBO_MTU; - error = vmxnet3_setup_interrupts(sc); - if (error) { - ether_ifdetach(sc->vmx_ifp); - device_printf(dev, "could not set up interrupt\n"); - goto fail; - } + /* These capabilities are not enabled by default. */ + scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; - vmxnet3_setup_sysctl(sc); -#ifndef VMXNET3_LEGACY_TX - vmxnet3_start_taskqueue(sc); -#endif + vmxnet3_get_lladdr(sc); + iflib_set_mac(ctx, sc->vmx_lladdr); + return (0); fail: - if (error) - vmxnet3_detach(dev); + /* + * We must completely clean up anything allocated above as iflib + * will not invoke any other driver entry points as a result of this + * failure. 
+ */ + vmxnet3_free_resources(sc); return (error); } static int -vmxnet3_detach(device_t dev) +vmxnet3_msix_intr_assign(if_ctx_t ctx, int msix) { struct vmxnet3_softc *sc; - struct ifnet *ifp; + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + int error; + int i; + char irq_name[16]; - sc = device_get_softc(dev); - ifp = sc->vmx_ifp; + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_nrxqsets; i++) { + snprintf(irq_name, sizeof(irq_name), "rxq%d", i); - if (device_is_attached(dev)) { - VMXNET3_CORE_LOCK(sc); - vmxnet3_stop(sc); - VMXNET3_CORE_UNLOCK(sc); + rxq = &sc->vmx_rxq[i]; + error = iflib_irq_alloc_generic(ctx, &rxq->vxrxq_irq, i + 1, + IFLIB_INTR_RX, vmxnet3_rxq_intr, rxq, i, irq_name); + if (error) { + device_printf(iflib_get_dev(ctx), + "Failed to register rxq %d interrupt handler\n", i); + return (error); + } + } - callout_drain(&sc->vmx_tick); -#ifndef VMXNET3_LEGACY_TX - vmxnet3_drain_taskqueue(sc); -#endif + for (i = 0; i < scctx->isc_ntxqsets; i++) { + snprintf(irq_name, sizeof(irq_name), "txq%d", i); - ether_ifdetach(ifp); + /* + * Don't provide the corresponding rxq irq for reference - + * we want the transmit task to be attached to a task queue + * that is different from the one used by the corresponding + * rxq irq. That is because the TX doorbell writes are very + * expensive as virtualized MMIO operations, so we want to + * be able to defer them to another core when possible so + * that they don't steal receive processing cycles during + * stack turnarounds like TCP ACK generation. The other + * piece to this approach is enabling the iflib abdicate + * option (currently via an interface-specific + * tunable/sysctl). + */ + iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, + irq_name); } - if (sc->vmx_vlan_attach != NULL) { - EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach); - sc->vmx_vlan_attach = NULL; + error = iflib_irq_alloc_generic(ctx, &sc->vmx_event_intr_irq, + scctx->isc_nrxqsets + 1, IFLIB_INTR_ADMIN, vmxnet3_event_intr, sc, 0, + "event"); + if (error) { + device_printf(iflib_get_dev(ctx), + "Failed to register event interrupt handler\n"); + return (error); } - if (sc->vmx_vlan_detach != NULL) { - EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach); - sc->vmx_vlan_detach = NULL; - } -#ifndef VMXNET3_LEGACY_TX - vmxnet3_free_taskqueue(sc); -#endif - vmxnet3_free_interrupts(sc); + return (0); +} - if (ifp != NULL) { - if_free(ifp); - sc->vmx_ifp = NULL; +static void +vmxnet3_free_irqs(struct vmxnet3_softc *sc) +{ + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + int i; + + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_nrxqsets; i++) { + rxq = &sc->vmx_rxq[i]; + iflib_irq_free(sc->vmx_ctx, &rxq->vxrxq_irq); } - ifmedia_removeall(&sc->vmx_media); + iflib_irq_free(sc->vmx_ctx, &sc->vmx_event_intr_irq); +} +static int +vmxnet3_attach_post(if_ctx_t ctx) +{ + device_t dev; + if_softc_ctx_t scctx; + struct vmxnet3_softc *sc; + int error; + + dev = iflib_get_dev(ctx); + scctx = iflib_get_softc_ctx(ctx); + sc = iflib_get_softc(ctx); + + if (scctx->isc_nrxqsets > 1) + sc->vmx_flags |= VMXNET3_FLAG_RSS; + + error = vmxnet3_alloc_data(sc); + if (error) + goto fail; + + vmxnet3_set_interrupt_idx(sc); + vmxnet3_setup_sysctl(sc); + + ifmedia_add(sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(sc->vmx_media, IFM_ETHER | IFM_AUTO); + +fail: + return (error); +} + +static int +vmxnet3_detach(if_ctx_t ctx) +{ + struct vmxnet3_softc *sc; + + sc = iflib_get_softc(ctx); + + 
vmxnet3_free_irqs(sc); vmxnet3_free_data(sc); vmxnet3_free_resources(sc); - vmxnet3_free_rxtx_queues(sc); - VMXNET3_CORE_LOCK_DESTROY(sc); + return (0); +} +static int +vmxnet3_shutdown(if_ctx_t ctx) +{ + return (0); } static int -vmxnet3_shutdown(device_t dev) +vmxnet3_suspend(if_ctx_t ctx) { return (0); } static int +vmxnet3_resume(if_ctx_t ctx) +{ + + return (0); +} + +static int vmxnet3_alloc_resources(struct vmxnet3_softc *sc) { device_t dev; int rid; dev = sc->vmx_dev; rid = PCIR_BAR(0); sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vmx_res0 == NULL) { device_printf(dev, "could not map BAR0 memory\n"); return (ENXIO); } sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0); sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0); rid = PCIR_BAR(1); sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vmx_res1 == NULL) { device_printf(dev, "could not map BAR1 memory\n"); return (ENXIO); } sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1); sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1); - if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { - rid = PCIR_BAR(2); - sc->vmx_msix_res = bus_alloc_resource_any(dev, - SYS_RES_MEMORY, &rid, RF_ACTIVE); - } - - if (sc->vmx_msix_res == NULL) - sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX; - return (0); } static void vmxnet3_free_resources(struct vmxnet3_softc *sc) { device_t dev; int rid; dev = sc->vmx_dev; if (sc->vmx_res0 != NULL) { rid = PCIR_BAR(0); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0); sc->vmx_res0 = NULL; } if (sc->vmx_res1 != NULL) { rid = PCIR_BAR(1); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1); sc->vmx_res1 = NULL; } - - if (sc->vmx_msix_res != NULL) { - rid = PCIR_BAR(2); - bus_release_resource(dev, SYS_RES_MEMORY, rid, - sc->vmx_msix_res); - sc->vmx_msix_res = NULL; - } } static int vmxnet3_check_version(struct vmxnet3_softc *sc) { device_t dev; uint32_t version; dev = sc->vmx_dev; version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS); if ((version & 0x01) == 0) { device_printf(dev, "unsupported hardware version %#x\n", version); return (ENOTSUP); } vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1); version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS); if ((version & 0x01) == 0) { device_printf(dev, "unsupported UPT version %#x\n", version); return (ENOTSUP); } vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1); return (0); } -static int -trunc_powerof2(int val) -{ - - return (1U << (fls(val) - 1)); -} - static void -vmxnet3_initial_config(struct vmxnet3_softc *sc) -{ - int nqueue, ndesc; - - nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue); - if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1) - nqueue = VMXNET3_DEF_TX_QUEUES; - if (nqueue > mp_ncpus) - nqueue = mp_ncpus; - sc->vmx_max_ntxqueues = trunc_powerof2(nqueue); - - nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue); - if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1) - nqueue = VMXNET3_DEF_RX_QUEUES; - if (nqueue > mp_ncpus) - nqueue = mp_ncpus; - sc->vmx_max_nrxqueues = trunc_powerof2(nqueue); - - if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) { - sc->vmx_max_nrxqueues = 1; - sc->vmx_max_ntxqueues = 1; - } - - ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc); - if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC) - ndesc = VMXNET3_DEF_TX_NDESC; - if (ndesc & VMXNET3_MASK_TX_NDESC) - ndesc &= ~VMXNET3_MASK_TX_NDESC; - sc->vmx_ntxdescs = ndesc; - - ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc); - if (ndesc > 
VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC) - ndesc = VMXNET3_DEF_RX_NDESC; - if (ndesc & VMXNET3_MASK_RX_NDESC) - ndesc &= ~VMXNET3_MASK_RX_NDESC; - sc->vmx_nrxdescs = ndesc; - sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS; -} - -static void -vmxnet3_check_multiqueue(struct vmxnet3_softc *sc) -{ - - if (sc->vmx_intr_type != VMXNET3_IT_MSIX) - goto out; - - /* BMV: Just use the maximum configured for now. */ - sc->vmx_nrxqueues = sc->vmx_max_nrxqueues; - sc->vmx_ntxqueues = sc->vmx_max_ntxqueues; - - if (sc->vmx_nrxqueues > 1) - sc->vmx_flags |= VMXNET3_FLAG_RSS; - - return; - -out: - sc->vmx_ntxqueues = 1; - sc->vmx_nrxqueues = 1; -} - -static int -vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - int nmsix, cnt, required; - - dev = sc->vmx_dev; - - if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) - return (1); - - /* Allocate an additional vector for the events interrupt. */ - required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1; - - nmsix = pci_msix_count(dev); - if (nmsix < required) - return (1); - - cnt = required; - if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { - sc->vmx_nintrs = required; - return (0); - } else - pci_release_msi(dev); - - /* BMV TODO Fallback to sharing MSIX vectors if possible. */ - - return (1); -} - -static int -vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - int nmsi, cnt, required; - - dev = sc->vmx_dev; - required = 1; - - nmsi = pci_msi_count(dev); - if (nmsi < required) - return (1); - - cnt = required; - if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) { - sc->vmx_nintrs = 1; - return (0); - } else - pci_release_msi(dev); - - return (1); -} - -static int -vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc) -{ - - sc->vmx_nintrs = 1; - return (0); -} - -static int -vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags, - struct vmxnet3_interrupt *intr) -{ - struct resource *irq; - - irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags); - if (irq == NULL) - return (ENXIO); - - intr->vmxi_irq = irq; - intr->vmxi_rid = rid; - - return (0); -} - -static int -vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc) -{ - int i, rid, flags, error; - - rid = 0; - flags = RF_ACTIVE; - - if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) - flags |= RF_SHAREABLE; - else - rid = 1; - - for (i = 0; i < sc->vmx_nintrs; i++, rid++) { - error = vmxnet3_alloc_interrupt(sc, rid, flags, - &sc->vmx_intrs[i]); - if (error) - return (error); - } - - return (0); -} - -static int -vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_interrupt *intr; - enum intr_type type; - int i, error; - - dev = sc->vmx_dev; - intr = &sc->vmx_intrs[0]; - type = INTR_TYPE_NET | INTR_MPSAFE; - - for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) { - txq = &sc->vmx_txq[i]; - error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, - vmxnet3_txq_intr, txq, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, - "tq%d", i); - txq->vxtxq_intr_idx = intr->vmxi_rid - 1; - } - - for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) { - rxq = &sc->vmx_rxq[i]; - error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, - vmxnet3_rxq_intr, rxq, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, - "rq%d", i); - rxq->vxrxq_intr_idx = intr->vmxi_rid - 1; - } - - error = bus_setup_intr(dev, 
intr->vmxi_irq, type, NULL, - vmxnet3_event_intr, sc, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event"); - sc->vmx_event_intr_idx = intr->vmxi_rid - 1; - - return (0); -} - -static int -vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc) -{ - struct vmxnet3_interrupt *intr; - int i, error; - - intr = &sc->vmx_intrs[0]; - error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq, - INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc, - &intr->vmxi_handler); - - for (i = 0; i < sc->vmx_ntxqueues; i++) - sc->vmx_txq[i].vxtxq_intr_idx = 0; - for (i = 0; i < sc->vmx_nrxqueues; i++) - sc->vmx_rxq[i].vxrxq_intr_idx = 0; - sc->vmx_event_intr_idx = 0; - - return (error); -} - -static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc) { + if_softc_ctx_t scctx; struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxq_shared *rxs; + int intr_idx; int i; - sc->vmx_ds->evintr = sc->vmx_event_intr_idx; + scctx = sc->vmx_scctx; - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - txs = txq->vxtxq_ts; - txs->intr_idx = txq->vxtxq_intr_idx; - } + /* + * There is either one interrupt, or there is one interrupt per + * receive queue. If there is one interrupt, then all interrupt + * indexes are zero. If there is one interrupt per receive queue, + * the transmit queue interrupt indexes are assigned the receive + * queue interrupt indexes in round-robin fashion. + * + * The event interrupt is always the last interrupt index. + */ + sc->vmx_event_intr_idx = scctx->isc_vectors - 1; - for (i = 0; i < sc->vmx_nrxqueues; i++) { + intr_idx = 0; + for (i = 0; i < scctx->isc_nrxqsets; i++, intr_idx++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; + rxq->vxrxq_intr_idx = intr_idx; rxs->intr_idx = rxq->vxrxq_intr_idx; } -} -static int -vmxnet3_setup_interrupts(struct vmxnet3_softc *sc) -{ - int error; - - error = vmxnet3_alloc_intr_resources(sc); - if (error) - return (error); - - switch (sc->vmx_intr_type) { - case VMXNET3_IT_MSIX: - error = vmxnet3_setup_msix_interrupts(sc); - break; - case VMXNET3_IT_MSI: - case VMXNET3_IT_LEGACY: - error = vmxnet3_setup_legacy_interrupt(sc); - break; - default: - panic("%s: invalid interrupt type %d", __func__, - sc->vmx_intr_type); + /* + * Assign the tx queue interrupt indexes above what we are actually + * using. These interrupts will never be enabled.
+ */ + intr_idx = scctx->isc_vectors; + for (i = 0; i < scctx->isc_ntxqsets; i++, intr_idx++) { + txq = &sc->vmx_txq[i]; + txs = txq->vxtxq_ts; + txq->vxtxq_intr_idx = intr_idx; + txs->intr_idx = txq->vxtxq_intr_idx; } - - if (error == 0) - vmxnet3_set_interrupt_idx(sc); - - return (error); } static int -vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc) +vmxnet3_queues_shared_alloc(struct vmxnet3_softc *sc) { - device_t dev; - uint32_t config; + if_softc_ctx_t scctx; + int size; int error; + + scctx = sc->vmx_scctx; - dev = sc->vmx_dev; - config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); - - sc->vmx_intr_type = config & 0x03; - sc->vmx_intr_mask_mode = (config >> 2) & 0x03; - - switch (sc->vmx_intr_type) { - case VMXNET3_IT_AUTO: - sc->vmx_intr_type = VMXNET3_IT_MSIX; - /* FALLTHROUGH */ - case VMXNET3_IT_MSIX: - error = vmxnet3_alloc_msix_interrupts(sc); - if (error == 0) - break; - sc->vmx_intr_type = VMXNET3_IT_MSI; - /* FALLTHROUGH */ - case VMXNET3_IT_MSI: - error = vmxnet3_alloc_msi_interrupts(sc); - if (error == 0) - break; - sc->vmx_intr_type = VMXNET3_IT_LEGACY; - /* FALLTHROUGH */ - case VMXNET3_IT_LEGACY: - error = vmxnet3_alloc_legacy_interrupts(sc); - if (error == 0) - break; - /* FALLTHROUGH */ - default: - sc->vmx_intr_type = -1; - device_printf(dev, "cannot allocate any interrupt resources\n"); - return (ENXIO); + /* + * The txq and rxq shared data areas must be allocated contiguously + * as vmxnet3_driver_shared contains only a single address member + * for the shared queue data area. + */ + size = scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared) + + scctx->isc_nrxqsets * sizeof(struct vmxnet3_rxq_shared); + error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128, &sc->vmx_qs_dma, 0); + if (error) { + device_printf(sc->vmx_dev, "cannot alloc queue shared memory\n"); + return (error); } - return (error); + return (0); } static void -vmxnet3_free_interrupt(struct vmxnet3_softc *sc, - struct vmxnet3_interrupt *intr) +vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) { - device_t dev; + struct vmxnet3_txqueue *txq; + struct vmxnet3_comp_ring *txc; + struct vmxnet3_txring *txr; + if_softc_ctx_t scctx; + + txq = &sc->vmx_txq[q]; + txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; + scctx = sc->vmx_scctx; - dev = sc->vmx_dev; + snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", + device_get_nameunit(sc->vmx_dev), q); - if (intr->vmxi_handler != NULL) { - bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler); - intr->vmxi_handler = NULL; - } - - if (intr->vmxi_irq != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid, - intr->vmxi_irq); - intr->vmxi_irq = NULL; - intr->vmxi_rid = -1; - } + txq->vxtxq_sc = sc; + txq->vxtxq_id = q; + txc->vxcr_ndesc = scctx->isc_ntxd[0]; + txr->vxtxr_ndesc = scctx->isc_ntxd[1]; } -static void -vmxnet3_free_interrupts(struct vmxnet3_softc *sc) -{ - int i; - - for (i = 0; i < sc->vmx_nintrs; i++) - vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]); - - if (sc->vmx_intr_type == VMXNET3_IT_MSI || - sc->vmx_intr_type == VMXNET3_IT_MSIX) - pci_release_msi(sc->vmx_dev); -} - -#ifndef VMXNET3_LEGACY_TX static int -vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc) +vmxnet3_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, + int ntxqs, int ntxqsets) { - device_t dev; + struct vmxnet3_softc *sc; + int q; + int error; + caddr_t kva; + + sc = iflib_get_softc(ctx); - dev = sc->vmx_dev; - - sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT, - taskqueue_thread_enqueue, &sc->vmx_tq); - if 
(sc->vmx_tq == NULL) + /* Allocate the array of transmit queues */ + sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) * + ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->vmx_txq == NULL) return (ENOMEM); - return (0); -} + /* Initialize driver state for each transmit queue */ + for (q = 0; q < ntxqsets; q++) + vmxnet3_init_txq(sc, q); -static void -vmxnet3_start_taskqueue(struct vmxnet3_softc *sc) -{ - device_t dev; - int nthreads, error; - - dev = sc->vmx_dev; - /* - * The taskqueue is typically not frequently used, so a dedicated - * thread for each queue is unnecessary. + * Allocate queue state that is shared with the device. This check + * and call is performed in both vmxnet3_tx_queues_alloc() and + * vmxnet3_rx_queues_alloc() so that we don't have to care which + * order iflib invokes those routines in. */ - nthreads = MAX(1, sc->vmx_ntxqueues / 2); + if (sc->vmx_qs_dma.idi_size == 0) { + error = vmxnet3_queues_shared_alloc(sc); + if (error) + return (error); + } - /* - * Most drivers just ignore the return value - it only fails - * with ENOMEM so an error is not likely. It is hard for us - * to recover from an error here. - */ - error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET, - "%s taskq", device_get_nameunit(dev)); - if (error) - device_printf(dev, "failed to start taskqueue: %d", error); -} + kva = sc->vmx_qs_dma.idi_vaddr; + for (q = 0; q < ntxqsets; q++) { + sc->vmx_txq[q].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; + kva += sizeof(struct vmxnet3_txq_shared); + } -static void -vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc) -{ - struct vmxnet3_txqueue *txq; - int i; + /* Record descriptor ring vaddrs and paddrs */ + for (q = 0; q < ntxqsets; q++) { + struct vmxnet3_txqueue *txq; + struct vmxnet3_txring *txr; + struct vmxnet3_comp_ring *txc; - if (sc->vmx_tq != NULL) { - for (i = 0; i < sc->vmx_max_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask); - } + txq = &sc->vmx_txq[q]; + txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; + + /* Completion ring */ + txc->vxcr_u.txcd = + (struct vmxnet3_txcompdesc *) vaddrs[q * ntxqs + 0]; + txc->vxcr_paddr = paddrs[q * ntxqs + 0]; + + /* Command ring */ + txr->vxtxr_txd = + (struct vmxnet3_txdesc *) vaddrs[q * ntxqs + 1]; + txr->vxtxr_paddr = paddrs[q * ntxqs + 1]; } + + return (0); } static void -vmxnet3_free_taskqueue(struct vmxnet3_softc *sc) +vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q, int nrxqs) { - if (sc->vmx_tq != NULL) { - taskqueue_free(sc->vmx_tq); - sc->vmx_tq = NULL; - } -} -#endif - -static int -vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q) -{ struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxring *rxr; + if_softc_ctx_t scctx; int i; rxq = &sc->vmx_rxq[q]; + rxc = &rxq->vxrxq_comp_ring; + scctx = sc->vmx_scctx; snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d", device_get_nameunit(sc->vmx_dev), q); - mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF); rxq->vxrxq_sc = sc; rxq->vxrxq_id = q; - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { + /* + * First rxq is the completion queue, so there are nrxqs - 1 command + * rings starting at iflib queue id 1. 
+ */ + rxc->vxcr_ndesc = scctx->isc_nrxd[0]; + for (i = 0; i < nrxqs - 1; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_rid = i; - rxr->vxrxr_ndesc = sc->vmx_nrxdescs; - rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO); - if (rxr->vxrxr_rxbuf == NULL) - return (ENOMEM); - - rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs; + rxr->vxrxr_ndesc = scctx->isc_nrxd[i + 1]; } - - return (0); } static int -vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) +vmxnet3_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, + int nrxqs, int nrxqsets) { - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + int q; + int i; + int error; + caddr_t kva; + + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - - snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", - device_get_nameunit(sc->vmx_dev), q); - mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF); - - txq->vxtxq_sc = sc; - txq->vxtxq_id = q; - - txr->vxtxr_ndesc = sc->vmx_ntxdescs; - txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc * - sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO); - if (txr->vxtxr_txbuf == NULL) + /* Allocate the array of receive queues */ + sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) * + nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->vmx_rxq == NULL) return (ENOMEM); - txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs; + /* Initialize driver state for each receive queue */ + for (q = 0; q < nrxqsets; q++) + vmxnet3_init_rxq(sc, q, nrxqs); -#ifndef VMXNET3_LEGACY_TX - TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq); - - txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF, - M_NOWAIT, &txq->vxtxq_mtx); - if (txq->vxtxq_br == NULL) - return (ENOMEM); -#endif - - return (0); -} - -static int -vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc) -{ - int i, error; - /* - * Only attempt to create multiple queues if MSIX is available. MSIX is - * disabled by default because its apparently broken for devices passed - * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable - * must be set to zero for MSIX. This check prevents us from allocating - * queue structures that we will not use. + * Allocate queue state that is shared with the device. This check + * and call is performed in both vmxnet3_tx_queues_alloc() and + * vmxnet3_rx_queues_alloc() so that we don't have to care which + * order iflib invokes those routines in. 
*/ - if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) { - sc->vmx_max_nrxqueues = 1; - sc->vmx_max_ntxqueues = 1; - } - - sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) * - sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); - sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) * - sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); - if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL) - return (ENOMEM); - - for (i = 0; i < sc->vmx_max_nrxqueues; i++) { - error = vmxnet3_init_rxq(sc, i); + if (sc->vmx_qs_dma.idi_size == 0) { + error = vmxnet3_queues_shared_alloc(sc); if (error) return (error); } - for (i = 0; i < sc->vmx_max_ntxqueues; i++) { - error = vmxnet3_init_txq(sc, i); - if (error) - return (error); + kva = sc->vmx_qs_dma.idi_vaddr + + scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared); + for (q = 0; q < nrxqsets; q++) { + sc->vmx_rxq[q].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; + kva += sizeof(struct vmxnet3_rxq_shared); } - return (0); -} + /* Record descriptor ring vaddrs and paddrs */ + for (q = 0; q < nrxqsets; q++) { + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_rxring *rxr; + struct vmxnet3_comp_ring *rxc; -static void -vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_rxring *rxr; - int i; + rxq = &sc->vmx_rxq[q]; + rxc = &rxq->vxrxq_comp_ring; - rxq->vxrxq_sc = NULL; - rxq->vxrxq_id = -1; + /* Completion ring */ + rxc->vxcr_u.rxcd = + (struct vmxnet3_rxcompdesc *) vaddrs[q * nrxqs + 0]; + rxc->vxcr_paddr = paddrs[q * nrxqs + 0]; - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; + /* Command ring(s) */ + for (i = 0; i < nrxqs - 1; i++) { + rxr = &rxq->vxrxq_cmd_ring[i]; - if (rxr->vxrxr_rxbuf != NULL) { - free(rxr->vxrxr_rxbuf, M_DEVBUF); - rxr->vxrxr_rxbuf = NULL; + rxr->vxrxr_rxd = + (struct vmxnet3_rxdesc *) vaddrs[q * nrxqs + 1 + i]; + rxr->vxrxr_paddr = paddrs[q * nrxqs + 1 + i]; } } - if (mtx_initialized(&rxq->vxrxq_mtx) != 0) - mtx_destroy(&rxq->vxrxq_mtx); + return (0); } static void -vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq) +vmxnet3_queues_free(if_ctx_t ctx) { - struct vmxnet3_txring *txr; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; + sc = iflib_get_softc(ctx); - txq->vxtxq_sc = NULL; - txq->vxtxq_id = -1; - -#ifndef VMXNET3_LEGACY_TX - if (txq->vxtxq_br != NULL) { - buf_ring_free(txq->vxtxq_br, M_DEVBUF); - txq->vxtxq_br = NULL; + /* Free queue state area that is shared with the device */ + if (sc->vmx_qs_dma.idi_size != 0) { + iflib_dma_free(&sc->vmx_qs_dma); + sc->vmx_qs_dma.idi_size = 0; } -#endif - if (txr->vxtxr_txbuf != NULL) { - free(txr->vxtxr_txbuf, M_DEVBUF); - txr->vxtxr_txbuf = NULL; - } - - if (mtx_initialized(&txq->vxtxq_mtx) != 0) - mtx_destroy(&txq->vxtxq_mtx); -} - -static void -vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc) -{ - int i; - + /* Free array of receive queues */ if (sc->vmx_rxq != NULL) { - for (i = 0; i < sc->vmx_max_nrxqueues; i++) - vmxnet3_destroy_rxq(&sc->vmx_rxq[i]); free(sc->vmx_rxq, M_DEVBUF); sc->vmx_rxq = NULL; } + /* Free array of transmit queues */ if (sc->vmx_txq != NULL) { - for (i = 0; i < sc->vmx_max_ntxqueues; i++) - vmxnet3_destroy_txq(&sc->vmx_txq[i]); free(sc->vmx_txq, M_DEVBUF); sc->vmx_txq = NULL; } } static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc) { device_t dev; - uint8_t *kva; size_t size; - int i, error; + int error; dev = sc->vmx_dev; + /* Top level state structure shared with the device */ size = sizeof(struct vmxnet3_driver_shared); - error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma); + error = 
iflib_dma_alloc_align(sc->vmx_ctx, size, 1, &sc->vmx_ds_dma, 0); if (error) { device_printf(dev, "cannot alloc shared memory\n"); return (error); } - sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr; + sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.idi_vaddr; - size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) + - sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared); - error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma); - if (error) { - device_printf(dev, "cannot alloc queue shared memory\n"); - return (error); - } - sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr; - kva = sc->vmx_qs; - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; - kva += sizeof(struct vmxnet3_txq_shared); - } - for (i = 0; i < sc->vmx_nrxqueues; i++) { - sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; - kva += sizeof(struct vmxnet3_rxq_shared); - } - + /* RSS table state shared with the device */ if (sc->vmx_flags & VMXNET3_FLAG_RSS) { size = sizeof(struct vmxnet3_rss_shared); - error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma); + error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128, + &sc->vmx_rss_dma, 0); if (error) { device_printf(dev, "cannot alloc rss shared memory\n"); return (error); } sc->vmx_rss = - (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr; + (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.idi_vaddr; } return (0); } static void vmxnet3_free_shared_data(struct vmxnet3_softc *sc) { + /* Free RSS table state shared with the device */ if (sc->vmx_rss != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_rss_dma); + iflib_dma_free(&sc->vmx_rss_dma); sc->vmx_rss = NULL; } - if (sc->vmx_qs != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_qs_dma); - sc->vmx_qs = NULL; - } - + /* Free top level state structure shared with the device */ if (sc->vmx_ds != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_ds_dma); + iflib_dma_free(&sc->vmx_ds_dma); sc->vmx_ds = NULL; } } static int -vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct vmxnet3_comp_ring *txc; - size_t descsz, compsz; - int i, q, error; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_ntxqueues; q++) { - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - txc = &txq->vxtxq_comp_ring; - - descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc); - compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - 1, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - VMXNET3_TX_MAXSIZE, /* maxsize */ - VMXNET3_TX_MAXSEGS, /* nsegments */ - VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &txr->vxtxr_txtag); - if (error) { - device_printf(dev, - "unable to create Tx buffer tag for queue %d\n", q); - return (error); - } - - error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma); - if (error) { - device_printf(dev, "cannot alloc Tx descriptors for " - "queue %d error %d\n", q, error); - return (error); - } - txr->vxtxr_txd = - (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr; - - error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma); - if (error) { - device_printf(dev, "cannot alloc Tx comp descriptors " - "for queue %d error %d\n", q, error); - return (error); - } - txc->vxcr_u.txcd = - (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr; - - for (i 
= 0; i < txr->vxtxr_ndesc; i++) { - error = bus_dmamap_create(txr->vxtxr_txtag, 0, - &txr->vxtxr_txbuf[i].vtxb_dmamap); - if (error) { - device_printf(dev, "unable to create Tx buf " - "dmamap for queue %d idx %d\n", q, i); - return (error); - } - } - } - - return (0); -} - -static void -vmxnet3_free_txq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct vmxnet3_comp_ring *txc; - struct vmxnet3_txbuf *txb; - int i, q; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_ntxqueues; q++) { - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - txc = &txq->vxtxq_comp_ring; - - for (i = 0; i < txr->vxtxr_ndesc; i++) { - txb = &txr->vxtxr_txbuf[i]; - if (txb->vtxb_dmamap != NULL) { - bus_dmamap_destroy(txr->vxtxr_txtag, - txb->vtxb_dmamap); - txb->vtxb_dmamap = NULL; - } - } - - if (txc->vxcr_u.txcd != NULL) { - vmxnet3_dma_free(sc, &txc->vxcr_dma); - txc->vxcr_u.txcd = NULL; - } - - if (txr->vxtxr_txd != NULL) { - vmxnet3_dma_free(sc, &txr->vxtxr_dma); - txr->vxtxr_txd = NULL; - } - - if (txr->vxtxr_txtag != NULL) { - bus_dma_tag_destroy(txr->vxtxr_txtag); - txr->vxtxr_txtag = NULL; - } - } -} - -static int -vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - int descsz, compsz; - int i, j, q, error; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_nrxqueues; q++) { - rxq = &sc->vmx_rxq[q]; - rxc = &rxq->vxrxq_comp_ring; - compsz = 0; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - descsz = rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxdesc); - compsz += rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxcompdesc); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - 1, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MJUMPAGESIZE, /* maxsize */ - 1, /* nsegments */ - MJUMPAGESIZE, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &rxr->vxrxr_rxtag); - if (error) { - device_printf(dev, - "unable to create Rx buffer tag for " - "queue %d\n", q); - return (error); - } - - error = vmxnet3_dma_malloc(sc, descsz, 512, - &rxr->vxrxr_dma); - if (error) { - device_printf(dev, "cannot allocate Rx " - "descriptors for queue %d/%d error %d\n", - i, q, error); - return (error); - } - rxr->vxrxr_rxd = - (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr; - } - - error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma); - if (error) { - device_printf(dev, "cannot alloc Rx comp descriptors " - "for queue %d error %d\n", q, error); - return (error); - } - rxc->vxcr_u.rxcd = - (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, - &rxr->vxrxr_spare_dmap); - if (error) { - device_printf(dev, "unable to create spare " - "dmamap for queue %d/%d error %d\n", - q, i, error); - return (error); - } - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, - &rxr->vxrxr_rxbuf[j].vrxb_dmamap); - if (error) { - device_printf(dev, "unable to create " - "dmamap for queue %d/%d slot %d " - "error %d\n", - q, i, j, error); - return (error); - } - } - } - } - - return (0); -} - -static void -vmxnet3_free_rxq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_rxring *rxr; - struct 
vmxnet3_comp_ring *rxc; - struct vmxnet3_rxbuf *rxb; - int i, j, q; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_nrxqueues; q++) { - rxq = &sc->vmx_rxq[q]; - rxc = &rxq->vxrxq_comp_ring; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - if (rxr->vxrxr_spare_dmap != NULL) { - bus_dmamap_destroy(rxr->vxrxr_rxtag, - rxr->vxrxr_spare_dmap); - rxr->vxrxr_spare_dmap = NULL; - } - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - rxb = &rxr->vxrxr_rxbuf[j]; - if (rxb->vrxb_dmamap != NULL) { - bus_dmamap_destroy(rxr->vxrxr_rxtag, - rxb->vrxb_dmamap); - rxb->vrxb_dmamap = NULL; - } - } - } - - if (rxc->vxcr_u.rxcd != NULL) { - vmxnet3_dma_free(sc, &rxc->vxcr_dma); - rxc->vxcr_u.rxcd = NULL; - } - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - if (rxr->vxrxr_rxd != NULL) { - vmxnet3_dma_free(sc, &rxr->vxrxr_dma); - rxr->vxrxr_rxd = NULL; - } - - if (rxr->vxrxr_rxtag != NULL) { - bus_dma_tag_destroy(rxr->vxrxr_rxtag); - rxr->vxrxr_rxtag = NULL; - } - } - } -} - -static int -vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc) -{ - int error; - - error = vmxnet3_alloc_txq_data(sc); - if (error) - return (error); - - error = vmxnet3_alloc_rxq_data(sc); - if (error) - return (error); - - return (0); -} - -static void -vmxnet3_free_queue_data(struct vmxnet3_softc *sc) -{ - - if (sc->vmx_rxq != NULL) - vmxnet3_free_rxq_data(sc); - - if (sc->vmx_txq != NULL) - vmxnet3_free_txq_data(sc); -} - -static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc) { int error; - error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, - 32, &sc->vmx_mcast_dma); + /* Multicast table state shared with the device */ + error = iflib_dma_alloc_align(sc->vmx_ctx, + VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, 32, &sc->vmx_mcast_dma, 0); if (error) device_printf(sc->vmx_dev, "unable to alloc multicast table\n"); else - sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr; + sc->vmx_mcast = sc->vmx_mcast_dma.idi_vaddr; return (error); } static void vmxnet3_free_mcast_table(struct vmxnet3_softc *sc) { + /* Free multicast table state shared with the device */ if (sc->vmx_mcast != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_mcast_dma); + iflib_dma_free(&sc->vmx_mcast_dma); sc->vmx_mcast = NULL; } } static void vmxnet3_init_shared_data(struct vmxnet3_softc *sc) { struct vmxnet3_driver_shared *ds; + if_shared_ctx_t sctx; + if_softc_ctx_t scctx; struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxq_shared *rxs; int i; ds = sc->vmx_ds; + sctx = sc->vmx_sctx; + scctx = sc->vmx_scctx; /* * Initialize fields of the shared data that remains the same across * reinits. Note the shared data is zero'd when allocated. */ ds->magic = VMXNET3_REV1_MAGIC; /* DriverInfo */ ds->version = VMXNET3_DRIVER_VERSION; ds->guest = VMXNET3_GOS_FREEBSD | #ifdef __LP64__ VMXNET3_GOS_64BIT; #else VMXNET3_GOS_32BIT; #endif ds->vmxnet3_revision = 1; ds->upt_version = 1; /* Misc. 
conf */ ds->driver_data = vtophys(sc); ds->driver_data_len = sizeof(struct vmxnet3_softc); - ds->queue_shared = sc->vmx_qs_dma.dma_paddr; - ds->queue_shared_len = sc->vmx_qs_dma.dma_size; - ds->nrxsg_max = sc->vmx_max_rxsegs; + ds->queue_shared = sc->vmx_qs_dma.idi_paddr; + ds->queue_shared_len = sc->vmx_qs_dma.idi_size; + ds->nrxsg_max = IFLIB_MAX_RX_SEGS; /* RSS conf */ if (sc->vmx_flags & VMXNET3_FLAG_RSS) { ds->rss.version = 1; - ds->rss.paddr = sc->vmx_rss_dma.dma_paddr; - ds->rss.len = sc->vmx_rss_dma.dma_size; + ds->rss.paddr = sc->vmx_rss_dma.idi_paddr; + ds->rss.len = sc->vmx_rss_dma.idi_size; } /* Interrupt control. */ ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO; - ds->nintr = sc->vmx_nintrs; + /* + * Total number of interrupt indexes we are using in the shared + * config data, even though we don't actually allocate MSIX + * resources for the tx queues. Some versions of the device will + * fail to initialize successfully if interrupt indexes are used in + * the shared config that exceed the number of interrupts configured + * here. + */ + ds->nintr = (scctx->isc_vectors == 1) ? + 1 : (scctx->isc_nrxqsets + scctx->isc_ntxqsets + 1); ds->evintr = sc->vmx_event_intr_idx; ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) + for (i = 0; i < ds->nintr; i++) ds->modlevel[i] = UPT1_IMOD_ADAPTIVE; /* Receive filter. */ - ds->mcast_table = sc->vmx_mcast_dma.dma_paddr; - ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size; + ds->mcast_table = sc->vmx_mcast_dma.idi_paddr; + ds->mcast_tablelen = sc->vmx_mcast_dma.idi_size; /* Tx queues */ - for (i = 0; i < sc->vmx_ntxqueues; i++) { + for (i = 0; i < scctx->isc_ntxqsets; i++) { txq = &sc->vmx_txq[i]; txs = txq->vxtxq_ts; - txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr; + txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_paddr; txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc; - txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr; + txs->comp_ring = txq->vxtxq_comp_ring.vxcr_paddr; txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc; txs->driver_data = vtophys(txq); txs->driver_data_len = sizeof(struct vmxnet3_txqueue); } /* Rx queues */ - for (i = 0; i < sc->vmx_nrxqueues; i++) { + for (i = 0; i < scctx->isc_nrxqsets; i++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; - rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr; + rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_paddr; rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc; - rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr; + rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_paddr; rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc; - rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr; + rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_paddr; rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc; rxs->driver_data = vtophys(rxq); rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue); } } static void -vmxnet3_init_hwassist(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp = sc->vmx_ifp; - uint64_t hwassist; - - hwassist = 0; - if (ifp->if_capenable & IFCAP_TXCSUM) - hwassist |= VMXNET3_CSUM_OFFLOAD; - if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) - hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6; - if (ifp->if_capenable & IFCAP_TSO4) - hwassist |= CSUM_IP_TSO; - if (ifp->if_capenable & IFCAP_TSO6) - hwassist |= CSUM_IP6_TSO; - ifp->if_hwassist = hwassist; -} - -static void -vmxnet3_reinit_interface(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp; - - ifp = sc->vmx_ifp; - - /* Use the current MAC address. 
*/ - bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN); - vmxnet3_set_lladdr(sc); - - vmxnet3_init_hwassist(sc); -} - -static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) { /* * Use the same key as the Linux driver until FreeBSD can do * RSS (presumably Toeplitz) in software. */ static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, }; struct vmxnet3_driver_shared *ds; + if_softc_ctx_t scctx; struct vmxnet3_rss_shared *rss; int i; - + ds = sc->vmx_ds; + scctx = sc->vmx_scctx; rss = sc->vmx_rss; rss->hash_type = UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 | UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6; rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ; rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE; rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE); for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++) - rss->ind_table[i] = i % sc->vmx_nrxqueues; + rss->ind_table[i] = i % scctx->isc_nrxqsets; } static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; - + if_softc_ctx_t scctx; + ifp = sc->vmx_ifp; ds = sc->vmx_ds; - + scctx = sc->vmx_scctx; + ds->mtu = ifp->if_mtu; - ds->ntxqueue = sc->vmx_ntxqueues; - ds->nrxqueue = sc->vmx_nrxqueues; + ds->ntxqueue = scctx->isc_ntxqsets; + ds->nrxqueue = scctx->isc_nrxqsets; ds->upt_features = 0; if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) ds->upt_features |= UPT1_F_CSUM; if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ds->upt_features |= UPT1_F_VLAN; if (ifp->if_capenable & IFCAP_LRO) ds->upt_features |= UPT1_F_LRO; if (sc->vmx_flags & VMXNET3_FLAG_RSS) { ds->upt_features |= UPT1_F_RSS; vmxnet3_reinit_rss_shared_data(sc); } - vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr); + vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.idi_paddr); vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH, - (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32); + (uint64_t) sc->vmx_ds_dma.idi_paddr >> 32); } static int vmxnet3_alloc_data(struct vmxnet3_softc *sc) { int error; error = vmxnet3_alloc_shared_data(sc); if (error) return (error); - error = vmxnet3_alloc_queue_data(sc); - if (error) - return (error); - error = vmxnet3_alloc_mcast_table(sc); if (error) return (error); vmxnet3_init_shared_data(sc); return (0); } static void vmxnet3_free_data(struct vmxnet3_softc *sc) { vmxnet3_free_mcast_table(sc); - vmxnet3_free_queue_data(sc); vmxnet3_free_shared_data(sc); } -static int -vmxnet3_setup_interface(struct vmxnet3_softc *sc) -{ - device_t dev; - struct ifnet *ifp; - - dev = sc->vmx_dev; - - ifp = sc->vmx_ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) { - device_printf(dev, "cannot allocate ifnet structure\n"); - return (ENOSPC); - } - - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); -#if __FreeBSD_version < 1000025 - ifp->if_baudrate = 1000000000; -#elif __FreeBSD_version < 1100011 - if_initbaudrate(ifp, IF_Gbps(10)); -#else - ifp->if_baudrate = IF_Gbps(10); -#endif - ifp->if_softc = sc; - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_init = vmxnet3_init; - ifp->if_ioctl = vmxnet3_ioctl; - ifp->if_get_counter = vmxnet3_get_counter; - ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); - 
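Aside on the RSS indirection table programmed in vmxnet3_reinit_rss_shared_data() above: filling ind_table[i] with i % isc_nrxqsets maps the low bits of each packet's RSS hash to a receive queue round-robin. A minimal standalone sketch of the same fill, assuming the 128-entry table implied by UPT1_RSS_MAX_IND_TABLE_SIZE (fill_ind_table and IND_TABLE_SIZE are illustrative names, not driver API):

#include <stdint.h>

#define IND_TABLE_SIZE	128	/* assumed: UPT1_RSS_MAX_IND_TABLE_SIZE */

/* Spread RSS hash buckets across nrxq receive queues round-robin. */
static void
fill_ind_table(uint8_t ind_table[IND_TABLE_SIZE], int nrxq)
{
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % nrxq;
}

With nrxq = 4 this yields 0, 1, 2, 3, 0, 1, 2, 3, ... so each queue owns every fourth hash bucket.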
ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS; - ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE; - -#ifdef VMXNET3_LEGACY_TX - ifp->if_start = vmxnet3_start; - ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1; - IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1); - IFQ_SET_READY(&ifp->if_snd); -#else - ifp->if_transmit = vmxnet3_txq_mq_start; - ifp->if_qflush = vmxnet3_qflush; -#endif - - vmxnet3_get_lladdr(sc); - ether_ifattach(ifp, sc->vmx_lladdr); - - ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM; - ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6; - ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; - ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | - IFCAP_VLAN_HWCSUM; - ifp->if_capenable = ifp->if_capabilities; - - /* These capabilities are not enabled by default. */ - ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; - - sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST); - sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config, - vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); - - ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change, - vmxnet3_media_status); - ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO); - - return (0); -} - static void vmxnet3_evintr(struct vmxnet3_softc *sc) { device_t dev; - struct ifnet *ifp; struct vmxnet3_txq_shared *ts; struct vmxnet3_rxq_shared *rs; uint32_t event; - int reset; dev = sc->vmx_dev; - ifp = sc->vmx_ifp; - reset = 0; - VMXNET3_CORE_LOCK(sc); - /* Clear events. */ event = sc->vmx_ds->event; vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event); - if (event & VMXNET3_EVENT_LINK) { + if (event & VMXNET3_EVENT_LINK) vmxnet3_link_status(sc); - if (sc->vmx_link_active != 0) - vmxnet3_tx_start_all(sc); - } if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) { - reset = 1; vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS); ts = sc->vmx_txq[0].vxtxq_ts; if (ts->stopped != 0) device_printf(dev, "Tx queue error %#x\n", ts->error); rs = sc->vmx_rxq[0].vxrxq_rs; if (rs->stopped != 0) device_printf(dev, "Rx queue error %#x\n", rs->error); - device_printf(dev, "Rx/Tx queue error event ... resetting\n"); + + /* XXX - rely on iflib watchdog to reset us? */ + device_printf(dev, "Rx/Tx queue error event ... 
" + "waiting for iflib watchdog reset\n"); } if (event & VMXNET3_EVENT_DIC) device_printf(dev, "device implementation change event\n"); if (event & VMXNET3_EVENT_DEBUG) device_printf(dev, "debug event\n"); - - if (reset != 0) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } - - VMXNET3_CORE_UNLOCK(sc); } -static void -vmxnet3_txq_eof(struct vmxnet3_txqueue *txq) +static int +vmxnet3_isc_txd_encap(void *vsc, if_pkt_info_t pi) { struct vmxnet3_softc *sc; - struct ifnet *ifp; + struct vmxnet3_txqueue *txq; struct vmxnet3_txring *txr; - struct vmxnet3_comp_ring *txc; - struct vmxnet3_txcompdesc *txcd; - struct vmxnet3_txbuf *txb; - struct mbuf *m; - u_int sop; + struct vmxnet3_txdesc *txd, *sop; + bus_dma_segment_t *segs; + int nsegs; + int pidx; + int hdrlen; + int i; + int gen; - sc = txq->vxtxq_sc; - ifp = sc->vmx_ifp; + sc = vsc; + txq = &sc->vmx_txq[pi->ipi_qsidx]; txr = &txq->vxtxq_cmd_ring; - txc = &txq->vxtxq_comp_ring; + segs = pi->ipi_segs; + nsegs = pi->ipi_nsegs; + pidx = pi->ipi_pidx; - VMXNET3_TXQ_LOCK_ASSERT(txq); + KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, + ("%s: packet with too many segments %d", __func__, nsegs)); - for (;;) { - txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; - if (txcd->gen != txc->vxcr_gen) - break; - vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); + sop = &txr->vxtxr_txd[pidx]; + gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ - if (++txc->vxcr_next == txc->vxcr_ndesc) { - txc->vxcr_next = 0; - txc->vxcr_gen ^= 1; - } + for (i = 0; i < nsegs; i++) { + txd = &txr->vxtxr_txd[pidx]; - sop = txr->vxtxr_next; - txb = &txr->vxtxr_txbuf[sop]; + txd->addr = segs[i].ds_addr; + txd->len = segs[i].ds_len; + txd->gen = gen; + txd->dtype = 0; + txd->offload_mode = VMXNET3_OM_NONE; + txd->offload_pos = 0; + txd->hlen = 0; + txd->eop = 0; + txd->compreq = 0; + txd->vtag_mode = 0; + txd->vtag = 0; - if ((m = txb->vtxb_m) != NULL) { - bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); - - txq->vxtxq_stats.vmtxs_opackets++; - txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len; - if (m->m_flags & M_MCAST) - txq->vxtxq_stats.vmtxs_omcasts++; - - m_freem(m); - txb->vtxb_m = NULL; + if (++pidx == txr->vxtxr_ndesc) { + pidx = 0; + txr->vxtxr_gen ^= 1; } - - txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; + gen = txr->vxtxr_gen; } + txd->eop = 1; + txd->compreq = !!(pi->ipi_flags & IPI_TX_INTR); + pi->ipi_new_pidx = pidx; - if (txr->vxtxr_head == txr->vxtxr_next) - txq->vxtxq_watchdog = 0; -} - -static int -vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr) -{ - struct ifnet *ifp; - struct mbuf *m; - struct vmxnet3_rxdesc *rxd; - struct vmxnet3_rxbuf *rxb; - bus_dma_tag_t tag; - bus_dmamap_t dmap; - bus_dma_segment_t segs[1]; - int idx, clsize, btype, flags, nsegs, error; - - ifp = sc->vmx_ifp; - tag = rxr->vxrxr_rxtag; - dmap = rxr->vxrxr_spare_dmap; - idx = rxr->vxrxr_fill; - rxd = &rxr->vxrxr_rxd[idx]; - rxb = &rxr->vxrxr_rxbuf[idx]; - -#ifdef VMXNET3_FAILPOINTS - KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS); - if (rxr->vxrxr_rid != 0) - KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS); -#endif - - if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) { - flags = M_PKTHDR; - clsize = MCLBYTES; - btype = VMXNET3_BTYPE_HEAD; - } else { -#if __FreeBSD_version < 902001 - /* - * These mbufs will never be used for the start of a frame. 
- * Roughly prior to branching releng/9.2, the load_mbuf_sg() - * required the mbuf to always be a packet header. Avoid - * unnecessary mbuf initialization in newer versions where - * that is not the case. - */ - flags = M_PKTHDR; -#else - flags = 0; -#endif - clsize = MJUMPAGESIZE; - btype = VMXNET3_BTYPE_BODY; + /* + * VLAN + */ + if (pi->ipi_mflags & M_VLANTAG) { + sop->vtag_mode = 1; + sop->vtag = pi->ipi_vtag; } - m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize); - if (m == NULL) { - sc->vmx_stats.vmst_mgetcl_failed++; - return (ENOBUFS); + /* + * TSO and checksum offloads + */ + hdrlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen; + if (pi->ipi_csum_flags & CSUM_TSO) { + sop->offload_mode = VMXNET3_OM_TSO; + sop->hlen = hdrlen; + sop->offload_pos = pi->ipi_tso_segsz; + } else if (pi->ipi_csum_flags & (VMXNET3_CSUM_OFFLOAD | + VMXNET3_CSUM_OFFLOAD_IPV6)) { + sop->offload_mode = VMXNET3_OM_CSUM; + sop->hlen = hdrlen; + sop->offload_pos = hdrlen + + ((pi->ipi_ipproto == IPPROTO_TCP) ? + offsetof(struct tcphdr, th_sum) : + offsetof(struct udphdr, uh_sum)); } - if (btype == VMXNET3_BTYPE_HEAD) { - m->m_len = m->m_pkthdr.len = clsize; - m_adj(m, ETHER_ALIGN); - } else - m->m_len = clsize; + /* Finally, change the ownership. */ + vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); + sop->gen ^= 1; - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs, - BUS_DMA_NOWAIT); - if (error) { - m_freem(m); - sc->vmx_stats.vmst_mbuf_load_failed++; - return (error); - } - KASSERT(nsegs == 1, - ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); -#if __FreeBSD_version < 902001 - if (btype == VMXNET3_BTYPE_BODY) - m->m_flags &= ~M_PKTHDR; -#endif - - if (rxb->vrxb_m != NULL) { - bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(tag, rxb->vrxb_dmamap); - } - - rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; - rxb->vrxb_dmamap = dmap; - rxb->vrxb_m = m; - - rxd->addr = segs[0].ds_addr; - rxd->len = segs[0].ds_len; - rxd->btype = btype; - rxd->gen = rxr->vxrxr_gen; - - vmxnet3_rxr_increment_fill(rxr); return (0); } static void -vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq, - struct vmxnet3_rxring *rxr, int idx) +vmxnet3_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx) { - struct vmxnet3_rxdesc *rxd; + struct vmxnet3_softc *sc; + struct vmxnet3_txqueue *txq; - rxd = &rxr->vxrxr_rxd[idx]; - rxd->gen = rxr->vxrxr_gen; - vmxnet3_rxr_increment_fill(rxr); + sc = vsc; + txq = &sc->vmx_txq[txqid]; + + /* + * pidx is what we last set ipi_new_pidx to in + * vmxnet3_isc_txd_encap() + */ + + /* + * Avoid expensive register updates if the flush request is + * redundant. + */ + if (txq->vxtxq_last_flush == pidx) + return; + txq->vxtxq_last_flush = pidx; + vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), pidx); } -static void -vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq) +static int +vmxnet3_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear) { struct vmxnet3_softc *sc; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - struct vmxnet3_rxcompdesc *rxcd; - int idx, eof; + struct vmxnet3_txqueue *txq; + struct vmxnet3_comp_ring *txc; + struct vmxnet3_txcompdesc *txcd; + struct vmxnet3_txring *txr; + int processed; + + sc = vsc; + txq = &sc->vmx_txq[txqid]; + txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; - sc = rxq->vxrxq_sc; - rxc = &rxq->vxrxq_comp_ring; - - do { - rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; - if (rxcd->gen != rxc->vxcr_gen) - break; /* Not expected. 
*/ + /* + * If clear is true, we need to report the number of TX command ring + * descriptors that have been processed by the device. If clear is + * false, we just need to report whether or not at least one TX + * command ring descriptor has been processed by the device. + */ + processed = 0; + for (;;) { + txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; + if (txcd->gen != txc->vxcr_gen) + break; + else if (!clear) + return (1); vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); - if (++rxc->vxcr_next == rxc->vxcr_ndesc) { - rxc->vxcr_next = 0; - rxc->vxcr_gen ^= 1; + if (++txc->vxcr_next == txc->vxcr_ndesc) { + txc->vxcr_next = 0; + txc->vxcr_gen ^= 1; } - idx = rxcd->rxd_idx; - eof = rxcd->eop; - if (rxcd->qid < sc->vmx_nrxqueues) - rxr = &rxq->vxrxq_cmd_ring[0]; + if (txcd->eop_idx < txr->vxtxr_next) + processed += txr->vxtxr_ndesc - + (txr->vxtxr_next - txcd->eop_idx) + 1; else - rxr = &rxq->vxrxq_cmd_ring[1]; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - } while (!eof); + processed += txcd->eop_idx - txr->vxtxr_next + 1; + txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; + } + + return (processed); } -static void -vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) +static int +vmxnet3_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget) { + struct vmxnet3_softc *sc; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; + struct vmxnet3_rxcompdesc *rxcd; + int avail; + int completed_gen; +#ifdef INVARIANTS + int expect_sop = 1; +#endif + sc = vsc; + rxq = &sc->vmx_rxq[rxqid]; + rxc = &rxq->vxrxq_comp_ring; - if (rxcd->ipv4) { - m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; - if (rxcd->ipcsum_ok) - m->m_pkthdr.csum_flags |= CSUM_IP_VALID; - } + avail = 0; + completed_gen = rxc->vxcr_gen; + for (;;) { + rxcd = &rxc->vxcr_u.rxcd[idx]; + if (rxcd->gen != completed_gen) + break; + vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); - if (!rxcd->fragment) { - if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) { - m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | - CSUM_PSEUDO_HDR; - m->m_pkthdr.csum_data = 0xFFFF; +#ifdef INVARIANTS + if (expect_sop) + KASSERT(rxcd->sop, ("%s: expected sop", __func__)); + else + KASSERT(!rxcd->sop, ("%s: unexpected sop", __func__)); + expect_sop = rxcd->eop; +#endif + if (rxcd->eop && (rxcd->len != 0)) + avail++; + if (avail > budget) + break; + if (++idx == rxc->vxcr_ndesc) { + idx = 0; + completed_gen ^= 1; } } + + return (avail); } -static void -vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq, - struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) +static int +vmxnet3_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri) { struct vmxnet3_softc *sc; - struct ifnet *ifp; + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; + struct vmxnet3_rxcompdesc *rxcd; + struct vmxnet3_rxring *rxr; + struct vmxnet3_rxdesc *rxd; + if_rxd_frag_t frag; + int cqidx; + uint16_t total_len; + uint8_t nfrags; + uint8_t flid; - sc = rxq->vxrxq_sc; - ifp = sc->vmx_ifp; + sc = vsc; + scctx = sc->vmx_scctx; + rxq = &sc->vmx_rxq[ri->iri_qsidx]; + rxc = &rxq->vxrxq_comp_ring; - if (rxcd->error) { - rxq->vxrxq_stats.vmrxs_ierrors++; - m_freem(m); - return; + /* + * Get a single packet starting at the given index in the completion + * queue. 
That we have been called indicates that + * vmxnet3_isc_rxd_available() has already verified that either + * there is a complete packet available starting at the given index, + * or there are one or more zero length packets starting at the + * given index followed by a complete packet, so no verification of + * ownership of the descriptors (and no associated read barrier) is + * required here. + */ + cqidx = ri->iri_cidx; + rxcd = &rxc->vxcr_u.rxcd[cqidx]; + while (rxcd->len == 0) { + KASSERT(rxcd->sop && rxcd->eop, + ("%s: zero-length packet without both sop and eop set", + __func__)); + if (++cqidx == rxc->vxcr_ndesc) { + cqidx = 0; + rxc->vxcr_gen ^= 1; + } + rxcd = &rxc->vxcr_u.rxcd[cqidx]; } + KASSERT(rxcd->sop, ("%s: expected sop", __func__)); -#ifdef notyet + /* + * RSS and flow ID + */ + ri->iri_flowid = rxcd->rss_hash; switch (rxcd->rss_type) { + case VMXNET3_RCD_RSS_TYPE_NONE: + ri->iri_flowid = ri->iri_qsidx; + ri->iri_rsstype = M_HASHTYPE_NONE; + break; case VMXNET3_RCD_RSS_TYPE_IPV4: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4); + ri->iri_rsstype = M_HASHTYPE_RSS_IPV4; break; case VMXNET3_RCD_RSS_TYPE_TCPIPV4: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4); + ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4; break; case VMXNET3_RCD_RSS_TYPE_IPV6: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6); + ri->iri_rsstype = M_HASHTYPE_RSS_IPV6; break; case VMXNET3_RCD_RSS_TYPE_TCPIPV6: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6); + ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6; break; - default: /* VMXNET3_RCD_RSS_TYPE_NONE */ - m->m_pkthdr.flowid = rxq->vxrxq_id; - M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); + default: + ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; break; } -#else - m->m_pkthdr.flowid = rxq->vxrxq_id; - M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); -#endif - if (!rxcd->no_csum) - vmxnet3_rx_csum(rxcd, m); + /* VLAN */ if (rxcd->vlan) { - m->m_flags |= M_VLANTAG; - m->m_pkthdr.ether_vtag = rxcd->vtag; + ri->iri_flags |= M_VLANTAG; + ri->iri_vtag = rxcd->vtag; } - rxq->vxrxq_stats.vmrxs_ipackets++; - rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len; + /* Checksum offload */ + if (!rxcd->no_csum) { + uint32_t csum_flags = 0; - VMXNET3_RXQ_UNLOCK(rxq); - (*ifp->if_input)(ifp, m); - VMXNET3_RXQ_LOCK(rxq); -} - -static void -vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - struct vmxnet3_rxdesc *rxd; - struct vmxnet3_rxcompdesc *rxcd; - struct mbuf *m, *m_head, *m_tail; - int idx, length; - - sc = rxq->vxrxq_sc; - ifp = sc->vmx_ifp; - rxc = &rxq->vxrxq_comp_ring; - - VMXNET3_RXQ_LOCK_ASSERT(rxq); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - return; - - m_head = rxq->vxrxq_mhead; - rxq->vxrxq_mhead = NULL; - m_tail = rxq->vxrxq_mtail; - rxq->vxrxq_mtail = NULL; - MPASS(m_head == NULL || m_tail != NULL); - - for (;;) { - rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; - if (rxcd->gen != rxc->vxcr_gen) { - rxq->vxrxq_mhead = m_head; - rxq->vxrxq_mtail = m_tail; - break; + if (rxcd->ipv4) { + csum_flags |= CSUM_IP_CHECKED; + if (rxcd->ipcsum_ok) + csum_flags |= CSUM_IP_VALID; } - vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); - - if (++rxc->vxcr_next == rxc->vxcr_ndesc) { - rxc->vxcr_next = 0; - rxc->vxcr_gen ^= 1; + if (!rxcd->fragment && (rxcd->tcp || rxcd->udp)) { + csum_flags |= CSUM_L4_CALC; + if (rxcd->csum_ok) { + csum_flags |= CSUM_L4_VALID; + 
ri->iri_csum_data = 0xffff; + } } + ri->iri_csum_flags = csum_flags; + } - idx = rxcd->rxd_idx; - length = rxcd->len; - if (rxcd->qid < sc->vmx_nrxqueues) - rxr = &rxq->vxrxq_cmd_ring[0]; - else - rxr = &rxq->vxrxq_cmd_ring[1]; - rxd = &rxr->vxrxr_rxd[idx]; + /* + * The queue numbering scheme used for rxcd->qid is as follows: + * - All of the command ring 0s are numbered [0, nrxqsets - 1] + * - All of the command ring 1s are numbered [nrxqsets, 2*nrxqsets - 1] + * + * Thus, rxcd->qid less than nrxqsets indicates command ring (and + * flid) 0, and rxcd->qid greater than or equal to nrxqsets + * indicates command ring (and flid) 1. + */ + nfrags = 0; + total_len = 0; + do { + rxcd = &rxc->vxcr_u.rxcd[cqidx]; + KASSERT(rxcd->gen == rxc->vxcr_gen, + ("%s: generation mismatch", __func__)); + flid = (rxcd->qid >= scctx->isc_nrxqsets) ? 1 : 0; + rxr = &rxq->vxrxq_cmd_ring[flid]; + rxd = &rxr->vxrxr_rxd[rxcd->rxd_idx]; - m = rxr->vxrxr_rxbuf[idx].vrxb_m; - KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf", - __func__, rxcd->qid, idx)); - - /* - * The host may skip descriptors. We detect this when this - * descriptor does not match the previous fill index. Catch - * up with the host now. - */ - if (__predict_false(rxr->vxrxr_fill != idx)) { - while (rxr->vxrxr_fill != idx) { - rxr->vxrxr_rxd[rxr->vxrxr_fill].gen = - rxr->vxrxr_gen; - vmxnet3_rxr_increment_fill(rxr); - } + frag = &ri->iri_frags[nfrags]; + frag->irf_flid = flid; + frag->irf_idx = rxcd->rxd_idx; + frag->irf_len = rxcd->len; + total_len += rxcd->len; + nfrags++; + if (++cqidx == rxc->vxcr_ndesc) { + cqidx = 0; + rxc->vxcr_gen ^= 1; } + } while (!rxcd->eop); - if (rxcd->sop) { - KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD, - ("%s: start of frame w/o head buffer", __func__)); - KASSERT(rxr == &rxq->vxrxq_cmd_ring[0], - ("%s: start of frame not in ring 0", __func__)); - KASSERT((idx % sc->vmx_rx_max_chain) == 0, - ("%s: start of frame at unexcepted index %d (%d)", - __func__, idx, sc->vmx_rx_max_chain)); - KASSERT(m_head == NULL, - ("%s: duplicate start of frame?", __func__)); + ri->iri_cidx = cqidx; + ri->iri_nfrags = nfrags; + ri->iri_len = total_len; - if (length == 0) { - /* Just ignore this descriptor. */ - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - goto nextp; - } + return (0); +} - if (vmxnet3_newbuf(sc, rxr) != 0) { - rxq->vxrxq_stats.vmrxs_iqdrops++; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - goto nextp; - } +static void +vmxnet3_isc_rxd_refill(void *vsc, if_rxd_update_t iru) +{ + struct vmxnet3_softc *sc; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_rxring *rxr; + struct vmxnet3_rxdesc *rxd; + uint64_t *paddrs; + int count; + int len; + int pidx; + int i; + uint8_t flid; + uint8_t btype; - m->m_pkthdr.rcvif = ifp; - m->m_pkthdr.len = m->m_len = length; - m->m_pkthdr.csum_flags = 0; - m_head = m_tail = m; + count = iru->iru_count; + len = iru->iru_buf_size; + pidx = iru->iru_pidx; + flid = iru->iru_flidx; + paddrs = iru->iru_paddrs; - } else { - KASSERT(rxd->btype == VMXNET3_BTYPE_BODY, - ("%s: non start of frame w/o body buffer", __func__)); + sc = vsc; + rxq = &sc->vmx_rxq[iru->iru_qsidx]; + rxr = &rxq->vxrxq_cmd_ring[flid]; + rxd = rxr->vxrxr_rxd; - if (m_head == NULL && m_tail == NULL) { - /* - * This is a continuation of a packet that we - * started to drop, but could not drop entirely - * because this segment was still owned by the - * host. So, drop the remainder now. 
- */ - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - goto nextp; - } + /* + * Command ring 0 is filled with BTYPE_HEAD descriptors, and + * command ring 1 is filled with BTYPE_BODY descriptors. + */ + btype = (flid == 0) ? VMXNET3_BTYPE_HEAD : VMXNET3_BTYPE_BODY; + for (i = 0; i < count; i++) { + rxd[pidx].addr = paddrs[i]; + rxd[pidx].len = len; + rxd[pidx].btype = btype; + rxd[pidx].gen = rxr->vxrxr_gen; - KASSERT(m_head != NULL, - ("%s: frame not started?", __func__)); - - if (vmxnet3_newbuf(sc, rxr) != 0) { - rxq->vxrxq_stats.vmrxs_iqdrops++; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - m_freem(m_head); - m_head = m_tail = NULL; - goto nextp; - } - - m->m_len = length; - m_head->m_pkthdr.len += length; - m_tail->m_next = m; - m_tail = m; + if (++pidx == rxr->vxrxr_ndesc) { + pidx = 0; + rxr->vxrxr_gen ^= 1; } - - if (rxcd->eop) { - vmxnet3_rxq_input(rxq, rxcd, m_head); - m_head = m_tail = NULL; - - /* Must recheck after dropping the Rx lock. */ - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; - } - -nextp: - if (__predict_false(rxq->vxrxq_rs->update_rxhead)) { - int qid = rxcd->qid; - bus_size_t r; - - idx = (idx + 1) % rxr->vxrxr_ndesc; - if (qid >= sc->vmx_nrxqueues) { - qid -= sc->vmx_nrxqueues; - r = VMXNET3_BAR0_RXH2(qid); - } else - r = VMXNET3_BAR0_RXH1(qid); - vmxnet3_write_bar0(sc, r, idx); - } } } static void -vmxnet3_legacy_intr(void *xsc) +vmxnet3_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx) { struct vmxnet3_softc *sc; struct vmxnet3_rxqueue *rxq; - struct vmxnet3_txqueue *txq; + struct vmxnet3_rxring *rxr; + bus_size_t r; + + sc = vsc; + rxq = &sc->vmx_rxq[rxqid]; + rxr = &rxq->vxrxq_cmd_ring[flid]; - sc = xsc; - rxq = &sc->vmx_rxq[0]; - txq = &sc->vmx_txq[0]; + if (flid == 0) + r = VMXNET3_BAR0_RXH1(rxqid); + else + r = VMXNET3_BAR0_RXH2(rxqid); - if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) { - if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) - return; - } - if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(sc); - - if (sc->vmx_ds->event != 0) - vmxnet3_evintr(sc); - - VMXNET3_RXQ_LOCK(rxq); - vmxnet3_rxq_eof(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_eof(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); - - vmxnet3_enable_all_intrs(sc); + /* + * pidx is the index of the last descriptor with a buffer the device + * can use, and the device needs to be told which index is one past + * that. + */ + if (++pidx == rxr->vxrxr_ndesc) + pidx = 0; + vmxnet3_write_bar0(sc, r, pidx); } -static void -vmxnet3_txq_intr(void *xtxq) +static int +vmxnet3_legacy_intr(void *xsc) { struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; + if_softc_ctx_t scctx; + if_ctx_t ctx; + + sc = xsc; + scctx = sc->vmx_scctx; + ctx = sc->vmx_ctx; - txq = xtxq; - sc = txq->vxtxq_sc; - + /* + * When there is only a single interrupt configured, this routine + * runs in fast interrupt context, following which the rxq 0 task + * will be enqueued. 
+ */ + if (scctx->isc_intr == IFLIB_INTR_LEGACY) { + if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) + return (FILTER_HANDLED); + } if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx); + vmxnet3_intr_disable_all(ctx); - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_eof(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); + if (sc->vmx_ds->event != 0) + iflib_admin_intr_deferred(ctx); - vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx); + /* + * XXX - When there is both rxq and event activity, do we care + * whether the rxq 0 task or the admin task re-enables the interrupt + * first? + */ + return (FILTER_SCHEDULE_THREAD); } -static void -vmxnet3_rxq_intr(void *xrxq) +static int +vmxnet3_rxq_intr(void *vrxq) { struct vmxnet3_softc *sc; struct vmxnet3_rxqueue *rxq; - rxq = xrxq; + rxq = vrxq; sc = rxq->vxrxq_sc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx); - VMXNET3_RXQ_LOCK(rxq); - vmxnet3_rxq_eof(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - - vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx); + return (FILTER_SCHEDULE_THREAD); } -static void -vmxnet3_event_intr(void *xsc) +static int +vmxnet3_event_intr(void *vsc) { struct vmxnet3_softc *sc; - sc = xsc; + sc = vsc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx); - if (sc->vmx_ds->event != 0) - vmxnet3_evintr(sc); - - vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); + /* + * The work will be done via vmxnet3_update_admin_status(), and the + * interrupt will be re-enabled by vmxnet3_link_intr_enable(). + */ + return (FILTER_SCHEDULE_THREAD); } static void -vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) +vmxnet3_stop(if_ctx_t ctx) { - struct vmxnet3_txring *txr; - struct vmxnet3_txbuf *txb; - int i; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; + sc = iflib_get_softc(ctx); - for (i = 0; i < txr->vxtxr_ndesc; i++) { - txb = &txr->vxtxr_txbuf[i]; - - if (txb->vtxb_m == NULL) - continue; - - bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); - m_freem(txb->vtxb_m); - txb->vtxb_m = NULL; - } -} - -static void -vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_rxring *rxr; - struct vmxnet3_rxbuf *rxb; - int i, j; - - if (rxq->vxrxq_mhead != NULL) { - m_freem(rxq->vxrxq_mhead); - rxq->vxrxq_mhead = NULL; - rxq->vxrxq_mtail = NULL; - } - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - rxb = &rxr->vxrxr_rxbuf[j]; - - if (rxb->vrxb_m == NULL) - continue; - - bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap); - m_freem(rxb->vrxb_m); - rxb->vrxb_m = NULL; - } - } -} - -static void -vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc) -{ - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_txqueue *txq; - int i; - - for (i = 0; i < sc->vmx_nrxqueues; i++) { - rxq = &sc->vmx_rxq[i]; - VMXNET3_RXQ_LOCK(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - } - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - VMXNET3_TXQ_LOCK(txq); - VMXNET3_TXQ_UNLOCK(txq); - } -} - -static void -vmxnet3_stop(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp; - int q; - - ifp = sc->vmx_ifp; - VMXNET3_CORE_LOCK_ASSERT(sc); - - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 
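Aside on the descriptor ownership protocol used throughout the encap and completion paths above: a descriptor belongs to the device when its gen bit matches the ring's current generation, and the generation flips each time an index wraps. vmxnet3_isc_txd_encap() exploits this by building the SOP descriptor with the inverted gen and flipping it only after everything else is written (behind vmxnet3_barrier(..., VMXNET3_BARRIER_WR)), so the device never sees a half-built chain. A condensed standalone sketch of the producer side under assumed names (gen_desc, gen_ring, gen_ring_post are illustrative, not driver API):

#include <stdint.h>

struct gen_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t gen;		/* matches ring gen => owned by device */
};

struct gen_ring {
	struct gen_desc	*gr_desc;
	int		 gr_ndesc;
	int		 gr_pidx;	/* producer index */
	uint32_t	 gr_gen;	/* flips on every wrap */
};

/*
 * Hand one buffer to the device: fill the descriptor completely, then
 * publish it by writing the gen bit last (the real driver issues a
 * write barrier before this store).
 */
static void
gen_ring_post(struct gen_ring *r, uint64_t paddr, uint32_t len)
{
	struct gen_desc *d = &r->gr_desc[r->gr_pidx];

	d->addr = paddr;
	d->len = len;
	d->gen = r->gr_gen;		/* publish: device now owns it */
	if (++r->gr_pidx == r->gr_ndesc) {
		r->gr_pidx = 0;
		r->gr_gen ^= 1;		/* generation flips on wrap */
	}
}

The consumer side inverts the test: vmxnet3_isc_txd_credits_update() and vmxnet3_isc_rxd_available() above stop scanning as soon as a completion descriptor's gen no longer matches the completion ring's generation.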
sc->vmx_link_active = 0; - callout_stop(&sc->vmx_tick); - - /* Disable interrupts. */ - vmxnet3_disable_all_intrs(sc); vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE); - - vmxnet3_stop_rendezvous(sc); - - for (q = 0; q < sc->vmx_ntxqueues; q++) - vmxnet3_txstop(sc, &sc->vmx_txq[q]); - for (q = 0; q < sc->vmx_nrxqueues; q++) - vmxnet3_rxstop(sc, &sc->vmx_rxq[q]); - vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET); } static void vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) { struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; + txq->vxtxq_last_flush = -1; + txr = &txq->vxtxq_cmd_ring; - txr->vxtxr_head = 0; txr->vxtxr_next = 0; txr->vxtxr_gen = VMXNET3_INIT_GEN; - bzero(txr->vxtxr_txd, - txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc)); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ txc = &txq->vxtxq_comp_ring; txc->vxcr_next = 0; txc->vxcr_gen = VMXNET3_INIT_GEN; - bzero(txc->vxcr_u.txcd, - txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc)); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ } -static int +static void vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) { - struct ifnet *ifp; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; - int i, populate, idx, frame_size, error; + int i; - ifp = sc->vmx_ifp; - frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) + - ifp->if_mtu; - /* - * If the MTU causes us to exceed what a regular sized cluster can - * handle, we allocate a second MJUMPAGESIZE cluster after it in - * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters. - * - * Keep rx_max_chain a divisor of the maximum Rx ring size to make - * our life easier. We do not support changing the ring size after - * the attach. + * The descriptors will be populated with buffers during a + * subsequent invocation of vmxnet3_isc_rxd_refill() */ - if (frame_size <= MCLBYTES) - sc->vmx_rx_max_chain = 1; - else - sc->vmx_rx_max_chain = 2; - - /* - * Only populate ring 1 if the configuration will take advantage - * of it. That is either when LRO is enabled or the frame size - * exceeds what ring 0 can contain. 
- */ - if ((ifp->if_capenable & IFCAP_LRO) == 0 && - frame_size <= MCLBYTES + MJUMPAGESIZE) - populate = 1; - else - populate = VMXNET3_RXRINGS_PERQ; - - for (i = 0; i < populate; i++) { + for (i = 0; i < sc->vmx_sctx->isc_nrxqs - 1; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_fill = 0; rxr->vxrxr_gen = VMXNET3_INIT_GEN; - bzero(rxr->vxrxr_rxd, - rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); - - for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { - error = vmxnet3_newbuf(sc, rxr); - if (error) - return (error); - } + /* + * iflib has zeroed out the descriptor array during the + * prior attach or stop + */ } for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_fill = 0; rxr->vxrxr_gen = 0; bzero(rxr->vxrxr_rxd, rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); } rxc = &rxq->vxrxq_comp_ring; rxc->vxcr_next = 0; rxc->vxcr_gen = VMXNET3_INIT_GEN; - bzero(rxc->vxcr_u.rxcd, - rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); - - return (0); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ } -static int +static void vmxnet3_reinit_queues(struct vmxnet3_softc *sc) { - device_t dev; - int q, error; + if_softc_ctx_t scctx; + int q; - dev = sc->vmx_dev; + scctx = sc->vmx_scctx; - for (q = 0; q < sc->vmx_ntxqueues; q++) + for (q = 0; q < scctx->isc_ntxqsets; q++) vmxnet3_txinit(sc, &sc->vmx_txq[q]); - for (q = 0; q < sc->vmx_nrxqueues; q++) { - error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); - if (error) { - device_printf(dev, "cannot populate Rx queue %d\n", q); - return (error); - } - } - - return (0); + for (q = 0; q < scctx->isc_nrxqsets; q++) + vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); } static int vmxnet3_enable_device(struct vmxnet3_softc *sc) { + if_softc_ctx_t scctx; int q; + scctx = sc->vmx_scctx; + if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { device_printf(sc->vmx_dev, "device enable command failed!\n"); return (1); } /* Reset the Rx queue heads. */ - for (q = 0; q < sc->vmx_nrxqueues; q++) { + for (q = 0; q < scctx->isc_nrxqsets; q++) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); } return (0); } static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) { struct ifnet *ifp; ifp = sc->vmx_ifp; - vmxnet3_set_rxfilter(sc); + vmxnet3_set_rxfilter(sc, if_getflags(ifp)); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter, sizeof(sc->vmx_ds->vlan_filter)); else bzero(sc->vmx_ds->vlan_filter, sizeof(sc->vmx_ds->vlan_filter)); vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); } -static int -vmxnet3_reinit(struct vmxnet3_softc *sc) -{ - - vmxnet3_reinit_interface(sc); - vmxnet3_reinit_shared_data(sc); - - if (vmxnet3_reinit_queues(sc) != 0) - return (ENXIO); - - if (vmxnet3_enable_device(sc) != 0) - return (ENXIO); - - vmxnet3_reinit_rxfilters(sc); - - return (0); -} - static void -vmxnet3_init_locked(struct vmxnet3_softc *sc) +vmxnet3_init(if_ctx_t ctx) { - struct ifnet *ifp; + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; - ifp = sc->vmx_ifp; + scctx->isc_max_frame_size = if_getmtu(iflib_get_ifp(ctx)) + + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - return; + /* Use the current MAC address. 
*/ + bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN); + vmxnet3_set_lladdr(sc); - vmxnet3_stop(sc); + vmxnet3_reinit_shared_data(sc); + vmxnet3_reinit_queues(sc); - if (vmxnet3_reinit(sc) != 0) { - vmxnet3_stop(sc); - return; - } + vmxnet3_enable_device(sc); - ifp->if_drv_flags |= IFF_DRV_RUNNING; + vmxnet3_reinit_rxfilters(sc); vmxnet3_link_status(sc); - - vmxnet3_enable_all_intrs(sc); - callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); } static void -vmxnet3_init(void *xsc) +vmxnet3_multi_set(if_ctx_t ctx) { - struct vmxnet3_softc *sc; - sc = xsc; - - VMXNET3_CORE_LOCK(sc); - vmxnet3_init_locked(sc); - VMXNET3_CORE_UNLOCK(sc); + vmxnet3_set_rxfilter(iflib_get_softc(ctx), + if_getflags(iflib_get_ifp(ctx))); } -/* - * BMV: Much of this can go away once we finally have offsets in - * the mbuf packet header. Bug andre@. - */ static int -vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m, - int *etype, int *proto, int *start) +vmxnet3_mtu_set(if_ctx_t ctx, uint32_t mtu) { - struct ether_vlan_header *evh; - int offset; -#if defined(INET) - struct ip *ip = NULL; - struct ip iphdr; -#endif -#if defined(INET6) - struct ip6_hdr *ip6 = NULL; - struct ip6_hdr ip6hdr; -#endif - evh = mtod(m, struct ether_vlan_header *); - if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - /* BMV: We should handle nested VLAN tags too. */ - *etype = ntohs(evh->evl_proto); - offset = sizeof(struct ether_vlan_header); - } else { - *etype = ntohs(evh->evl_encap_proto); - offset = sizeof(struct ether_header); - } - - switch (*etype) { -#if defined(INET) - case ETHERTYPE_IP: - if (__predict_false(m->m_len < offset + sizeof(struct ip))) { - m_copydata(m, offset, sizeof(struct ip), - (caddr_t) &iphdr); - ip = &iphdr; - } else - ip = mtodo(m, offset); - *proto = ip->ip_p; - *start = offset + (ip->ip_hl << 2); - break; -#endif -#if defined(INET6) - case ETHERTYPE_IPV6: - if (__predict_false(m->m_len < - offset + sizeof(struct ip6_hdr))) { - m_copydata(m, offset, sizeof(struct ip6_hdr), - (caddr_t) &ip6hdr); - ip6 = &ip6hdr; - } else - ip6 = mtodo(m, offset); - *proto = -1; - *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); - /* Assert the network stack sent us a valid packet. */ - KASSERT(*start > offset, - ("%s: mbuf %p start %d offset %d proto %d", __func__, m, - *start, offset, *proto)); - break; -#endif - default: + if (mtu > VMXNET3_TX_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + + ETHER_CRC_LEN)) return (EINVAL); - } - if (m->m_pkthdr.csum_flags & CSUM_TSO) { - struct tcphdr *tcp, tcphdr; - uint16_t sum; - - if (__predict_false(*proto != IPPROTO_TCP)) { - /* Likely failed to correctly parse the mbuf. */ - return (EINVAL); - } - - txq->vxtxq_stats.vmtxs_tso++; - - switch (*etype) { -#if defined(INET) - case ETHERTYPE_IP: - sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, - htons(IPPROTO_TCP)); - break; -#endif -#if defined(INET6) - case ETHERTYPE_IPV6: - sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); - break; -#endif - default: - sum = 0; - break; - } - - if (m->m_len < *start + sizeof(struct tcphdr)) { - m_copyback(m, *start + offsetof(struct tcphdr, th_sum), - sizeof(uint16_t), (caddr_t) &sum); - m_copydata(m, *start, sizeof(struct tcphdr), - (caddr_t) &tcphdr); - tcp = &tcphdr; - } else { - tcp = mtodo(m, *start); - tcp->th_sum = sum; - } - - /* - * For TSO, the size of the protocol header is also - * included in the descriptor header size. 
- */ - *start += (tcp->th_off << 2); - } else - txq->vxtxq_stats.vmtxs_csum++; - return (0); } -static int -vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, - bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs) +static void +vmxnet3_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { - struct vmxnet3_txring *txr; - struct mbuf *m; - bus_dma_tag_t tag; - int error; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; - m = *m0; - tag = txr->vxtxr_txtag; + sc = iflib_get_softc(ctx); - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); - if (error == 0 || error != EFBIG) - return (error); + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; - m = m_defrag(m, M_NOWAIT); - if (m != NULL) { - *m0 = m; - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); + if (vmxnet3_link_is_up(sc) != 0) { + ifmr->ifm_status |= IFM_ACTIVE; + ifmr->ifm_active |= IFM_AUTO; } else - error = ENOBUFS; - - if (error) { - m_freem(*m0); - *m0 = NULL; - txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++; - } else - txq->vxtxq_sc->vmx_stats.vmst_defragged++; - - return (error); + ifmr->ifm_active |= IFM_NONE; } -static void -vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) -{ - struct vmxnet3_txring *txr; - - txr = &txq->vxtxq_cmd_ring; - bus_dmamap_unload(txr->vxtxr_txtag, dmap); -} - static int -vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) +vmxnet3_media_change(if_ctx_t ctx) { - struct vmxnet3_softc *sc; - struct vmxnet3_txring *txr; - struct vmxnet3_txdesc *txd, *sop; - struct mbuf *m; - bus_dmamap_t dmap; - bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS]; - int i, gen, nsegs, etype, proto, start, error; - sc = txq->vxtxq_sc; - start = 0; - txd = NULL; - txr = &txq->vxtxq_cmd_ring; - dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; - - error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs); - if (error) - return (error); - - m = *m0; - M_ASSERTPKTHDR(m); - KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, - ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); - - if (VMXNET3_TXRING_AVAIL(txr) < nsegs) { - txq->vxtxq_stats.vmtxs_full++; - vmxnet3_txq_unload_mbuf(txq, dmap); - return (ENOSPC); - } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { - error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start); - if (error) { - txq->vxtxq_stats.vmtxs_offload_failed++; - vmxnet3_txq_unload_mbuf(txq, dmap); - m_freem(m); - *m0 = NULL; - return (error); - } - } - - txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m; - sop = &txr->vxtxr_txd[txr->vxtxr_head]; - gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ - - for (i = 0; i < nsegs; i++) { - txd = &txr->vxtxr_txd[txr->vxtxr_head]; - - txd->addr = segs[i].ds_addr; - txd->len = segs[i].ds_len; - txd->gen = gen; - txd->dtype = 0; - txd->offload_mode = VMXNET3_OM_NONE; - txd->offload_pos = 0; - txd->hlen = 0; - txd->eop = 0; - txd->compreq = 0; - txd->vtag_mode = 0; - txd->vtag = 0; - - if (++txr->vxtxr_head == txr->vxtxr_ndesc) { - txr->vxtxr_head = 0; - txr->vxtxr_gen ^= 1; - } - gen = txr->vxtxr_gen; - } - txd->eop = 1; - txd->compreq = 1; - - if (m->m_flags & M_VLANTAG) { - sop->vtag_mode = 1; - sop->vtag = m->m_pkthdr.ether_vtag; - } - - if (m->m_pkthdr.csum_flags & CSUM_TSO) { - sop->offload_mode = VMXNET3_OM_TSO; - sop->hlen = start; - sop->offload_pos = m->m_pkthdr.tso_segsz; - } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | - VMXNET3_CSUM_OFFLOAD_IPV6)) { - sop->offload_mode = VMXNET3_OM_CSUM; - sop->hlen = start; - sop->offload_pos = start 
+ m->m_pkthdr.csum_data; - } - - /* Finally, change the ownership. */ - vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); - sop->gen ^= 1; - - txq->vxtxq_ts->npending += nsegs; - if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { - txq->vxtxq_ts->npending = 0; - vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), - txr->vxtxr_head); - } - + /* Ignore. */ return (0); } -#ifdef VMXNET3_LEGACY_TX - -static void -vmxnet3_start_locked(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct mbuf *m_head; - int tx, avail; - - sc = ifp->if_softc; - txq = &sc->vmx_txq[0]; - txr = &txq->vxtxq_cmd_ring; - tx = 0; - - VMXNET3_TXQ_LOCK_ASSERT(txq); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || - sc->vmx_link_active == 0) - return; - - while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { - if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2) - break; - - IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); - if (m_head == NULL) - break; - - /* Assume worse case if this mbuf is the head of a chain. */ - if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - - if (vmxnet3_txq_encap(txq, &m_head) != 0) { - if (m_head != NULL) - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - - tx++; - ETHER_BPF_MTAP(ifp, m_head); - } - - if (tx > 0) - txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; -} - -static void -vmxnet3_start(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - - sc = ifp->if_softc; - txq = &sc->vmx_txq[0]; - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_start_locked(ifp); - VMXNET3_TXQ_UNLOCK(txq); -} - -#else /* !VMXNET3_LEGACY_TX */ - static int -vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m) +vmxnet3_promisc_set(if_ctx_t ctx, int flags) { - struct vmxnet3_softc *sc; - struct vmxnet3_txring *txr; - struct buf_ring *br; - struct ifnet *ifp; - int tx, avail, error; - sc = txq->vxtxq_sc; - br = txq->vxtxq_br; - ifp = sc->vmx_ifp; - txr = &txq->vxtxq_cmd_ring; - tx = 0; - error = 0; + vmxnet3_set_rxfilter(iflib_get_softc(ctx), flags); - VMXNET3_TXQ_LOCK_ASSERT(txq); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || - sc->vmx_link_active == 0) { - if (m != NULL) - error = drbr_enqueue(ifp, br, m); - return (error); - } - - if (m != NULL) { - error = drbr_enqueue(ifp, br, m); - if (error) - return (error); - } - - while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) { - m = drbr_peek(ifp, br); - if (m == NULL) - break; - - /* Assume worse case if this mbuf is the head of a chain. 
*/ - if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { - drbr_putback(ifp, br, m); - break; - } - - if (vmxnet3_txq_encap(txq, &m) != 0) { - if (m != NULL) - drbr_putback(ifp, br, m); - else - drbr_advance(ifp, br); - break; - } - drbr_advance(ifp, br); - - tx++; - ETHER_BPF_MTAP(ifp, m); - } - - if (tx > 0) - txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; - return (0); } -static int -vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m) +static uint64_t +vmxnet3_get_counter(if_ctx_t ctx, ift_counter cnt) { - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - int i, ntxq, error; + if_t ifp = iflib_get_ifp(ctx); - sc = ifp->if_softc; - ntxq = sc->vmx_ntxqueues; + if (cnt < IFCOUNTERS) + return if_get_counter_default(ifp, cnt); - /* check if flowid is set */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) - i = m->m_pkthdr.flowid % ntxq; - else - i = curcpu % ntxq; - - txq = &sc->vmx_txq[i]; - - if (VMXNET3_TXQ_TRYLOCK(txq) != 0) { - error = vmxnet3_txq_mq_start_locked(txq, m); - VMXNET3_TXQ_UNLOCK(txq); - } else { - error = drbr_enqueue(ifp, txq->vxtxq_br, m); - taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask); - } - - return (error); + return (0); } static void -vmxnet3_txq_tq_deferred(void *xtxq, int pending) +vmxnet3_update_admin_status(if_ctx_t ctx) { struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - txq = xtxq; - sc = txq->vxtxq_sc; + sc = iflib_get_softc(ctx); + if (sc->vmx_ds->event != 0) + vmxnet3_evintr(sc); - VMXNET3_TXQ_LOCK(txq); - if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br)) - vmxnet3_txq_mq_start_locked(txq, NULL); - VMXNET3_TXQ_UNLOCK(txq); + vmxnet3_refresh_host_stats(sc); } -#endif /* VMXNET3_LEGACY_TX */ - static void -vmxnet3_txq_start(struct vmxnet3_txqueue *txq) +vmxnet3_txq_timer(if_ctx_t ctx, uint16_t qid) { - struct vmxnet3_softc *sc; - struct ifnet *ifp; - - sc = txq->vxtxq_sc; - ifp = sc->vmx_ifp; - -#ifdef VMXNET3_LEGACY_TX - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - vmxnet3_start_locked(ifp); -#else - if (!drbr_empty(ifp, txq->vxtxq_br)) - vmxnet3_txq_mq_start_locked(txq, NULL); -#endif + /* Host stats refresh is global, so just trigger it on txq 0 */ + if (qid == 0) + vmxnet3_refresh_host_stats(iflib_get_softc(ctx)); } static void -vmxnet3_tx_start_all(struct vmxnet3_softc *sc) -{ - struct vmxnet3_txqueue *txq; - int i; - - VMXNET3_CORE_LOCK_ASSERT(sc); - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); - } -} - -static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag) { - struct ifnet *ifp; int idx, bit; - ifp = sc->vmx_ifp; - idx = (tag >> 5) & 0x7F; - bit = tag & 0x1F; - if (tag == 0 || tag > 4095) return; - VMXNET3_CORE_LOCK(sc); + idx = (tag >> 5) & 0x7F; + bit = tag & 0x1F; /* Update our private VLAN bitvector. 
*/ if (add) sc->vmx_vlan_filter[idx] |= (1 << bit); else sc->vmx_vlan_filter[idx] &= ~(1 << bit); - - if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { - if (add) - sc->vmx_ds->vlan_filter[idx] |= (1 << bit); - else - sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit); - vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); - } - - VMXNET3_CORE_UNLOCK(sc); } static void -vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) +vmxnet3_vlan_register(if_ctx_t ctx, uint16_t tag) { - if (ifp->if_softc == arg) - vmxnet3_update_vlan_filter(arg, 1, tag); + vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 1, tag); } static void -vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) +vmxnet3_vlan_unregister(if_ctx_t ctx, uint16_t tag) { - if (ifp->if_softc == arg) - vmxnet3_update_vlan_filter(arg, 0, tag); + vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 0, tag); } static void -vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) +vmxnet3_set_rxfilter(struct vmxnet3_softc *sc, int flags) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; struct ifmultiaddr *ifma; u_int mode; ifp = sc->vmx_ifp; ds = sc->vmx_ds; mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST; - if (ifp->if_flags & IFF_PROMISC) + if (flags & IFF_PROMISC) mode |= VMXNET3_RXMODE_PROMISC; - if (ifp->if_flags & IFF_ALLMULTI) + if (flags & IFF_ALLMULTI) mode |= VMXNET3_RXMODE_ALLMULTI; else { int cnt = 0, overflow = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; else if (cnt == VMXNET3_MULTICAST_MAX) { overflow = 1; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN); cnt++; } if_maddr_runlock(ifp); if (overflow != 0) { cnt = 0; mode |= VMXNET3_RXMODE_ALLMULTI; } else if (cnt > 0) mode |= VMXNET3_RXMODE_MCAST; ds->mcast_tablelen = cnt * ETHER_ADDR_LEN; } ds->rxmode = mode; vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER); vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); } -static int -vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu) -{ - struct ifnet *ifp; - - ifp = sc->vmx_ifp; - - if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU) - return (EINVAL); - - ifp->if_mtu = mtu; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } - - return (0); -} - -static int -vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) -{ - struct vmxnet3_softc *sc; - struct ifreq *ifr; - int reinit, mask, error; - - sc = ifp->if_softc; - ifr = (struct ifreq *) data; - error = 0; - - switch (cmd) { - case SIOCSIFMTU: - if (ifp->if_mtu != ifr->ifr_mtu) { - VMXNET3_CORE_LOCK(sc); - error = vmxnet3_change_mtu(sc, ifr->ifr_mtu); - VMXNET3_CORE_UNLOCK(sc); - } - break; - - case SIOCSIFFLAGS: - VMXNET3_CORE_LOCK(sc); - if (ifp->if_flags & IFF_UP) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { - if ((ifp->if_flags ^ sc->vmx_if_flags) & - (IFF_PROMISC | IFF_ALLMULTI)) { - vmxnet3_set_rxfilter(sc); - } - } else - vmxnet3_init_locked(sc); - } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - vmxnet3_stop(sc); - } - sc->vmx_if_flags = ifp->if_flags; - VMXNET3_CORE_UNLOCK(sc); - break; - - case SIOCADDMULTI: - case SIOCDELMULTI: - VMXNET3_CORE_LOCK(sc); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - vmxnet3_set_rxfilter(sc); - VMXNET3_CORE_UNLOCK(sc); - break; - - case SIOCSIFMEDIA: - case SIOCGIFMEDIA: - error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd); - break; - - case SIOCSIFCAP: - VMXNET3_CORE_LOCK(sc); - mask = ifr->ifr_reqcap ^ 
ifp->if_capenable; - - if (mask & IFCAP_TXCSUM) - ifp->if_capenable ^= IFCAP_TXCSUM; - if (mask & IFCAP_TXCSUM_IPV6) - ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; - if (mask & IFCAP_TSO4) - ifp->if_capenable ^= IFCAP_TSO4; - if (mask & IFCAP_TSO6) - ifp->if_capenable ^= IFCAP_TSO6; - - if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | - IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) { - /* Changing these features requires us to reinit. */ - reinit = 1; - - if (mask & IFCAP_RXCSUM) - ifp->if_capenable ^= IFCAP_RXCSUM; - if (mask & IFCAP_RXCSUM_IPV6) - ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; - if (mask & IFCAP_LRO) - ifp->if_capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWTAGGING) - ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; - if (mask & IFCAP_VLAN_HWFILTER) - ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; - } else - reinit = 0; - - if (mask & IFCAP_VLAN_HWTSO) - ifp->if_capenable ^= IFCAP_VLAN_HWTSO; - - if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } else { - vmxnet3_init_hwassist(sc); - } - - VMXNET3_CORE_UNLOCK(sc); - VLAN_CAPABILITIES(ifp); - break; - - default: - error = ether_ioctl(ifp, cmd, data); - break; - } - - VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc); - - return (error); -} - -#ifndef VMXNET3_LEGACY_TX static void -vmxnet3_qflush(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - struct mbuf *m; - int i; - - sc = ifp->if_softc; - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - - VMXNET3_TXQ_LOCK(txq); - while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL) - m_freem(m); - VMXNET3_TXQ_UNLOCK(txq); - } - - if_qflush(ifp); -} -#endif - -static int -vmxnet3_watchdog(struct vmxnet3_txqueue *txq) -{ - struct vmxnet3_softc *sc; - - sc = txq->vxtxq_sc; - - VMXNET3_TXQ_LOCK(txq); - if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { - VMXNET3_TXQ_UNLOCK(txq); - return (0); - } - VMXNET3_TXQ_UNLOCK(txq); - - if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n", - txq->vxtxq_id); - return (1); -} - -static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) { vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); } -static uint64_t -vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt) -{ - struct vmxnet3_softc *sc; - uint64_t rv; - - sc = if_getsoftc(ifp); - rv = 0; - - /* - * With the exception of if_ierrors, these ifnet statistics are - * only updated in the driver, so just set them to our accumulated - * values. if_ierrors is updated in ether_input() for malformed - * frames that we should have already discarded. 
- */ - switch (cnt) { - case IFCOUNTER_IPACKETS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets; - return (rv); - case IFCOUNTER_IQDROPS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops; - return (rv); - case IFCOUNTER_IERRORS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors; - return (rv); - case IFCOUNTER_OPACKETS: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets; - return (rv); -#ifndef VMXNET3_LEGACY_TX - case IFCOUNTER_OBYTES: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes; - return (rv); - case IFCOUNTER_OMCASTS: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts; - return (rv); -#endif - default: - return (if_get_counter_default(ifp, cnt)); - } -} - -static void -vmxnet3_tick(void *xsc) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - int i, timedout; - - sc = xsc; - ifp = sc->vmx_ifp; - timedout = 0; - - VMXNET3_CORE_LOCK_ASSERT(sc); - - vmxnet3_refresh_host_stats(sc); - - for (i = 0; i < sc->vmx_ntxqueues; i++) - timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]); - - if (timedout != 0) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } else - callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); -} - static int vmxnet3_link_is_up(struct vmxnet3_softc *sc) { uint32_t status; - /* Also update the link speed while here. */ status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); - sc->vmx_link_speed = status >> 16; return !!(status & 0x1); } static void vmxnet3_link_status(struct vmxnet3_softc *sc) { - struct ifnet *ifp; + if_ctx_t ctx; + uint64_t speed; int link; - ifp = sc->vmx_ifp; + ctx = sc->vmx_ctx; link = vmxnet3_link_is_up(sc); - + speed = IF_Gbps(10); + if (link != 0 && sc->vmx_link_active == 0) { sc->vmx_link_active = 1; - if_link_state_change(ifp, LINK_STATE_UP); + iflib_link_state_change(ctx, LINK_STATE_UP, speed); } else if (link == 0 && sc->vmx_link_active != 0) { sc->vmx_link_active = 0; - if_link_state_change(ifp, LINK_STATE_DOWN); + iflib_link_state_change(ctx, LINK_STATE_DOWN, speed); } } static void -vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) -{ - struct vmxnet3_softc *sc; - - sc = ifp->if_softc; - - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_active = IFM_ETHER; - - VMXNET3_CORE_LOCK(sc); - if (vmxnet3_link_is_up(sc) != 0) { - ifmr->ifm_status |= IFM_ACTIVE; - ifmr->ifm_active |= IFM_AUTO; - } else - ifmr->ifm_active |= IFM_NONE; - VMXNET3_CORE_UNLOCK(sc); -} - -static int -vmxnet3_media_change(struct ifnet *ifp) -{ - - /* Ignore. 
*/ - return (0); -} - -static void vmxnet3_set_lladdr(struct vmxnet3_softc *sc) { uint32_t ml, mh; ml = sc->vmx_lladdr[0]; ml |= sc->vmx_lladdr[1] << 8; ml |= sc->vmx_lladdr[2] << 16; ml |= sc->vmx_lladdr[3] << 24; vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml); mh = sc->vmx_lladdr[4]; mh |= sc->vmx_lladdr[5] << 8; vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh); } static void vmxnet3_get_lladdr(struct vmxnet3_softc *sc) { uint32_t ml, mh; ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL); mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH); sc->vmx_lladdr[0] = ml; sc->vmx_lladdr[1] = ml >> 8; sc->vmx_lladdr[2] = ml >> 16; sc->vmx_lladdr[3] = ml >> 24; sc->vmx_lladdr[4] = mh; sc->vmx_lladdr[5] = mh >> 8; } static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *node, *txsnode; struct sysctl_oid_list *list, *txslist; - struct vmxnet3_txq_stats *stats; struct UPT1_TxStats *txstats; char namebuf[16]; - stats = &txq->vxtxq_stats; txstats = &txq->vxtxq_ts->stats; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Transmit Queue"); txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, - &stats->vmtxs_opackets, "Transmit packets"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, - &stats->vmtxs_obytes, "Transmit bytes"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, - &stats->vmtxs_omcasts, "Transmit multicasts"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, - &stats->vmtxs_csum, "Transmit checksum offloaded"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, - &stats->vmtxs_tso, "Transmit TCP segmentation offloaded"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD, - &stats->vmtxs_full, "Transmit ring full"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD, - &stats->vmtxs_offload_failed, "Transmit checksum offload failed"); - /* - * Add statistics reported by the host. These are updated once - * per second. + * Add statistics reported by the host. These are updated by the + * iflib txq timer on txq 0. 
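+	 *
+	 * For example (an illustrative sysctl path, assuming device unit
+	 * 0 and using the txq%d and hstats nodes created below):
+	 *
+	 *	sysctl dev.vmx.0.txq0.hstats.tso_packets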
*/ txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); txslist = SYSCTL_CHILDREN(txsnode); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD, &txstats->TSO_packets, "TSO packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD, &txstats->TSO_bytes, "TSO bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, &txstats->ucast_packets, "Unicast packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, &txstats->ucast_bytes, "Unicast bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, &txstats->mcast_packets, "Multicast packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, &txstats->mcast_bytes, "Multicast bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD, &txstats->error, "Errors"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD, &txstats->discard, "Discards"); } static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *node, *rxsnode; struct sysctl_oid_list *list, *rxslist; - struct vmxnet3_rxq_stats *stats; struct UPT1_RxStats *rxstats; char namebuf[16]; - stats = &rxq->vxrxq_stats; rxstats = &rxq->vxrxq_rs->stats; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Receive Queue"); rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, - &stats->vmrxs_ipackets, "Receive packets"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, - &stats->vmrxs_ibytes, "Receive bytes"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, - &stats->vmrxs_iqdrops, "Receive drops"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, - &stats->vmrxs_ierrors, "Receive errors"); - /* - * Add statistics reported by the host. These are updated once - * per second. + * Add statistics reported by the host. These are updated by the + * iflib txq timer on txq 0. 
*/ rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); rxslist = SYSCTL_CHILDREN(rxsnode); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD, &rxstats->LRO_packets, "LRO packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD, &rxstats->LRO_bytes, "LRO bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, &rxstats->ucast_packets, "Unicast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, &rxstats->ucast_bytes, "Unicast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, &rxstats->mcast_packets, "Multicast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, &rxstats->mcast_bytes, "Multicast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD, &rxstats->bcast_packets, "Broadcast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD, &rxstats->bcast_bytes, "Broadcast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD, &rxstats->nobuffer, "No buffer"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD, &rxstats->error, "Errors"); } static void vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { + if_softc_ctx_t scctx; struct sysctl_oid *node; struct sysctl_oid_list *list; int i; - for (i = 0; i < sc->vmx_ntxqueues; i++) { + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_ntxqsets; i++) { struct vmxnet3_txqueue *txq = &sc->vmx_txq[i]; node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD, - &txq->vxtxq_cmd_ring.vxtxr_head, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_gen, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_ndesc, 0,""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_gen, 0, ""); } - for (i = 0; i < sc->vmx_nrxqueues; i++) { + for (i = 0; i < scctx->isc_nrxqsets; i++) { struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i]; node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD, - &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, ""); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD, - &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, ""); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, - &rxq->vxrxq_comp_ring.vxcr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,""); 
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_gen, 0, ""); } } static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { + if_softc_ctx_t scctx; int i; - for (i = 0; i < sc->vmx_ntxqueues; i++) + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_ntxqsets; i++) vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child); - for (i = 0; i < sc->vmx_nrxqueues; i++) + for (i = 0; i < scctx->isc_nrxqsets; i++) vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child); vmxnet3_setup_debug_sysctl(sc, ctx, child); } static void vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) { device_t dev; - struct vmxnet3_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vmx_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD, - &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD, - &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD, - &sc->vmx_ntxqueues, 0, "Number of Tx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD, - &sc->vmx_nrxqueues, 0, "Number of Rx queues"); - - stats = &sc->vmx_stats; - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD, - &stats->vmst_defragged, 0, "Tx mbuf chains defragged"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD, - &stats->vmst_defrag_failed, 0, - "Tx mbuf dropped because defrag failed"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD, - &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD, - &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed"); - vmxnet3_setup_queue_sysctl(sc, ctx, child); } static void vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) { bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v); } static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r) { return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r)); } static void vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) { bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v); } static void vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd) { vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd); } static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd) { vmxnet3_write_cmd(sc, cmd); bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD)); } static void vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0); } static void vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); } -static void -vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) +static int +vmxnet3_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { - int i; - - sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) - vmxnet3_enable_intr(sc, i); + /* Not using interrupts for TX */ + return (0); } -static void -vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) +static int +vmxnet3_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { - int i; + struct 
vmxnet3_softc *sc; - sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) - vmxnet3_disable_intr(sc, i); + sc = iflib_get_softc(ctx); + vmxnet3_enable_intr(sc, sc->vmx_rxq[qid].vxrxq_intr_idx); + return (0); } static void -vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +vmxnet3_link_intr_enable(if_ctx_t ctx) { - bus_addr_t *baddr = arg; + struct vmxnet3_softc *sc; - if (error == 0) - *baddr = segs->ds_addr; + sc = iflib_get_softc(ctx); + vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); } -static int -vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, - struct vmxnet3_dma_alloc *dma) +static void +vmxnet3_intr_enable_all(if_ctx_t ctx) { - device_t dev; - int error; + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + int i; - dev = sc->vmx_dev; - bzero(dma, sizeof(struct vmxnet3_dma_alloc)); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - align, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - size, /* maxsize */ - 1, /* nsegments */ - size, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &dma->dma_tag); - if (error) { - device_printf(dev, "bus_dma_tag_create failed: %d\n", error); - goto fail; - } - - error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, - BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map); - if (error) { - device_printf(dev, "bus_dmamem_alloc failed: %d\n", error); - goto fail; - } - - error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, - size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT); - if (error) { - device_printf(dev, "bus_dmamap_load failed: %d\n", error); - goto fail; - } - - dma->dma_size = size; - -fail: - if (error) - vmxnet3_dma_free(sc, dma); - - return (error); + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; + sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; + for (i = 0; i < scctx->isc_vectors; i++) + vmxnet3_enable_intr(sc, i); } static void -vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) +vmxnet3_intr_disable_all(if_ctx_t ctx) { + struct vmxnet3_softc *sc; + int i; - if (dma->dma_tag != NULL) { - if (dma->dma_paddr != 0) { - bus_dmamap_sync(dma->dma_tag, dma->dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->dma_tag, dma->dma_map); - } - - if (dma->dma_vaddr != NULL) { - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, - dma->dma_map); - } - - bus_dma_tag_destroy(dma->dma_tag); - } - bzero(dma, sizeof(struct vmxnet3_dma_alloc)); -} - -static int -vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def) -{ - char path[64]; - - snprintf(path, sizeof(path), - "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob); - TUNABLE_INT_FETCH(path, &def); - - return (def); + sc = iflib_get_softc(ctx); + /* + * iflib may invoke this routine before vmxnet3_attach_post() has + * run, which is before the top level shared data area is + * initialized and the device made aware of it. + */ + if (sc->vmx_ds != NULL) + sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; + for (i = 0; i < VMXNET3_MAX_INTRS; i++) + vmxnet3_disable_intr(sc, i); } /* * Since this is a purely paravirtualized device, we do not have * to worry about DMA coherency. But at times, we must make sure * both the compiler and CPU do not reorder memory operations. 
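 *
 * For example (a sketch only; the txd field names are placeholders,
 * not the driver's actual descriptor layout), a producer must make
 * its descriptor writes visible before the write that passes
 * ownership to the device, using the helper defined below:
 *
 *	txd->addr = paddr;
 *	txd->len = len;
 *	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
 *	txd->gen = gen;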
*/ static inline void vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type) { switch (type) { case VMXNET3_BARRIER_RD: rmb(); break; case VMXNET3_BARRIER_WR: wmb(); break; case VMXNET3_BARRIER_RDWR: mb(); break; default: panic("%s: bad barrier type %d", __func__, type); } } Index: head/sys/dev/vmware/vmxnet3/if_vmxvar.h =================================================================== --- head/sys/dev/vmware/vmxnet3/if_vmxvar.h (revision 343290) +++ head/sys/dev/vmware/vmxnet3/if_vmxvar.h (revision 343291) @@ -1,345 +1,193 @@ /*- * Copyright (c) 2013 Tsubai Masanari * Copyright (c) 2013 Bryan Venteicher + * Copyright (c) 2018 Patrick Kelsey * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #ifndef _IF_VMXVAR_H #define _IF_VMXVAR_H struct vmxnet3_softc; -struct vmxnet3_dma_alloc { - bus_addr_t dma_paddr; - caddr_t dma_vaddr; - bus_dma_tag_t dma_tag; - bus_dmamap_t dma_map; - bus_size_t dma_size; -}; - /* * The number of Rx/Tx queues this driver prefers. */ #define VMXNET3_DEF_RX_QUEUES 8 #define VMXNET3_DEF_TX_QUEUES 8 /* * The number of Rx rings in each Rx queue. */ #define VMXNET3_RXRINGS_PERQ 2 /* * The number of descriptors in each Rx/Tx ring. */ #define VMXNET3_DEF_TX_NDESC 512 #define VMXNET3_MAX_TX_NDESC 4096 #define VMXNET3_MIN_TX_NDESC 32 #define VMXNET3_MASK_TX_NDESC 0x1F #define VMXNET3_DEF_RX_NDESC 256 #define VMXNET3_MAX_RX_NDESC 2048 #define VMXNET3_MIN_RX_NDESC 32 #define VMXNET3_MASK_RX_NDESC 0x1F #define VMXNET3_MAX_TX_NCOMPDESC VMXNET3_MAX_TX_NDESC #define VMXNET3_MAX_RX_NCOMPDESC \ (VMXNET3_MAX_RX_NDESC * VMXNET3_RXRINGS_PERQ) -struct vmxnet3_txbuf { - bus_dmamap_t vtxb_dmamap; - struct mbuf *vtxb_m; -}; - struct vmxnet3_txring { - struct vmxnet3_txbuf *vxtxr_txbuf; - u_int vxtxr_head; u_int vxtxr_next; u_int vxtxr_ndesc; int vxtxr_gen; - bus_dma_tag_t vxtxr_txtag; struct vmxnet3_txdesc *vxtxr_txd; - struct vmxnet3_dma_alloc vxtxr_dma; + bus_addr_t vxtxr_paddr; }; -static inline int -VMXNET3_TXRING_AVAIL(struct vmxnet3_txring *txr) -{ - int avail = txr->vxtxr_next - txr->vxtxr_head - 1; - return (avail < 0 ? 
txr->vxtxr_ndesc + avail : avail); -} - -struct vmxnet3_rxbuf { - bus_dmamap_t vrxb_dmamap; - struct mbuf *vrxb_m; -}; - struct vmxnet3_rxring { - struct vmxnet3_rxbuf *vxrxr_rxbuf; struct vmxnet3_rxdesc *vxrxr_rxd; - u_int vxrxr_fill; u_int vxrxr_ndesc; int vxrxr_gen; - int vxrxr_rid; - bus_dma_tag_t vxrxr_rxtag; - struct vmxnet3_dma_alloc vxrxr_dma; - bus_dmamap_t vxrxr_spare_dmap; + bus_addr_t vxrxr_paddr; }; -static inline void -vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr) -{ - - if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) { - rxr->vxrxr_fill = 0; - rxr->vxrxr_gen ^= 1; - } -} - struct vmxnet3_comp_ring { union { struct vmxnet3_txcompdesc *txcd; struct vmxnet3_rxcompdesc *rxcd; } vxcr_u; + /* + * vxcr_next is used on the transmit side to track the next index to + * begin cleaning at. It is not used on the receive side. + */ u_int vxcr_next; u_int vxcr_ndesc; int vxcr_gen; - struct vmxnet3_dma_alloc vxcr_dma; + bus_addr_t vxcr_paddr; }; -struct vmxnet3_txq_stats { - uint64_t vmtxs_opackets; /* if_opackets */ - uint64_t vmtxs_obytes; /* if_obytes */ - uint64_t vmtxs_omcasts; /* if_omcasts */ - uint64_t vmtxs_csum; - uint64_t vmtxs_tso; - uint64_t vmtxs_full; - uint64_t vmtxs_offload_failed; -}; - struct vmxnet3_txqueue { - struct mtx vxtxq_mtx; struct vmxnet3_softc *vxtxq_sc; -#ifndef VMXNET3_LEGACY_TX - struct buf_ring *vxtxq_br; -#endif int vxtxq_id; + int vxtxq_last_flush; int vxtxq_intr_idx; - int vxtxq_watchdog; struct vmxnet3_txring vxtxq_cmd_ring; struct vmxnet3_comp_ring vxtxq_comp_ring; - struct vmxnet3_txq_stats vxtxq_stats; struct vmxnet3_txq_shared *vxtxq_ts; struct sysctl_oid_list *vxtxq_sysctl; -#ifndef VMXNET3_LEGACY_TX - struct task vxtxq_defrtask; -#endif char vxtxq_name[16]; } __aligned(CACHE_LINE_SIZE); -#define VMXNET3_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \ - mtx_assert(&(_txq)->vxtxq_mtx, MA_OWNED) -#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \ - mtx_assert(&(_txq)->vxtxq_mtx, MA_NOTOWNED) - -struct vmxnet3_rxq_stats { - uint64_t vmrxs_ipackets; /* if_ipackets */ - uint64_t vmrxs_ibytes; /* if_ibytes */ - uint64_t vmrxs_iqdrops; /* if_iqdrops */ - uint64_t vmrxs_ierrors; /* if_ierrors */ -}; - struct vmxnet3_rxqueue { - struct mtx vxrxq_mtx; struct vmxnet3_softc *vxrxq_sc; int vxrxq_id; int vxrxq_intr_idx; - struct mbuf *vxrxq_mhead; - struct mbuf *vxrxq_mtail; + struct if_irq vxrxq_irq; struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ]; struct vmxnet3_comp_ring vxrxq_comp_ring; - struct vmxnet3_rxq_stats vxrxq_stats; struct vmxnet3_rxq_shared *vxrxq_rs; struct sysctl_oid_list *vxrxq_sysctl; char vxrxq_name[16]; } __aligned(CACHE_LINE_SIZE); -#define VMXNET3_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vxrxq_mtx) -#define VMXNET3_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vxrxq_mtx) -#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \ - mtx_assert(&(_rxq)->vxrxq_mtx, MA_OWNED) -#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \ - mtx_assert(&(_rxq)->vxrxq_mtx, MA_NOTOWNED) - -struct vmxnet3_statistics { - uint32_t vmst_defragged; - uint32_t vmst_defrag_failed; - uint32_t vmst_mgetcl_failed; - uint32_t vmst_mbuf_load_failed; -}; - -struct vmxnet3_interrupt { - struct resource *vmxi_irq; - int vmxi_rid; - void *vmxi_handler; -}; - struct vmxnet3_softc { device_t vmx_dev; + if_ctx_t vmx_ctx; + if_shared_ctx_t vmx_sctx; + if_softc_ctx_t vmx_scctx; struct ifnet *vmx_ifp; struct vmxnet3_driver_shared *vmx_ds; 
 	uint32_t vmx_flags;
-#define VMXNET3_FLAG_NO_MSIX	0x0001
 #define VMXNET3_FLAG_RSS	0x0002

 	struct vmxnet3_rxqueue *vmx_rxq;
 	struct vmxnet3_txqueue *vmx_txq;

 	struct resource *vmx_res0;
 	bus_space_tag_t vmx_iot0;
 	bus_space_handle_t vmx_ioh0;
 	struct resource *vmx_res1;
 	bus_space_tag_t vmx_iot1;
 	bus_space_handle_t vmx_ioh1;
-	struct resource *vmx_msix_res;

 	int vmx_link_active;
-	int vmx_link_speed;
-	int vmx_if_flags;
-
-	int vmx_ntxqueues;
-	int vmx_nrxqueues;
-	int vmx_ntxdescs;
-	int vmx_nrxdescs;
-	int vmx_max_rxsegs;
-	int vmx_rx_max_chain;
-
-	struct vmxnet3_statistics vmx_stats;
-
-	int vmx_intr_type;
 	int vmx_intr_mask_mode;
 	int vmx_event_intr_idx;
-	int vmx_nintrs;
-	struct vmxnet3_interrupt vmx_intrs[VMXNET3_MAX_INTRS];
+	struct if_irq vmx_event_intr_irq;

-	struct mtx vmx_mtx;
-#ifndef VMXNET3_LEGACY_TX
-	struct taskqueue *vmx_tq;
-#endif
 	uint8_t *vmx_mcast;
-	void *vmx_qs;
 	struct vmxnet3_rss_shared *vmx_rss;
-	struct callout vmx_tick;
-	struct vmxnet3_dma_alloc vmx_ds_dma;
-	struct vmxnet3_dma_alloc vmx_qs_dma;
-	struct vmxnet3_dma_alloc vmx_mcast_dma;
-	struct vmxnet3_dma_alloc vmx_rss_dma;
-	struct ifmedia vmx_media;
-	int vmx_max_ntxqueues;
-	int vmx_max_nrxqueues;
-	eventhandler_tag vmx_vlan_attach;
-	eventhandler_tag vmx_vlan_detach;
+	struct iflib_dma_info vmx_ds_dma;
+	struct iflib_dma_info vmx_qs_dma;
+	struct iflib_dma_info vmx_mcast_dma;
+	struct iflib_dma_info vmx_rss_dma;
+	struct ifmedia *vmx_media;
 	uint32_t vmx_vlan_filter[4096/32];
 	uint8_t vmx_lladdr[ETHER_ADDR_LEN];
 };

-#define VMXNET3_CORE_LOCK_INIT(_sc, _name) \
-    mtx_init(&(_sc)->vmx_mtx, _name, "VMXNET3 Lock", MTX_DEF)
-#define VMXNET3_CORE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->vmx_mtx)
-#define VMXNET3_CORE_LOCK(_sc)		mtx_lock(&(_sc)->vmx_mtx)
-#define VMXNET3_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->vmx_mtx)
-#define VMXNET3_CORE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->vmx_mtx, MA_OWNED)
-#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
-    mtx_assert(&(_sc)->vmx_mtx, MA_NOTOWNED)
-
 /*
  * Our driver version we report to the hypervisor; we just keep
  * this value constant.
  */
 #define VMXNET3_DRIVER_VERSION 0x00010000

 /*
  * Max descriptors per Tx packet. We must limit the size of
  * any TSO packets based on the number of segments.
  */
-#define VMXNET3_TX_MAXSEGS		32
+#define VMXNET3_TX_MAXSEGS		32 /* 64K @ 2K segment size */
 #define VMXNET3_TX_MAXSIZE		(VMXNET3_TX_MAXSEGS * MCLBYTES)
+#define VMXNET3_TSO_MAXSIZE		(VMXNET3_TX_MAXSIZE - ETHER_VLAN_ENCAP_LEN)

 /*
- * Maximum support Tx segments size. The length field in the
+ * Maximum supported Tx segment size. The length field in the
  * Tx descriptor is 14 bits.
+ *
+ * XXX It's possible a descriptor length field of 0 means 2^14, but this
+ * isn't confirmed, so limit to 2^14 - 1 for now.
  */
-#define VMXNET3_TX_MAXSEGSIZE		(1 << 14)
+#define VMXNET3_TX_MAXSEGSIZE		((1 << 14) - 1)

 /*
- * The maximum number of Rx segments we accept. When LRO is enabled,
- * this allows us to receive the maximum sized frame with one MCLBYTES
- * cluster followed by 16 MJUMPAGESIZE clusters.
+ * Maximum supported Rx segment size. The length field in the
+ * Rx descriptor is 14 bits.
+ *
+ * The reference drivers skip zero-length descriptors, which seems to be a
+ * strong indication that on the receive side, a descriptor length field of
+ * zero does not mean 2^14.
  */
-#define VMXNET3_MAX_RX_SEGS		17
+#define VMXNET3_RX_MAXSEGSIZE		((1 << 14) - 1)

 /*
  * Predetermined size of the multicast MAC filter table. If the
  * number of multicast addresses exceeds this size, then the
  * ALL_MULTI mode is used instead.
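 *
 * For example, with 32 slots of ETHER_ADDR_LEN (6) bytes each, the
 * table programmed by vmxnet3_set_rxfilter() is at most 32 * 6 = 192
 * bytes; a 33rd group address overflows it and the device is put
 * into ALL_MULTI mode instead.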
*/ #define VMXNET3_MULTICAST_MAX 32 /* - * Our Tx watchdog timeout. - */ -#define VMXNET3_WATCHDOG_TIMEOUT 5 - -/* - * Number of slots in the Tx bufrings. This value matches most other - * multiqueue drivers. - */ -#define VMXNET3_DEF_BUFRING_SIZE 4096 - -/* * IP protocols that we can perform Tx checksum offloading of. */ #define VMXNET3_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) #define VMXNET3_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) #define VMXNET3_CSUM_ALL_OFFLOAD \ (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | CSUM_TSO) - -/* - * Compat macros to keep this driver compiling on old releases. - */ - -#if !defined(SYSCTL_ADD_UQUAD) -#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD -#endif - -#if !defined(IFCAP_TXCSUM_IPV6) -#define IFCAP_TXCSUM_IPV6 0 -#endif - -#if !defined(IFCAP_RXCSUM_IPV6) -#define IFCAP_RXCSUM_IPV6 0 -#endif - -#if !defined(CSUM_TCP_IPV6) -#define CSUM_TCP_IPV6 0 -#endif - -#if !defined(CSUM_UDP_IPV6) -#define CSUM_UDP_IPV6 0 -#endif #endif /* _IF_VMXVAR_H */ Index: head/sys/net/iflib.c =================================================================== --- head/sys/net/iflib.c (revision 343290) +++ head/sys/net/iflib.c (revision 343291) @@ -1,6482 +1,6487 @@ /*- * Copyright (c) 2014-2018, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #ifdef PCI_IOV #include #endif #include /* * enable accounting of every mbuf as it comes in to and goes out of * iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. 
This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia	ifc_media;
	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;
	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap  ifc_txrx.ift_txd_encap
#define isc_txd_flush  ifc_txrx.ift_txd_flush
#define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{

	return (ctx->ifc_flags);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)(CACHE_LINE_SIZE-1))))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

typedef struct
iflib_sw_rx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ caddr_t *ifsd_cl; /* direct cluster pointer for rx */ bus_addr_t *ifsd_ba; /* bus addr of cluster for rx */ } iflib_rxsd_array_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ bus_dmamap_t *ifsd_tso_map; /* bus_dma maps for TSO packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ } if_txsd_vec_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 -/* bnxt supports 64 with hardware LRO enabled */ -#define IFLIB_MAX_RX_SEGS 64 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ #define IFLIB_MIN_DESC_SEC 16384 #define IFLIB_DEFAULT_TX_UPDATE_FREQ 16 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* maximum number of txqs that can share an rx interrupt */ #define IFLIB_MAX_TX_SHARED_INTR 4 /* this should really scale with ring size - this is a fairly arbitrary value */ #define TX_BATCH_SIZE 32 #define IFLIB_RESTART_BUDGET 8 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { qidx_t ift_in_use; qidx_t ift_cidx; qidx_t ift_cidx_processed; qidx_t ift_pidx; uint8_t ift_gen; uint8_t ift_br_offset; uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; /* implicit pad */ uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; uint64_t ift_cleaned_prev; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; uint64_t ift_last_timer_tick; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring *ift_br; struct grouptask ift_task; qidx_t ift_size; uint16_t ift_id; struct callout ift_timer; if_txsd_vec_t ift_sds; uint8_t ift_qstatus; uint8_t ift_closed; uint8_t ift_update_freq; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_desc_tag; bus_dma_tag_t ift_tso_desc_tag; iflib_dma_info_t ift_ifdi; #define MTX_NAME_LEN 16 char ift_mtx_name[MTX_NAME_LEN]; char ift_db_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { qidx_t ifl_cidx; qidx_t ifl_pidx; qidx_t ifl_credits; uint8_t ifl_gen; uint8_t ifl_rxd_size; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ bitstr_t *ifl_rx_bitmap; qidx_t ifl_fragidx; /* constant */ qidx_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_array_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_desc_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH]; qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline qidx_t get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen) { qidx_t used; if (pidx > cidx) used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 
	    && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t	ifr_size;
	qidx_t	ifr_cq_cidx;
	qidx_t	ifr_cq_pidx;
	uint8_t	ifr_cq_gen;
	uint8_t	ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t	ifr_lro_enabled;
	uint8_t	ifr_nfl;
	uint8_t	ifr_ntxqirq;
	uint8_t	ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl	ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
}  __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)

#define
CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) void iflib_set_detach(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); } /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); static int iflib_no_tx_batch = 0; SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW, &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# tx mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# tx mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# tx mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# tx frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# rx allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_pad_mbuf_fail; static int iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail; SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD, &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_if_input; static 
int iflib_rx_mbuf_null; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# rx intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD, &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_if_input = iflib_rx_mbuf_null = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); #ifdef ALTQ static void iflib_altq_if_start(if_t ifp); static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m); #endif static int iflib_register(if_ctx_t); static void iflib_init_locked(if_ctx_t ctx); static void iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); static void iflib_if_init_locked(if_ctx_t ctx); static void iflib_free_intr_mem(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m); #endif NETDUMP_DEFINE(iflib); #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. 
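 *
 *	For example, "sysctl dev.netmap.iflib_crcstrip=0" (the path as
 *	registered below) keeps the CRC on received frames for such
 *	benchmark runs.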
* * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; int status; CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } iflib_stop(ctx); iflib_init_locked(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1; if (status) nm_clear_native_flags(na); CTX_UNLOCK(ctx); return (status); } static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; struct netmap_ring *ring = kring->ring; bus_dmamap_t *map; struct if_rxd_update iru; if_ctx_t ctx = rxq->ifr_ctx; iflib_fl_t fl = &rxq->ifr_fl[0]; uint32_t refill_pidx, nic_i; #if IFLIB_DEBUG_COUNTERS int rf_count = 0; #endif if (nm_i == head && __predict_true(!init)) return 0; iru_init(&iru, rxq, 0 /* flid */); map = fl->ifl_sds.ifsd_map; refill_pidx = netmap_idx_k2n(kring, nm_i); /* * IMPORTANT: we must leave one free slot in the ring, * so move head back by one unit */ head = nm_prev(head, lim); nic_i = UINT_MAX; DBG_COUNTER_INC(fl_refills); while (nm_i != head) { #if IFLIB_DEBUG_COUNTERS if (++rf_count == 9) DBG_COUNTER_INC(fl_refills_large); #endif for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); uint32_t nic_i_dma = refill_pidx; nic_i = netmap_idx_k2n(kring, nm_i); MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ return netmap_ring_reinit(kring); fl->ifl_vm_addrs[tmp_pidx] = addr; if (__predict_false(init) && map) { netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } else if (map && (slot->flags & NS_BUF_CHANGED)) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) continue; iru.iru_pidx = refill_pidx; iru.iru_count = tmp_pidx+1; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); refill_pidx = nic_i; if (map == NULL) continue; for (int n = 0; n < iru.iru_count; n++) { bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma], BUS_DMASYNC_PREREAD); /* XXX - change this to not use the netmap func*/ nic_i_dma = nm_next(nic_i_dma, lim); } 
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (__predict_true(nic_i != UINT_MAX)) {
		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
		DBG_COUNTER_INC(rxd_flush);
	}
	return (0);
}

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head),
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
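			/*
			 * isc_txd_encap resolves to the driver's
			 * ift_txd_encap callback, which turns this
			 * if_pkt_info into hardware descriptors starting
			 * at pi.ipi_pidx.
			 */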
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
			DBG_COUNTER_INC(tx_encap);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag,
					    txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag,
				    txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode.
For the receive ring we have * * nic_i = rxr->next_check; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * rxr->next_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = iflib_crcstrip ? 0 : 4; int error, avail; for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX); for (n = 0; avail > 0; n++, avail--) { rxd_info_zero(&ri); ri.iri_frags = rxq->ifr_frags; ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; ri.iri_cidx = nic_i; error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen; ring->slot[nm_i].flags = 0; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss ++; iflib_rx_miss_bufs += n; } fl->ifl_cidx = nic_i; kring->nr_hwtail = nm_i; } kring->nr_kflags &= ~NKR_PENDINTR; } } /* * Second part: skip past packets that userspace has released. * (kring->nr_hwcur to head excluded), * and make the buffers available for reception. * As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ /* XXX not sure how this will work with multiple free lists */ nm_i = kring->nr_hwcur; return (netmap_fl_refill(rxq, kring, nm_i, false)); } static void iflib_netmap_intr(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; CTX_LOCK(ctx); if (onoff) { IFDI_INTR_ENABLE(ctx); } else { IFDI_INTR_DISABLE(ctx); } CTX_UNLOCK(ctx); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = scctx->isc_ntxd[0]; na.num_rx_desc = scctx->isc_nrxd[0]; na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.nm_intr = iflib_netmap_intr; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static void iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == NULL) return; for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. 
* netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } } static void iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_kring *kring = na->rx_rings[rxq->ifr_id]; struct netmap_slot *slot; uint32_t nm_i; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == NULL) return; nm_i = netmap_idx_n2k(kring, 0); netmap_fl_refill(rxq, kring, nm_i, true); } static void iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on) { struct netmap_kring *kring; kring = NA(ctx->ifc_ifp)->tx_rings[txqid]; if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) { if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) netmap_tx_irq(ctx->ifc_ifp, txqid); if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) { if (hz < 2000) *reset_on = 1; else *reset_on = hz / 1000; } } } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) #define iflib_netmap_rxq_init(ctx, rxq) #define iflib_netmap_detach(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #define netmap_tx_irq(ifp, qid) do {} while (0) #define iflib_netmap_timer_adjust(ctx, txqid, reset_on) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } static __inline void prefetch2cachelines(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); #if (CACHE_LINE_SIZE < 128) __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long))))); #endif } #else #define prefetch(x) #define prefetch2cachelines(x) #endif static void iflib_gen_mac(if_ctx_t ctx) { struct thread *td; MD5_CTX mdctx; char uuid[HOSTUUIDLEN+1]; char buf[HOSTUUIDLEN+16]; uint8_t *mac; unsigned char digest[16]; td = curthread; mac = ctx->ifc_mac; uuid[HOSTUUIDLEN] = 0; bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN); snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev)); /* * Generate a pseudo-random, deterministic MAC * address based on the UUID and unit number. * The FreeBSD Foundation OUI of 58-9C-FC is used. 
*/ MD5Init(&mdctx); MD5Update(&mdctx, buf, strlen(buf)); MD5Final(digest, &mdctx); mac[0] = 0x58; mac[1] = 0x9C; mac[2] = 0xFC; mac[3] = digest[0]; mac[4] = digest[1]; mac[5] = digest[2]; } static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) { iflib_fl_t fl; fl = &rxq->ifr_fl[flid]; iru->iru_paddrs = fl->ifl_bus_addrs; iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; iru->iru_idxs = fl->ifl_rxd_idxs; iru->iru_qsidx = rxq->ifr_id; iru->iru_buf_size = fl->ifl_buf_size; iru->iru_flidx = fl->ifl_id; } static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int -iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) +iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags) { int err; - if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; - KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); - - err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - sctx->isc_q_align, 0, /* alignment, bounds */ + err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); +} + +int +iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) +{ + if_shared_ctx_t sctx = ctx->ifc_sctx; + + KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); + + return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags)); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } #ifdef 
EARLY_AP_STARTUP static const int iflib_started = 1; #else /* * We used to abuse the smp_started flag to decide if the queues have been * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). * That gave bad races, since the SYSINIT() runs strictly after smp_started * is set. Run a SYSINIT() strictly after that to just set a usable * completion flag. */ static int iflib_started; static void iflib_record_started(void *arg) { iflib_started = 1; } SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, iflib_record_started, NULL); #endif static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int iflib_fast_intr_rxtx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; if_ctx_t ctx = NULL; int i, cidx; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); MPASS(rxq->ifr_ntxqirq); for (i = 0; i < rxq->ifr_ntxqirq; i++) { qidx_t txqid = rxq->ifr_txqid[i]; ctx = rxq->ifr_ctx; bus_dmamap_sync(rxq->ifr_ifdi->idi_tag, rxq->ifr_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) { IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); continue; } GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) cidx = rxq->ifr_cq_cidx; else cidx = rxq->ifr_fl[0].ifl_cidx; if (iflib_rxd_avail(ctx, rxq, cidx, 1)) GROUPTASK_ENQUEUE(gtask); else { IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); DBG_COUNTER_INC(rx_intr_enables); } return (FILTER_HANDLED); } static int iflib_fast_intr_ctx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, const char *name) { int rc, flags; struct resource *res; void *tag = NULL; device_t dev = ctx->ifc_dev; flags = RF_ACTIVE; if (ctx->ifc_flags & IFC_LEGACY) flags |= RF_SHAREABLE; MPASS(rid < 512); irq->ii_rid = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire.
This is * called only once at attach, setup is done every reset. * **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; bus_size_t tsomaxsize; int err, nsegments, ntsosegments; bool tso; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; tsomaxsize = scctx->isc_tx_tso_size_max; if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU) tsomaxsize += sizeof(struct ether_vlan_header); MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) { MPASS(ntsosegments > 0); MPASS(sctx->isc_tso_maxsize >= tsomaxsize); } /* * Setup DMA descriptor areas. */ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_desc_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); goto fail; } tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0; if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tsomaxsize, /* maxsize */ ntsosegments, /* nsegments */ sctx->isc_tso_maxsegsize,/* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_desc_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); goto fail; } if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate tx_buffer map memory\n"); err = ENOMEM; goto fail; } if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TSO tx_buffer " "map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } if (!tso) continue; err = bus_dmamap_create(txq->ift_tso_desc_tag, 0, &txq->ift_sds.ifsd_tso_map[i]); if (err != 0) { device_printf(dev, "Unable to create TSO TX DMA map\n"); goto fail; } } return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; map = NULL; if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[i]; if (map != NULL) { bus_dmamap_sync(txq->ift_desc_tag, map, 
BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_desc_tag, map); bus_dmamap_destroy(txq->ift_desc_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } map = NULL; if (txq->ift_sds.ifsd_tso_map != NULL) map = txq->ift_sds.ifsd_tso_map[i]; if (map != NULL) { bus_dmamap_sync(txq->ift_tso_desc_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_desc_tag, map); bus_dmamap_destroy(txq->ift_tso_desc_tag, map); txq->ift_sds.ifsd_tso_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_tso_map != NULL) { free(txq->ift_sds.ifsd_tso_map, M_IFLIB); txq->ift_sds.ifsd_tso_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_desc_tag); txq->ift_desc_tag = NULL; } if (txq->ift_tso_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_desc_tag); txq->ift_tso_desc_tag = NULL; } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]); } if (txq->ift_sds.ifsd_tso_map != NULL) { bus_dmamap_sync(txq->ift_tso_desc_tag, txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_desc_tag, txq->ift_sds.ifsd_tso_map[i]); } m_free(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* XXX make configurable */ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; /* Reset indices */ txq->ift_cidx_processed = 0; txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_desc_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, err); goto fail; } if (!(fl->ifl_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx mbuf memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_cl = (caddr_t *) malloc(sizeof(caddr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx cluster memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_ba = (bus_addr_t *) malloc(sizeof(bus_addr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx bus addr memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ if (!(fl->ifl_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create RX buffer DMA map\n"); goto fail; } } } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } /** * _iflib_fl_refill - refill an rxq free-buffer list * @ctx: the iflib context * @fl: the free-list to refill * @count: the number of new buffers to allocate * * (Re)populate an rxq free-buffer list with up to @count new packet buffers. * The caller must ensure that @count does not exceed the queue's capacity.
*/ static void _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct mbuf *m; int idx, frag_idx = fl->ifl_fragidx; int pidx = fl->ifl_pidx; caddr_t cl, *sd_cl; struct mbuf **sd_m; struct if_rxd_update iru; struct rxq_refill_cb_arg cb_arg; bus_dmamap_t *sd_map; int n, i = 0; bus_addr_t bus_addr, *sd_ba; int err; qidx_t credits; sd_m = fl->ifl_sds.ifsd_m; sd_map = fl->ifl_sds.ifsd_map; sd_cl = fl->ifl_sds.ifsd_cl; sd_ba = fl->ifl_sds.ifsd_ba; idx = pidx; credits = fl->ifl_credits; n = count; MPASS(n > 0); MPASS(credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); iru_init(&iru, fl->ifl_rxq, fl->ifl_id); while (n--) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. * * If the cluster is still set then we know a minimum sized packet was received */ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); if ((cl = sd_cl[frag_idx]) == NULL) { if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) break; cb_arg.error = 0; MPASS(sd_map != NULL); err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, BUS_DMA_NOWAIT); if (err != 0 || cb_arg.error) { /* * !zone_pack ? */ if (fl->ifl_zone == zone_pack) uma_zfree(fl->ifl_zone, cl); break; } bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], BUS_DMASYNC_PREREAD); sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr; sd_cl[frag_idx] = cl; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } else { bus_addr = sd_ba[frag_idx]; } bit_set(fl->ifl_rx_bitmap, frag_idx); MPASS(sd_m[frag_idx] == NULL); if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { break; } sd_m[frag_idx] = m; #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); fl->ifl_rxd_idxs[i] = frag_idx; fl->ifl_bus_addrs[i] = bus_addr; fl->ifl_vm_addrs[i] = cl; credits++; i++; MPASS(credits <= fl->ifl_size); if (++idx == fl->ifl_size) { fl->ifl_gen = 1; idx = 0; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); i = 0; pidx = idx; fl->ifl_pidx = idx; fl->ifl_credits = credits; } } if (i) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); fl->ifl_pidx = idx; fl->ifl_credits = credits; } DBG_COUNTER_INC(rxd_flush); if (fl->ifl_pidx == 0) pidx = fl->ifl_size - 1; else pidx = fl->ifl_pidx - 1; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); fl->ifl_fragidx = frag_idx; } static __inline void __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) { /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) _iflib_fl_refill(ctx, fl, min(max, reclaimable)); } uint8_t iflib_in_detach(if_ctx_t ctx) { bool in_detach; STATE_LOCK(ctx); in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); STATE_UNLOCK(ctx); 
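/* * Note (editorial): IFC_IN_DETACH is sampled under the state lock, so the value returned below is only a point-in-time snapshot. Callers such as iflib_fl_bufs_free() use it as an advisory hint (e.g. whether to destroy the per-descriptor DMA maps), not as a synchronization primitive. */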
return (in_detach); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; bus_dmamap_t sd_map; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; if (*sd_cl != NULL) { sd_map = fl->ifl_sds.ifsd_map[i]; bus_dmamap_sync(fl->ifl_desc_tag, sd_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fl->ifl_desc_tag, sd_map); if (*sd_cl != NULL) uma_zfree(fl->ifl_zone, *sd_cl); // XXX: Should this get moved out? if (iflib_in_detach(fl->ifl_rxq->ifr_ctx)) bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); if (*sd_m != NULL) { m_init(*sd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, *sd_m); } } else { MPASS(*sd_cl == NULL); MPASS(*sd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif *sd_cl = NULL; *sd_m = NULL; } #ifdef INVARIANTS for (i = 0; i < fl->ifl_size; i++) { MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); MPASS(fl->ifl_sds.ifsd_m[i] == NULL); } #endif /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ if (sctx->isc_max_frame_size <= 2048) fl->ifl_buf_size = MCLBYTES; #ifndef CONTIGMALLOC_WORKS else fl->ifl_buf_size = MJUMPAGESIZE; #else else if (sctx->isc_max_frame_size <= 4096) fl->ifl_buf_size = MJUMPAGESIZE; else if (sctx->isc_max_frame_size <= 9216) fl->ifl_buf_size = MJUM9BYTES; else fl->ifl_buf_size = MJUM16BYTES; #endif if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* avoid pre-allocating zillions of clusters to an idle card * potentially speeding up attach */ _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); MPASS(min(128, fl->ifl_size) == fl->ifl_credits); if (min(128, fl->ifl_size) != fl->ifl_credits) return (ENOBUFS); /* * handle failure */ MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i, j; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_desc_tag != NULL) { if (fl->ifl_sds.ifsd_map != NULL) { for (j = 0; j < fl->ifl_size; j++) { if (fl->ifl_sds.ifsd_map[j] == NULL) continue; bus_dmamap_sync( fl->ifl_desc_tag, fl->ifl_sds.ifsd_map[j], BUS_DMASYNC_POSTREAD); bus_dmamap_unload( fl->ifl_desc_tag, fl->ifl_sds.ifsd_map[j]); } } bus_dma_tag_destroy(fl->ifl_desc_tag); fl->ifl_desc_tag = NULL; } free(fl->ifl_sds.ifsd_m, M_IFLIB); free(fl->ifl_sds.ifsd_cl, M_IFLIB); free(fl->ifl_sds.ifsd_ba, M_IFLIB);
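/* * Note (editorial): the dmamap handles in ifsd_map were unloaded in the loop above (and are destroyed on detach via iflib_fl_bufs_free()), so only the array storage itself is released here. */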
free(fl->ifl_sds.ifsd_map, M_IFLIB); fl->ifl_sds.ifsd_m = NULL; fl->ifl_sds.ifsd_cl = NULL; fl->ifl_sds.ifsd_ba = NULL; fl->ifl_sds.ifsd_map = NULL; } free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } } /* * MI independent logic * */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; uint64_t this_tick = ticks; uint32_t reset_on = hz / 2; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. */ if (this_tick - txq->ift_last_timer_tick >= hz / 2) { txq->ift_last_timer_tick = this_tick; IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && ((txq->ift_cleaned_prev == txq->ift_cleaned) || (sctx->isc_pause_frames == 0))) goto hung; if (ifmp_ring_is_stalled(txq->ift_br)) txq->ift_qstatus = IFLIB_QUEUE_HUNG; txq->ift_cleaned_prev = txq->ift_cleaned; } #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on); #endif /* handle any laggards */ if (txq->ift_db_pending) GROUPTASK_ENQUEUE(&txq->ift_task); sctx->isc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); STATE_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { /* XXX this should really be done on a per-queue basis */ if (if_getcapenable(ifp) & IFCAP_NETMAP) { MPASS(rxq->ifr_id == i); iflib_netmap_rxq_init(ctx, rxq); continue; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(1000); IFDI_STOP(ctx); DELAY(1000); iflib_debug_reset(); /* Wait for current tx queue users to exit to disarm watchdog timer. */ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br); for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all transmitters have completed before proceeding XXX */ rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static inline caddr_t calc_next_rxd(iflib_fl_t fl, int cidx) { qidx_t size; int nrxd; caddr_t start, end, cur, next; nrxd = fl->ifl_size; size = fl->ifl_rxd_size; start = fl->ifl_ifdi->idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*nrxd; next = CACHE_PTR_NEXT(cur); return (next < end ? 
next : start); } static inline void prefetch_pkts(iflib_fl_t fl, int cidx) { int nextptr; int nrxd = fl->ifl_size; caddr_t next_rxd; nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); prefetch(&fl->ifl_sds.ifsd_m[nextptr]); prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); next_rxd = calc_next_rxd(fl, cidx); prefetch(next_rxd); prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); } static void rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) { int flid, cidx; bus_dmamap_t map; iflib_fl_t fl; iflib_dma_info_t di; int next; map = NULL; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; sd->ifsd_fl = fl; sd->ifsd_cidx = cidx; sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; #endif if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) prefetch_pkts(fl, cidx); next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_map[next]); map = fl->ifl_sds.ifsd_map[cidx]; di = fl->ifl_ifdi; next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); /* not valid assert if bxe really does SGE from non-contiguous elements */ MPASS(fl->ifl_cidx == cidx); bus_dmamap_sync(fl->ifl_desc_tag, map, BUS_DMASYNC_POSTREAD); if (unload) bus_dmamap_unload(fl->ifl_desc_tag, map); fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); if (__predict_false(fl->ifl_cidx == 0)) fl->ifl_gen = 0; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bit_clear(fl->ifl_rx_bitmap, cidx); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) { int i, padlen , flags; struct mbuf *m, *mh, *mt; caddr_t cl; i = 0; mh = NULL; do { rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); MPASS(*sd->ifsd_cl != NULL); MPASS(*sd->ifsd_m != NULL); /* Don't include zero-length frags */ if (ri->iri_frags[i].irf_len == 0) { /* XXX we can save the cluster here, but not the mbuf */ m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); m_free(*sd->ifsd_m); *sd->ifsd_m = NULL; continue; } m = *sd->ifsd_m; *sd->ifsd_m = NULL; if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } cl = *sd->ifsd_cl; *sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct if_rxsd sd; struct mbuf *m; /* should I merge this back in now that the two paths are basically duplicated? 
*/ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); m = *sd.ifsd_m; *sd.ifsd_m = NULL; m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m)) m->m_data += 2; #endif memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); bus_dmamap_sync(rxq->ifr_fl->ifl_desc_tag, rxq->ifr_fl->ifl_sds.ifsd_map[ri->iri_frags[0].irf_idx], BUS_DMASYNC_PREREAD); m->m_len = ri->iri_frags[0].irf_len; } else { m = assemble_segments(rxq, ri, &sd); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } #if defined(INET6) || defined(INET) static void iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) { CURVNET_SET(lc->ifp->if_vnet); #if defined(INET6) *v6 = VNET(ip6_forwarding); #endif #if defined(INET) *v4 = VNET(ipforwarding); #endif CURVNET_RESTORE(); } /* * Returns true if it's possible this packet could be LROed. * if it returns false, it is guaranteed that tcp_lro_rx() * would not return zero. */ static bool iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) { struct ether_header *eh; uint16_t eh_type; eh = mtod(m, struct ether_header *); eh_type = ntohs(eh->ether_type); switch (eh_type) { #if defined(INET6) case ETHERTYPE_IPV6: return !v6_forwarding; #endif #if defined (INET) case ETHERTYPE_IP: return !v4_forwarding; #endif } return false; } #else static void iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) { } #endif static bool iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; int avail, i; qidx_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; struct ifnet *ifp; int lro_enabled; bool v4_forwarding, v6_forwarding, lro_possible; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt, *mf; lro_possible = v4_forwarding = v6_forwarding = false; ifp = ctx->ifc_ifp; mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); DBG_COUNTER_INC(rx_unavail); return (false); } for (budget_left = budget; budget_left > 0 && avail > 0;) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ rxd_info_zero(&ri); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ifp; ri.iri_frags = rxq->ifr_frags; di = rxq->ifr_fl[rxq->ifr_frags[0].irf_flid].ifl_ifdi; bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (err) goto err; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ /* XXX NB: shurd - check if this is still safe */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; rxq->ifr_cq_gen = 0; } /* was this 
only a completion queue message? */ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); avail--; budget_left--; if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) { DBG_COUNTER_INC(rx_mbuf_null); continue; } /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); if (lro_enabled) iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); mt = mf = NULL; while (mh != NULL) { m = mh; mh = mh->m_nextpkt; m->m_nextpkt = NULL; #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) continue; #endif rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled) { if (!lro_possible) { lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); if (lro_possible && mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); mt = mf = NULL; } } if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == (CSUM_L4_CALC|CSUM_L4_VALID)) { if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) continue; } } #endif if (lro_possible) { ifp->if_input(ifp, m); DBG_COUNTER_INC(rx_if_input); continue; } if (mf == NULL) mf = m; if (mt != NULL) mt->m_nextpkt = m; mt = m; } if (mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail) return true; return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); err: STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); return (false); } #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) static inline qidx_t txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (in_use > 4*minthresh) return (notify_count); if (in_use > 2*minthresh) return (notify_count >> 1); if (in_use > minthresh) return (notify_count >> 3); return (0); } static inline qidx_t txq_max_rs_deferred(iflib_txq_t txq) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (txq->ift_in_use > 4*minthresh) return (notify_count); if (txq->ift_in_use > 2*minthresh) return (notify_count >> 1); if (txq->ift_in_use > minthresh) return (notify_count >> 2); return (2); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 
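/* * Worked example of the doorbell-deferral policy above (illustrative numbers, not normative): with ift_size = 1024 and ift_update_freq = 16, TXD_NOTIFY_COUNT is 1024/16 - 1 = 63 and minthresh is 1024/8 = 128. A queue with more than 512 descriptors in use may defer up to 63 doorbell writes before flushing, one with more than 256 in use up to 31, one with more than 128 in use up to 7, and a lightly loaded queue gets no deferral at all. The RS-deferral variant is shaped the same way but never defers fewer than 2. The intent is to batch doorbell writes only when the ring is busy enough that the NIC is in no danger of going idle. */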
/* XXX we should be setting this to something other than zero */ #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ (ctx)->ifc_softc_ctx.isc_tx_nsegments) static inline bool iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) { qidx_t dbval, max; bool rang; rang = false; max = TXQ_MAX_DB_DEFERRED(txq, in_use); if (ring || txq->ift_db_pending >= max) { dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; rang = true; } return (rang); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m; m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); DBG_COUNTER_INC(tx_frees); *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
*/ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct mbuf *n; struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD4(pi)) { if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { if (__predict_false(th == NULL)) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO4(pi)) { if (__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } } if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) ip->ip_sum = 0; break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. 
*/ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD6(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO6(pi)) { if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= CSUM_IP6_TCP; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, pidx; struct mbuf *m, **ifsd_m; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx = txq->ift_pidx & (ntxd - 1); ifsd_m = txq->ift_sds.ifsd_m; m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[pidx]); if (txq->ift_sds.ifsd_tso_map != NULL) bus_dmamap_unload(txq->ift_tso_desc_tag, txq->ift_sds.ifsd_tso_map[pidx]); #if MEMORY_LOGGING txq->ift_dequeued++; #endif return (m); } static inline caddr_t calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) { qidx_t size; int ntxd; caddr_t start, end, cur, next; ntxd = txq->ift_size; size = txq->ift_txd_size[qid]; start = txq->ift_ifdi[qid].idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*ntxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } /* * Pad an mbuf to ensure a minimum ethernet frame size. 
* min_frame_size is the frame size (less CRC) to pad the mbuf to */ static __noinline int iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) { /* * 18 is enough bytes to pad an ARP packet to 46 bytes, and * an ARP message is the smallest common payload I can think of */ static char pad[18]; /* just zeros */ int n; struct mbuf *new_head; if (!M_WRITABLE(*m_head)) { new_head = m_dup(*m_head, M_NOWAIT); if (new_head == NULL) { m_freem(*m_head); device_printf(dev, "cannot pad short frame, m_dup() failed\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return (ENOMEM); } m_freem(*m_head); *m_head = new_head; } for (n = min_frame_size - (*m_head)->m_pkthdr.len; n > 0; n -= sizeof(pad)) if (!m_append(*m_head, min(n, sizeof(pad)), pad)) break; if (n > 0) { m_freem(*m_head); device_printf(dev, "cannot pad short frame\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } return (0); } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_segment_t *segs; struct mbuf *m_head, **ifsd_m; void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; bus_dma_tag_t desc_tag; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; if (ctx->ifc_flags & IFC_PREFETCH) { next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { next_txd = calc_next_txd(txq, cidx, 0); prefetch(next_txd); } /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); prefetch(&txq->ift_sds.ifsd_map[next]); next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); } map = txq->ift_sds.ifsd_map[pidx]; ifsd_m = txq->ift_sds.ifsd_m; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { desc_tag = txq->ift_tso_desc_tag; max_segs = scctx->isc_tx_tso_segments_max; map = txq->ift_sds.ifsd_tso_map[pidx]; MPASS(desc_tag != NULL); MPASS(max_segs > 0); } else { desc_tag = txq->ift_desc_tag; max_segs = scctx->isc_tx_nsegments; map = txq->ift_sds.ifsd_map[pidx]; } if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); if (err) { DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } } m_head = *m_headp; pkt_info_zero(&pi); pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ?
m_head->m_pkthdr.ether_vtag : 0; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } m_head = *m_headp; } retry: err = bus_dmamap_load_mbuf_sg(desc_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) { m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); /* try defrag if collapsing fails */ if (m_head == NULL) remap++; } if (remap == 1) { txq->ift_mbuf_defrag++; m_head = m_defrag(*m_headp, M_NOWAIT); } remap++; if (__predict_false(m_head == NULL)) goto defrag_failed; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } ifsd_m[pidx] = m_head; /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; bus_dmamap_unload(desc_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); DBG_COUNTER_INC(encap_txd_encap_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } /* * On Intel cards we can greatly reduce the number of TX interrupts * we see by only setting report status on every Nth descriptor. * However, this also means that the driver will need to keep track * of the descriptors that RS was set on to check them for the DD bit. */ txq->ift_rs_pending += nsegs + 1; if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { pi.ipi_flags |= IPI_TX_INTR; txq->ift_rs_pending = 0; } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else { *m_headp = m_head = iflib_remove_mbuf(txq); if (err == EFBIG) { txq->ift_txd_encap_efbig++; if (remap < 2) { remap = 1; goto defrag; } } goto defrag_failed; } /* * err can't possibly be non-zero here, so we don't need to test it * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
*/ return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; DBG_COUNTER_INC(encap_txd_encap_fail); return (ENOMEM); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; ifsd_m = txq->ift_sds.ifsd_m; do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n-- > 0) { if (do_prefetch) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); } if ((m = ifsd_m[cidx]) != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); if (m->m_pkthdr.csum_flags & CSUM_TSO) { bus_dmamap_sync(txq->ift_tso_desc_tag, txq->ift_sds.ifsd_tso_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_desc_tag, txq->ift_sds.ifsd_tso_map[cidx]); } else { bus_dmamap_sync(txq->ift_desc_tag, txq->ift_sds.ifsd_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[cidx]); } /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); m_freem(m); ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) { int next, size; struct mbuf **items; size = r->size; next = (cidx + CACHE_PTR_INCREMENT) & (size-1); items = __DEVOLATILE(struct mbuf **, &r->items[0]); prefetch(items[(cidx + offset) & (size-1)]); if (remaining > 1) { prefetch2cachelines(&items[next]); prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); } return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br, budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; struct mbuf **mp, *m; int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; int reclaimed, err, in_use_prev, 
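/*
 * A sketch of the reclaim arithmetic above (the exact
 * DESC_RECLAIMABLE() and RECLAIM_THRESH() expansions are defined
 * elsewhere in this file): if 512 descriptors have been processed by
 * the hardware and 256 of them cleaned, a call such as
 * iflib_completed_tx_reclaim(txq, 64) sees roughly 256 reclaimable
 * descriptors, exceeds the threshold of 64, and frees them in a single
 * iflib_tx_desc_free() pass, amortizing the bus_dmamap_unload() and
 * m_freem() costs instead of paying them per packet.
 */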
desc_used; bool do_prefetch, ring, rang; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); avail = IDXDIFF(pidx, cidx, r->size); if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) m_free(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } if (reclaimed) txq->ift_qstatus = IFLIB_QUEUE_IDLE; consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); avail = TXQ_AVAIL(txq); err = 0; for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) { int rem = do_prefetch ? count - i : 0; mp = _ring_peek_one(r, cidx, i, rem); MPASS(mp != NULL && *mp != NULL); if (__predict_false(*mp == (struct mbuf *)txq)) { consumed++; reclaimed++; continue; } in_use_prev = txq->ift_in_use; err = iflib_encap(txq, mp); if (__predict_false(err)) { /* no room - bail out */ if (err == ENOBUFS) break; consumed++; /* we can't send this packet - skip it */ continue; } consumed++; pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; mcast_sent += !!(m->m_flags & M_MCAST); avail = TXQ_AVAIL(txq); txq->ift_db_pending += (txq->ift_in_use - in_use_prev); desc_used += (txq->ift_in_use - in_use_prev); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) break; rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ ring = rang ? 
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i, avail - i); if (__predict_false(*mp == (struct mbuf *)txq)) continue; m_freem(*mp); DBG_COUNTER_INC(tx_frees); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; int abdicate = ctx->ifc_sysctl_tx_abdicate; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; if (if_getcapenable(ifp) & IFCAP_NETMAP) { bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) netmap_tx_irq(ifp, txq->ift_id); IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); return; } #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) iflib_altq_if_start(ifp); #endif if (txq->ift_db_pending) ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); else if (!abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); /* * When abdicating, we always need to check drainage, not just when we don't enqueue */ if (abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { #ifdef INVARIANTS int rc = #endif IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } static void _task_fn_rx(void *context) { iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; bool more; uint16_t budget; #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; more = true; #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { u_int work = 0; if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { more = false; } } #endif budget = ctx->ifc_sysctl_rx_budget; if (budget == 0) budget = 16; /* XXX */ if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { #ifdef INVARIANTS int rc = #endif IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); DBG_COUNTER_INC(rx_intr_enables); } } if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more) 
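/*
 * Note on the re-enqueue below: rather than looping in the task when
 * iflib_rxeof() reports more work than the budget allowed, the RX task
 * re-enqueues itself. This yields the taskqueue thread between bursts
 * and leaves the queue interrupt disabled until the backlog drains.
 */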
GROUPTASK_ENQUEUE(&rxq->ifr_task); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; bool oactive, running, do_reset, do_watchdog, in_detach; uint32_t reset_on = hz / 2; STATE_LOCK(ctx); running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); do_reset = (ctx->ifc_flags & IFC_DO_RESET); do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); in_detach = (ctx->ifc_flags & IFC_IN_DETACH); ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); STATE_UNLOCK(ctx); if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; if (in_detach) return; CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } if (do_watchdog) { ctx->ifc_watchdog_events++; IFDI_WATCHDOG_RESET(ctx); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { #ifdef DEV_NETMAP reset_on = hz / 2; if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on); #endif callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); } IFDI_LINK_INTR_ENABLE(ctx); if (do_reset) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; int abdicate = ctx->ifc_sysctl_tx_abdicate; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENOBUFS); } MPASS(m->m_nextpkt == NULL); /* ALTQ-enabled interfaces always use queue 0. */ qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) 
*/ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); DBG_COUNTER_INC(tx_frees); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); if (abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); if (err) { if (!abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); m_freem(m); DBG_COUNTER_INC(tx_frees); } return (err); } #ifdef ALTQ /* * The overall approach to integrating iflib with ALTQ is to continue to use * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring * is redundant/unnecessary, but doing so minimizes the amount of * ALTQ-specific code required in iflib. It is assumed that the overhead of * redundantly queueing to an intermediate mp_ring is swamped by the * performance limitations inherent in using ALTQ. * * When ALTQ support is compiled in, all iflib drivers will use a transmit * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the * given interface. If ALTQ is enabled for an interface, then all * transmitted packets for that interface will be submitted to the ALTQ * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit() * implementation because it uses IFQ_HANDOFF(), which will duplicatively * update stats that the iflib machinery handles, and which is sensitive to * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start() * will be installed as the start routine for use by ALTQ facilities that * need to trigger queue drains on a scheduled basis. */ static void iflib_altq_if_start(if_t ifp) { struct ifaltq *ifq = &ifp->if_snd; struct mbuf *m; IFQ_LOCK(ifq); IFQ_DEQUEUE_NOLOCK(ifq, m); while (m != NULL) { iflib_if_transmit(ifp, m); IFQ_DEQUEUE_NOLOCK(ifq, m); } IFQ_UNLOCK(ifq); } static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m) { int err; if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_ENQUEUE(&ifp->if_snd, m, err); if (err == 0) iflib_altq_if_start(ifp); } else err = iflib_if_transmit(ifp, m); return (err); } #endif /* ALTQ */ static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; STATE_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; STATE_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) iflib_txq_check_drain(txq, 0); STATE_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; STATE_UNLOCK(ctx); /* * When ALTQ is enabled, this will also take care of purging the
*/ if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \ IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP,0); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) reinit = 1; #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: CTX_LOCK(ctx); if (ifr->ifr_mtu == if_getmtu(ifp)) { CTX_UNLOCK(ctx); break; } bits = if_getdrvflags(ifp); /* stop the driver and free any clusters before proceeding */ iflib_stop(ctx); if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { STATE_LOCK(ctx); if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) ctx->ifc_flags |= IFC_MULTISEG; else ctx->ifc_flags &= ~IFC_MULTISEG; STATE_UNLOCK(ctx); err = if_setmtu(ifp, ifr->ifr_mtu); } iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); break; case SIOCSIFFLAGS: CTX_LOCK(ctx); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); } } else reinit = 1; } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { iflib_stop(ctx); } ctx->ifc_if_flags = if_getflags(ifp); CTX_UNLOCK(ctx); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); IFDI_MULTI_SET(ctx); IFDI_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); } break; case SIOCSIFMEDIA: CTX_LOCK(ctx); IFDI_MEDIA_SET(ctx); CTX_UNLOCK(ctx); /* falls thru */ case SIOCGIFMEDIA: case SIOCGIFXMEDIA: err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); break; case SIOCGI2C: { struct ifi2creq i2c; err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (err != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { err = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { err = EINVAL; break; } if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) err = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } case SIOCSIFCAP: { int mask, setmask, oldmask; oldmask = if_getcapenable(ifp); mask = ifr->ifr_reqcap ^ oldmask; mask &= ctx->ifc_softc_ctx.isc_capabilities; setmask = 0; #ifdef TCP_OFFLOAD setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); #endif setmask |= (mask & IFCAP_FLAGS); setmask |= (mask & IFCAP_WOL); /* * If any RX csum has changed, change all the ones that * are supported by the driver. 
*/ if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { setmask |= ctx->ifc_softc_ctx.isc_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); } /* * We want to ensure that traffic has stopped before we change any of the flags */ if (setmask) { CTX_LOCK(ctx); bits = if_getdrvflags(ifp); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_stop(ctx); STATE_LOCK(ctx); if_togglecapenable(ifp, setmask); STATE_UNLOCK(ctx); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); } if_vlancap(ifp); break; } case SIOCGPRIVATE_0: case SIOCSDRVSPEC: case SIOCGDRVSPEC: CTX_LOCK(ctx); err = IFDI_PRIV_IOCTL(ctx, command, data); CTX_UNLOCK(ctx); break; default: err = ether_ioctl(ifp, command, data); break; } if (reinit) iflib_if_init(ctx); return (err); } static uint64_t iflib_if_get_counter(if_t ifp, ift_counter cnt) { if_ctx_t ctx = if_getsoftc(ifp); return (IFDI_GET_COUNTER(ctx, cnt)); } /********************************************************************* * * OTHER FUNCTIONS EXPORTED TO THE STACK * **********************************************************************/ static void iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_REGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { pci_vendor_info_t *ent; uint16_t pci_vendor_id, pci_device_id; uint16_t pci_subvendor_id, pci_subdevice_id; uint16_t pci_rev_id; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, ent->pvi_name); /* This needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values overwritten by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } static void iflib_reset_qvalues(if_ctx_t ctx) { if_softc_ctx_t scctx =
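/*
 * A minimal sketch of the vendor table iflib_device_probe() walks, for
 * a hypothetical xyz(4) driver (the IDs are invented for illustration):
 *
 *	static pci_vendor_info_t xyz_vendor_info_array[] = {
 *		PVID(0x1234, 0x5678, "XYZ Gigabit Ethernet"),
 *		PVID_END
 *	};
 *
 * A zero subvendor, subdevice, or revision in an entry matches any
 * value, per the comparison chain above.
 */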
&ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; int i; scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES; scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH; /* * XXX sanity check that ntxd & nrxd are a power of 2 */ if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } } } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { int err, rid, msix; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_unlock; } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; #ifdef INVARIANTS MPASS(scctx->isc_capabilities); if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 
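/*
 * The power-of-2 descriptor-count requirement enforced below exists so
 * that ring indices can wrap with a mask instead of a modulo, e.g. for
 * a 1024-entry ring: next = (cidx + 1) & (1024 - 1). This is the same
 * (ntxd-1) masking used in iflib_encap() above.
 */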
1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); for (i = 0; i < sctx->isc_nrxqs; i++) { if (!powerof2(scctx->isc_nrxd[i])) { /* round down instead? */ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; goto fail_iflib_detach; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "# tx descriptors must be a power of 2\n"); err = EINVAL; goto fail_iflib_detach; } } if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertise a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). */ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* Set up cpu set. If it fails, use the set of all CPUs. */ if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { device_printf(dev, "Unable to fetch CPU list\n"); CPU_COPY(&all_cpus, &ctx->ifc_cpus); } MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); /* ** Now set up MSI or MSI-X, should ** return us the number of supported ** vectors. (Will be 1 for MSI) */ if (sctx->isc_flags & IFLIB_SKIP_MSIX) { msix = scctx->isc_vectors; } else if (scctx->isc_msix_bar != 0) /* * The simple fact that isc_msix_bar is not 0 does not mean we * have a good value there that is known to work. */ msix = iflib_msix_init(ctx); else { scctx->isc_vectors = 1; scctx->isc_ntxqsets = 1; scctx->isc_nrxqsets = 1; scctx->isc_intr = IFLIB_INTR_LEGACY; msix = 0; } /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_intr_free; } if ((err = iflib_qset_structures_setup(ctx))) goto fail_queues; /* * Group taskqueues aren't properly set up until SMP is started, * so we disable interrupts until we can handle them post * SI_SUB_SMP. * * XXX: disabling interrupts doesn't actually work, at least for * the non-MSI case. When they occur before SI_SUB_SMP completes, * we do null handling and depend on this not causing too large an * interrupt storm.
*/ IFDI_INTR_DISABLE(ctx); if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); goto fail_queues; } if (msix <= 1) { rid = 0; if (scctx->isc_intr == IFLIB_INTR_MSI) { MPASS(msix == 1); rid = 1; } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); goto fail_queues; } } ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. */ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if ((err = iflib_netmap_attach(ctx))) { device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); goto fail_detach; } *ctxp = ctx; NETDUMP_SET(ctx->ifc_ifp, iflib); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: iflib_free_intr_mem(ctx); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); fail_iflib_detach: IFDI_DETACH(ctx); fail_unlock: CTX_UNLOCK(ctx); fail_ctx_free: if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, struct iflib_cloneattach_ctx *clctx) { int err; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; void *sc; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); ctx->ifc_flags |= IFC_SC_ALLOCATED; if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) ctx->ifc_flags |= IFC_PSEUDO; ctx->ifc_sctx = sctx; ctx->ifc_softc = sc; ctx->ifc_dev = dev; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; /* * XXX sanity check that ntxd & nrxd are a power of 2 */ iflib_reset_qvalues(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_ctx_free; } if (sctx->isc_flags & IFLIB_GEN_MAC) iflib_gen_mac(ctx); if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, clctx->cc_params)) != 0) { device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); goto fail_ctx_free; } ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO); #ifdef INVARIANTS MPASS(scctx->isc_capabilities); if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); ifp->if_flags |= IFF_NOGROUP; if (sctx->isc_flags & IFLIB_PSEUDO) { ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } *ctxp = ctx; /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 
* This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. */ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; return (0); } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); for (i = 0; i < sctx->isc_nrxqs; i++) { if (!powerof2(scctx->isc_nrxd[i])) { /* round down instead? */ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; goto fail_iflib_detach; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "# tx descriptors must be a power of 2\n"); err = EINVAL; goto fail_iflib_detach; } } if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertise a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). */ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* XXX --- can support > 1 -- but keep it simple for now */ scctx->isc_intr = IFLIB_INTR_LEGACY; /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_iflib_detach; } if ((err = iflib_qset_structures_setup(ctx))) { device_printf(dev, "qset structure setup failed %d\n", err); goto fail_queues; } /* * XXX What if anything do we want to do about interrupts? */ ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
* This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. */ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); /* XXX handle more than one queue */ for (i = 0; i < scctx->isc_nrxqsets; i++) IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); *ctxp = ctx; if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); fail_iflib_detach: IFDI_DETACH(ctx); fail_ctx_free: free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); if (ctx->ifc_vlan_detach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); ether_ifdetach(ifp); /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ CTX_LOCK_DESTROY(ctx); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); if_free(ifp); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_attach(device_t dev) { if_ctx_t ctx; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_enable_busmaster(dev); return (iflib_device_register(dev, NULL, sctx, &ctx)); } int iflib_device_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; device_t dev = ctx->ifc_dev; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Make sure VLANs are not using the driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev, "VLAN in use, detach first\n"); return (EBUSY); } #ifdef PCI_IOV if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (EBUSY); } #endif STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); CTX_LOCK(ctx); iflib_stop(ctx); CTX_UNLOCK(ctx); /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); if (ctx->ifc_vlan_detach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); iflib_netmap_detach(ifp); ether_ifdetach(ifp); if (ctx->ifc_led_dev != NULL) led_destroy(ctx->ifc_led_dev); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i
= 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); CTX_LOCK(ctx); IFDI_DETACH(ctx); CTX_UNLOCK(ctx); /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ CTX_LOCK_DESTROY(ctx); device_set_softc(ctx->ifc_dev, NULL); iflib_free_intr_mem(ctx); bus_generic_detach(dev); if_free(ifp); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); STATE_LOCK_DESTROY(ctx); free(ctx, M_IFLIB); return (0); } static void iflib_free_intr_mem(if_ctx_t ctx) { if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(ctx->ifc_dev); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } } int iflib_device_detach(device_t dev) { if_ctx_t ctx = device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize);
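/*
 * These assertions spell out the contract: a driver must fill in its
 * if_shared_ctx before registering. A hedged sketch of the asserted
 * fields for a hypothetical driver (values are illustrative only):
 *
 *	.isc_tx_maxsize = IP_MAXPACKET, .isc_tx_maxsegsize = PAGE_SIZE,
 *	.isc_rx_maxsize = MJUMPAGESIZE, .isc_rx_nsegments = 1,
 *	.isc_rx_maxsegsize = MJUMPAGESIZE,
 *	.isc_nrxd_min = {32}, .isc_nrxd_max = {4096}, .isc_nrxd_default = {1024},
 *	.isc_ntxd_min = {32}, .isc_ntxd_max = {4096}, .isc_ntxd_default = {1024},
 */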
MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxd_min[0]); MPASS(sctx->isc_nrxd_max[0]); MPASS(sctx->isc_nrxd_default[0]); MPASS(sctx->isc_ntxd_min[0]); MPASS(sctx->isc_ntxd_max[0]); MPASS(sctx->isc_ntxd_default[0]); } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); MPASS(scctx->isc_txrx->ift_rxd_available); MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; _iflib_assert(sctx); CTX_LOCK_INIT(ctx); STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); driver->refs++; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); #ifdef ALTQ if_setstartfn(ifp, iflib_altq_if_start); if_settransmitfn(ifp, iflib_altq_if_transmit); if_setsendqready(ifp); #else if_settransmitfn(ifp, iflib_if_transmit); #endif if_setqflushfn(ifp, iflib_if_qflush); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); ifmedia_init(&ctx->ifc_media, IFM_IMASK, iflib_media_change, iflib_media_status); return (0); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ? 
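/*
 * Layout sketch for the allocations below: each of the ntxqsets TX
 * queue sets gets ntxqs DMA rings, and each of the nrxqsets RX sets
 * gets nrxqs rings. For example, a driver that sets IFLIB_HAS_RXCQ and
 * isc_nfl = 2 would use nrxqs = 3 per set: ifr_ifdi[0] for the
 * completion ring and, via ifr_fl_offset = 1, ifr_ifdi[1..2] for the
 * two free-list descriptor rings.
 */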
sctx->isc_nfl : 1; caddr_t *vaddrs; uint64_t *paddrs; KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); /* Allocate the TX ring struct memory */ if (!(ctx->ifc_txqs = (iflib_txq_t) malloc(sizeof(struct iflib_txq) * ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); err = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(ctx->ifc_rxqs = (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); err = ENOMEM; goto rx_fail; } txq = ctx->ifc_txqs; rxq = ctx->ifc_rxqs; /* * XXX handle allocation failure */ for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_ifdi = ifdip; for (j = 0; j < ntxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_txd_size[j] = scctx->isc_txd_size[j]; bzero((void *)ifdip->idi_vaddr, txqsizes[j]); } txq->ift_ctx = ctx; txq->ift_id = i; if (sctx->isc_flags & IFLIB_HAS_TXCQ) { txq->ift_br_offset = 1; } else { txq->ift_br_offset = 0; } /* XXX fix this */ txq->ift_timer.c_cpu = cpu; if (iflib_txsd_alloc(txq)) { device_printf(dev, "Critical Failure setting up TX buffers\n"); err = ENOMEM; goto err_tx_desc; } /* Initialize the TX lock */ snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", device_get_nameunit(dev), txq->ift_id); mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", device_get_nameunit(dev), txq->ift_id); err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, iflib_txq_can_drain, M_IFLIB, M_WAITOK); if (err) { /* XXX free any allocated rings */ device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_ifdi = ifdip; /* XXX this needs to be changed if #rx queues != #tx queues */ rxq->ifr_ntxqirq = 1; rxq->ifr_txqid[0] = i; for (j = 0; j < nrxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); } rxq->ifr_ctx = ctx; rxq->ifr_id = i; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { rxq->ifr_fl_offset = 1; } else { rxq->ifr_fl_offset = 0; } rxq->ifr_nfl = nfree_lists; if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate free list memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { fl[j].ifl_rxq = rxq; fl[j].ifl_id = j; fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; } /* Allocate receive buffers for the ring */ if (iflib_rxsd_alloc(rxq)) { device_printf(dev, 
"Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: rx_fail: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; if_shared_ctx_t sctx = ctx->ifc_sctx; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { iflib_txq_destroy(txq); for (j = 0; j < sctx->isc_ntxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int i, err; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) tcp_lro_free(&rxq->ifr_lc); if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } rxq->ifr_lro_enabled = TRUE; #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free RX software descriptors allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so its the terminus. */ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { iflib_rx_sds_free(rxq); rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } return (err); #endif } /********************************************************************* * * Free all receive rings. 
* **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { iflib_rx_sds_free(rxq); } free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; /* * It is expected that the caller takes care of freeing queues if this * fails. */ if ((err = iflib_tx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); return (err); } if ((err = iflib_rx_structures_setup(ctx)) != 0) device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } #ifdef SMP static int find_nth(if_ctx_t ctx, int qid) { cpuset_t cpus; int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, &cpus); count = CPU_COUNT(&cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, &cpus); } cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); return (cpuid-1); } #ifdef SCHED_ULE extern struct cpu_group *cpu_top; /* CPU topology */ static int find_child_with_core(int cpu, struct cpu_group *grp) { int i; if (grp->cg_children == 0) return -1; MPASS(grp->cg_child); for (i = 0; i < grp->cg_children; i++) { if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) return i; } return -1; } /* * Find the nth "close" core to the specified core. * "Close" is defined as the deepest level that shares * at least an L2 cache. With threads, this will be * threads on the same core. If the shared cache is L3 * or higher, simply returns the same core. */ static int find_close_core(int cpu, int core_offset) { struct cpu_group *grp; int i; int fcpu; cpuset_t cs; grp = cpu_top; if (grp == NULL) return cpu; i = 0; while ((i = find_child_with_core(cpu, grp)) != -1) { /* If the child only has one cpu, don't descend */ if (grp->cg_child[i].cg_count <= 1) break; grp = &grp->cg_child[i]; } /* If they don't share at least an L2 cache, use the same CPU */ if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) return cpu; /* Now pick one */ CPU_COPY(&grp->cg_mask, &cs); /* Add the selected CPU offset to core offset.
*/ for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) { if (fcpu - 1 == cpu) break; CPU_CLR(fcpu - 1, &cs); } MPASS(fcpu); core_offset += i; CPU_COPY(&grp->cg_mask, &cs); for (i = core_offset % grp->cg_count; i > 0; i--) { MPASS(CPU_FFS(&cs)); CPU_CLR(CPU_FFS(&cs) - 1, &cs); } MPASS(CPU_FFS(&cs)); return CPU_FFS(&cs) - 1; } #else static int find_close_core(int cpu, int core_offset __unused) { return cpu; } #endif static int get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid) { switch (type) { case IFLIB_INTR_TX: /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */ /* XXX handle multiple RX threads per core and more than two cores per L2 group */ return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; case IFLIB_INTR_RX: case IFLIB_INTR_RXTX: /* RX queues get the specified core */ return qid / CPU_COUNT(&ctx->ifc_cpus); default: return -1; } } #else #define get_core_offset(ctx, type, qid) CPU_FIRST() #define find_close_core(cpuid, tid) CPU_FIRST() #define find_nth(ctx, gid) CPU_FIRST() #endif /* Just to avoid copy/paste */ static inline int iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name) { int cpuid; int err, tid; cpuid = find_nth(ctx, qid); tid = get_core_offset(ctx, type, qid); MPASS(tid >= 0); cpuid = find_close_core(cpuid, tid); err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name); if (err) { device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err); return (err); } #ifdef notyet if (cpuid > ctx->ifc_cpuid_highest) ctx->ifc_cpuid_highest = cpuid; #endif return 0; } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, const char *name) { struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; gtask_fn_t *fn; int tqrid, err; driver_filter_t *intr_fast; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap?
*/ case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; info = &ctx->ifc_txqs[qid].ift_filter_info; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RXTX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr_rxtx; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_ADMIN: q = ctx; tqrid = -1; info = &ctx->ifc_filter_info; gtask = &ctx->ifc_admin_task; tqg = qgroup_if_config_tqg; fn = _task_fn_admin; intr_fast = iflib_fast_intr_ctx; break; default: panic("unknown net intr type"); } info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = q; err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); if (err != 0) { device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); return (err); } if (type == IFLIB_INTR_ADMIN) return (0); if (tqrid != -1) { err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); if (err) return (err); } else { taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); } return (0); } void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) { struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int irq_num = -1; int err; switch (type) { case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; if (irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; if (irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_IOV: q = ctx; gtask = &ctx->ifc_vflr_task; tqg = qgroup_if_config_tqg; fn = _task_fn_iov; break; default: panic("unknown net intr type"); } GROUPTASK_INIT(gtask, 0, fn, q); if (irq_num != -1) { err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); if (err) taskqgroup_attach(tqg, gtask, q, irq_num, name); } else { taskqgroup_attach(tqg, gtask, q, irq_num, name); } } void iflib_irq_free(if_ctx_t ctx, if_irq_t irq) { if (irq->ii_tag) bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); if (irq->ii_res) bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); } static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_irq_t irq = &ctx->ifc_legacy_irq; iflib_filter_info_t info; struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; int tqrid; void *q; int err; q = &ctx->ifc_rxqs[0]; info = &rxq[0].ifr_filter_info; gtask = &rxq[0].ifr_task; tqg = qgroup_if_io_tqg; tqrid = irq->ii_rid = *rid; fn = _task_fn_rx; ctx->ifc_flags |= IFC_LEGACY; info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = ctx; /* We allocate a single interrupt resource */ if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0) 
return (err); GROUPTASK_INIT(gtask, 0, fn, q); taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx"); return (0); } void iflib_led_create(if_ctx_t ctx) { ctx->ifc_led_dev = led_create(iflib_led_func, ctx, device_get_nameunit(ctx->ifc_dev)); } void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) { GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) { GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); } void iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); if (baudrate >= IF_Gbps(10)) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_PREFETCH; STATE_UNLOCK(ctx); } /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif if (ctx->isc_txd_credits_update == NULL) return (0); bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) { return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct sx * iflib_ctx_lock_get(if_ctx_t ctx) { return (&ctx->ifc_ctx_sx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; int iflib_num_tx_queues, iflib_num_rx_queues; int err, admincnt, bar; iflib_num_tx_queues = 
ctx->ifc_sysctl_ntxqs; iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); bar = ctx->ifc_softc_ctx.isc_msix_bar; admincnt = sctx->isc_admin_intrcnt; /* Override by tuneable */ if (scctx->isc_disable_msix) goto msi; /* * bar == -1 => "trust me I know what I'm doing" * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use msix in this framework. */ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { /* May not be enabled */ device_printf(dev, "Unable to map MSIX table \n"); goto msi; } } /* First try MSI/X */ if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ device_printf(dev, "System has MSIX disabled \n"); bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; goto msi; } #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; if (rx_queues > scctx->isc_nrxqsets) rx_queues = scctx->isc_nrxqsets; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (tx_queues > scctx->isc_ntxqsets) tx_queues = scctx->isc_ntxqsets; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); vectors = rx_queues + admincnt; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { device_printf(dev, "Using MSIX interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { scctx->isc_vectors = 1; device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; const char 
*ring_state = "UNKNOWN"; /* XXX needed ? */ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; qidx_t *ndesc; char *p, *next; int nqs, rc, i; MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; default: panic("unhandled type"); } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, "disable MSIX (default 0)"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the rx budget"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate", CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0, "cause tx to abdicate instead of running to completion"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of tx descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of rx descriptors to use, 0 = use default #"); } static void iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct 
sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt = "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times dma map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors processed for clean"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD,
&txq->ift_br->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications, "# of consumer abdications in the mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", CTLFLAG_RD, &rxq->ifr_cq_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } void iflib_request_reset(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; STATE_UNLOCK(ctx); } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m) { struct mbuf *n; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } return (n); } #endif #ifdef NETDUMP static void iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { if_ctx_t ctx; ctx = if_getsoftc(ifp); CTX_LOCK(ctx); *nrxr = NRXQSETS(ctx); *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size; *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size; CTX_UNLOCK(ctx); } static void iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event) { if_ctx_t ctx; if_softc_ctx_t scctx; iflib_fl_t fl; iflib_rxq_t rxq; int i, j; ctx = if_getsoftc(ifp); scctx = &ctx->ifc_softc_ctx; switch (event) { case NETDUMP_START: for (i = 0; i < scctx->isc_nrxqsets; i++) { rxq = &ctx->ifc_rxqs[i]; for (j = 0; j < rxq->ifr_nfl; j++) { fl = rxq->ifr_fl; fl->ifl_zone = m_getzone(fl->ifl_buf_size); } } iflib_no_tx_batch = 1; break; default: break; } } static int iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m) { if_ctx_t ctx; iflib_txq_t txq; int error; ctx = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return 
(EBUSY); txq = &ctx->ifc_txqs[0]; error = iflib_encap(txq, &m); if (error == 0) (void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use); return (error); } static int iflib_netdump_poll(struct ifnet *ifp, int count) { if_ctx_t ctx; if_softc_ctx_t scctx; iflib_txq_t txq; int i; ctx = if_getsoftc(ifp); scctx = &ctx->ifc_softc_ctx; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); txq = &ctx->ifc_txqs[0]; (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); for (i = 0; i < scctx->isc_nrxqsets; i++) (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */); return (0); } #endif /* NETDUMP */ Index: head/sys/net/iflib.h =================================================================== --- head/sys/net/iflib.h (revision 343290) +++ head/sys/net/iflib.h (revision 343291) @@ -1,451 +1,455 @@ /*- * Copyright (c) 2014-2017, Matthew Macy (mmacy@mattmacy.io) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __IFLIB_H_ #define __IFLIB_H_ #include #include #include #include #include #include struct if_clone; /* * The value type for indexing, limits max descriptors * to 65535 can be conditionally redefined to uint32_t * in the future if the need arises. 
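
A 16-bit qidx_t caps rings at 65535 descriptors, and it also means producer/consumer arithmetic must handle wrap explicitly rather than lean on unsigned overflow. An illustrative helper (not taken from iflib; the convention of keeping one slot empty is an assumption of this sketch) for computing free space:

#include <stdint.h>

typedef uint16_t qidx_t;	/* mirrors the definition below */

/*
 * Free entries in a 'size'-entry ring, with one slot kept empty so
 * that a full ring is distinguishable from an empty one.
 */
static inline qidx_t
ring_avail(qidx_t pidx, qidx_t cidx, qidx_t size)
{
	return ((pidx >= cidx) ? size - 1 - (pidx - cidx) :
	    cidx - pidx - 1);
}

Compare iflib_tx_credits_update() earlier, which wraps ift_cidx_processed by subtracting ift_size after adding the driver-reported credits.
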
*/ typedef uint16_t qidx_t; #define QIDX_INVALID 0xFFFF struct iflib_ctx; typedef struct iflib_ctx *if_ctx_t; struct if_shared_ctx; typedef struct if_shared_ctx *if_shared_ctx_t; struct if_int_delay_info; typedef struct if_int_delay_info *if_int_delay_info_t; struct if_pseudo; typedef struct if_pseudo *if_pseudo_t; /* * File organization: * - public structures * - iflib accessors * - iflib utility functions * - iflib core functions */ typedef struct if_rxd_frag { uint8_t irf_flid; qidx_t irf_idx; uint16_t irf_len; } *if_rxd_frag_t; +/* bnxt supports 64 with hardware LRO enabled */ +#define IFLIB_MAX_RX_SEGS 64 + typedef struct if_rxd_info { /* set by iflib */ uint16_t iri_qsidx; /* qset index */ uint16_t iri_vtag; /* vlan tag - if flag set */ /* XXX redundant with the new irf_len field */ uint16_t iri_len; /* packet length */ qidx_t iri_cidx; /* consumer index of cq */ struct ifnet *iri_ifp; /* some drivers >1 interface per softc */ /* updated by driver */ if_rxd_frag_t iri_frags; uint32_t iri_flowid; /* RSS hash for packet */ uint32_t iri_csum_flags; /* m_pkthdr csum flags */ uint32_t iri_csum_data; /* m_pkthdr csum data */ uint8_t iri_flags; /* mbuf flags for packet */ uint8_t iri_nfrags; /* number of fragments in packet */ uint8_t iri_rsstype; /* RSS hash type */ uint8_t iri_pad; /* any padding in the received data */ } *if_rxd_info_t; typedef struct if_rxd_update { uint64_t *iru_paddrs; caddr_t *iru_vaddrs; qidx_t *iru_idxs; qidx_t iru_pidx; uint16_t iru_qsidx; uint16_t iru_count; uint16_t iru_buf_size; uint8_t iru_flidx; } *if_rxd_update_t; #define IPI_TX_INTR 0x1 /* send an interrupt when this packet is sent */ #define IPI_TX_IPV4 0x2 /* ethertype IPv4 */ #define IPI_TX_IPV6 0x4 /* ethertype IPv6 */ typedef struct if_pkt_info { bus_dma_segment_t *ipi_segs; /* physical addresses */ uint32_t ipi_len; /* packet length */ uint16_t ipi_qsidx; /* queue set index */ qidx_t ipi_nsegs; /* number of segments */ qidx_t ipi_ndescs; /* number of descriptors used by encap */ uint16_t ipi_flags; /* iflib per-packet flags */ qidx_t ipi_pidx; /* start pidx for encap */ qidx_t ipi_new_pidx; /* next available pidx post-encap */ /* offload handling */ uint8_t ipi_ehdrlen; /* ether header length */ uint8_t ipi_ip_hlen; /* ip header length */ uint8_t ipi_tcp_hlen; /* tcp header length */ uint8_t ipi_ipproto; /* ip protocol */ uint32_t ipi_csum_flags; /* packet checksum flags */ uint16_t ipi_tso_segsz; /* tso segment size */ uint16_t ipi_vtag; /* VLAN tag */ uint16_t ipi_etype; /* ether header type */ uint8_t ipi_tcp_hflags; /* tcp header flags */ uint8_t ipi_mflags; /* packet mbuf flags */ uint32_t ipi_tcp_seq; /* tcp seqno */ uint32_t ipi_tcp_sum; /* tcp csum */ } *if_pkt_info_t; typedef struct if_irq { struct resource *ii_res; int ii_rid; void *ii_tag; } *if_irq_t; struct if_int_delay_info { if_ctx_t iidi_ctx; /* Back-pointer to the iflib ctx (softc) */ int iidi_offset; /* Register offset to read/write */ int iidi_value; /* Current value in usecs */ struct sysctl_oid *iidi_oidp; struct sysctl_req *iidi_req; }; typedef enum { IFLIB_INTR_LEGACY, IFLIB_INTR_MSI, IFLIB_INTR_MSIX } iflib_intr_mode_t; /* * This really belongs in pciio.h or some place more general * but this is the only consumer for now. 
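
The PVID macros defined just below are how a driver publishes the probe table that iflib_device_probe() matches against and that devmatch(8) reads via IFLIB_PNP_INFO. A hypothetical example (all IDs and names here are placeholders):

static pci_vendor_info_t xx_vendor_info_array[] = {
	PVID(0x1234, 0x0001, "Example Gigabit Ethernet Adapter"),
	PVID_OEM(0x1234, 0x0001, 0x5678, 0x9abc, 0x02,
	    "Example Gigabit Ethernet Adapter (OEM)"),
	/* required last entry */
	PVID_END
};
IFLIB_PNP_INFO(pci, xx, xx_vendor_info_array);
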
*/ typedef struct pci_vendor_info { uint32_t pvi_vendor_id; uint32_t pvi_device_id; uint32_t pvi_subvendor_id; uint32_t pvi_subdevice_id; uint32_t pvi_rev_id; uint32_t pvi_class_mask; caddr_t pvi_name; } pci_vendor_info_t; #define PVID(vendor, devid, name) {vendor, devid, 0, 0, 0, 0, name} #define PVID_OEM(vendor, devid, svid, sdevid, revid, name) {vendor, devid, svid, sdevid, revid, 0, name} #define PVID_END {0, 0, 0, 0, 0, 0, NULL} #define IFLIB_PNP_DESCR "U32:vendor;U32:device;U32:subvendor;U32:subdevice;" \ "U32:revision;U32:class;D:#" #define IFLIB_PNP_INFO(b, u, t) \ MODULE_PNP_INFO(IFLIB_PNP_DESCR, b, u, t, nitems(t) - 1) typedef struct if_txrx { int (*ift_txd_encap) (void *, if_pkt_info_t); void (*ift_txd_flush) (void *, uint16_t, qidx_t pidx); int (*ift_txd_credits_update) (void *, uint16_t qsidx, bool clear); int (*ift_rxd_available) (void *, uint16_t qsidx, qidx_t pidx, qidx_t budget); int (*ift_rxd_pkt_get) (void *, if_rxd_info_t ri); void (*ift_rxd_refill) (void * , if_rxd_update_t iru); void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, qidx_t pidx); int (*ift_legacy_intr) (void *); } *if_txrx_t; typedef struct if_softc_ctx { int isc_vectors; int isc_nrxqsets; int isc_ntxqsets; uint8_t isc_min_tx_latency; /* disable doorbell update batching */ uint8_t isc_rx_mvec_enable; /* generate mvecs on rx */ uint32_t isc_txrx_budget_bytes_max; int isc_msix_bar; /* can be model specific - initialize in attach_pre */ int isc_tx_nsegments; /* can be model specific - initialize in attach_pre */ int isc_ntxd[8]; int isc_nrxd[8]; uint32_t isc_txqsizes[8]; uint32_t isc_rxqsizes[8]; /* is there such thing as a descriptor that is more than 248 bytes ? */ uint8_t isc_txd_size[8]; uint8_t isc_rxd_size[8]; int isc_tx_tso_segments_max; int isc_tx_tso_size_max; int isc_tx_tso_segsize_max; int isc_tx_csum_flags; int isc_capabilities; int isc_capenable; int isc_rss_table_size; int isc_rss_table_mask; int isc_nrxqsets_max; int isc_ntxqsets_max; uint32_t isc_tx_qdepth; iflib_intr_mode_t isc_intr; uint16_t isc_max_frame_size; /* set at init time by driver */ uint16_t isc_min_frame_size; /* set at init time by driver, only used if IFLIB_NEED_ETHER_PAD is set. 
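
The if_txrx vector above is the contract a driver signs with iflib: every descriptor-level operation goes through these callbacks. A sketch of how a driver with hypothetical xx_isc_* implementations might wire it up, typically from its attach-pre path:

static struct if_txrx xx_txrx = {
	.ift_txd_encap = xx_isc_txd_encap,
	.ift_txd_flush = xx_isc_txd_flush,
	.ift_txd_credits_update = xx_isc_txd_credits_update,
	.ift_rxd_available = xx_isc_rxd_available,
	.ift_rxd_pkt_get = xx_isc_rxd_pkt_get,
	.ift_rxd_refill = xx_isc_rxd_refill,
	.ift_rxd_flush = xx_isc_rxd_flush,
	.ift_legacy_intr = xx_legacy_intr
};

/* In the driver's IFDI_ATTACH_PRE implementation: */
if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
scctx->isc_txrx = &xx_txrx;
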
*/ uint32_t isc_pause_frames; /* set by driver for iflib_timer to detect */ pci_vendor_info_t isc_vendor_info; /* set by iflib prior to attach_pre */ int isc_disable_msix; if_txrx_t isc_txrx; } *if_softc_ctx_t; /* * Initialization values for device */ struct if_shared_ctx { unsigned isc_magic; driver_t *isc_driver; bus_size_t isc_q_align; bus_size_t isc_tx_maxsize; bus_size_t isc_tx_maxsegsize; bus_size_t isc_tso_maxsize; bus_size_t isc_tso_maxsegsize; bus_size_t isc_rx_maxsize; bus_size_t isc_rx_maxsegsize; int isc_rx_nsegments; int isc_admin_intrcnt; /* # of admin/link interrupts */ /* fields necessary for probe */ pci_vendor_info_t *isc_vendor_info; char *isc_driver_version; /* optional function to transform the read values to match the table*/ void (*isc_parse_devinfo) (uint16_t *device_id, uint16_t *subvendor_id, uint16_t *subdevice_id, uint16_t *rev_id); int isc_nrxd_min[8]; int isc_nrxd_default[8]; int isc_nrxd_max[8]; int isc_ntxd_min[8]; int isc_ntxd_default[8]; int isc_ntxd_max[8]; /* actively used during operation */ int isc_nfl __aligned(CACHE_LINE_SIZE); int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */ int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */ int isc_rx_process_limit; int isc_tx_reclaim_thresh; int isc_flags; const char *isc_name; }; typedef struct iflib_dma_info { bus_addr_t idi_paddr; caddr_t idi_vaddr; bus_dma_tag_t idi_tag; bus_dmamap_t idi_map; uint32_t idi_size; } *iflib_dma_info_t; #define IFLIB_MAGIC 0xCAFEF00D typedef enum { IFLIB_INTR_RX, IFLIB_INTR_TX, IFLIB_INTR_RXTX, IFLIB_INTR_ADMIN, IFLIB_INTR_IOV, } iflib_intr_type_t; #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif /* * Interface has a separate command queue for RX */ #define IFLIB_HAS_RXCQ 0x01 /* * Driver has already allocated vectors */ #define IFLIB_SKIP_MSIX 0x02 /* * Interface is a virtual function */ #define IFLIB_IS_VF 0x04 /* * Interface has a separate command queue for TX */ #define IFLIB_HAS_TXCQ 0x08 /* * Interface does checksum in place */ #define IFLIB_NEED_SCRATCH 0x10 /* * Interface doesn't expect in_pseudo for th_sum */ #define IFLIB_TSO_INIT_IP 0x20 /* * Interface doesn't align IP header */ #define IFLIB_DO_RX_FIXUP 0x40 /* * Driver needs csum zeroed for offloading */ #define IFLIB_NEED_ZERO_CSUM 0x80 /* * Driver needs frames padded to some minimum length */ #define IFLIB_NEED_ETHER_PAD 0x100 /* * Packets can be freed immediately after encap */ #define IFLIB_TXD_ENCAP_PIO 0x00200 /* * Use RX completion handler */ #define IFLIB_RX_COMPLETION 0x00400 /* * Skip refilling cluster free lists */ #define IFLIB_SKIP_CLREFILL 0x00800 /* * Don't reset on hang */ #define IFLIB_NO_HANG_RESET 0x01000 /* * Don't need/want most of the niceties of * queue management */ #define IFLIB_PSEUDO 0x02000 /* * No DMA support needed / wanted */ #define IFLIB_VIRTUAL 0x04000 /* * autogenerate a MAC address */ #define IFLIB_GEN_MAC 0x08000 /* * Interface needs admin task to ignore interface up/down status */ #define IFLIB_ADMIN_ALWAYS_RUN 0x10000 /* * field accessors */ void *iflib_get_softc(if_ctx_t ctx); device_t iflib_get_dev(if_ctx_t ctx); if_t iflib_get_ifp(if_ctx_t ctx); struct ifmedia *iflib_get_media(if_ctx_t ctx); if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx); if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx); void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]); void iflib_request_reset(if_ctx_t ctx); uint8_t iflib_in_detach(if_ctx_t ctx); /* * If the driver can plug cleanly in to newbus use these */ int iflib_device_probe(device_t); int 
iflib_device_attach(device_t); int iflib_device_detach(device_t); int iflib_device_suspend(device_t); int iflib_device_resume(device_t); int iflib_device_shutdown(device_t); int iflib_device_iov_init(device_t, uint16_t, const nvlist_t *); void iflib_device_iov_uninit(device_t); int iflib_device_iov_add_vf(device_t, uint16_t, const nvlist_t *); /* * If the driver can't plug cleanly in to newbus * use these */ int iflib_device_register(device_t dev, void *softc, if_shared_ctx_t sctx, if_ctx_t *ctxp); int iflib_device_deregister(if_ctx_t); int iflib_irq_alloc(if_ctx_t, if_irq_t, int, driver_filter_t, void *filter_arg, driver_intr_t, void *arg, const char *name); int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, const char *name); void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name); void iflib_irq_free(if_ctx_t ctx, if_irq_t irq); void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name); void iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name); void iflib_config_gtask_deinit(struct grouptask *gtask); void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid); void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid); void iflib_admin_intr_deferred(if_ctx_t ctx); void iflib_iov_intr_deferred(if_ctx_t ctx); void iflib_link_state_change(if_ctx_t ctx, int linkstate, uint64_t baudrate); int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags); +int iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags); void iflib_dma_free(iflib_dma_info_t dma); int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count); void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count); struct sx *iflib_ctx_lock_get(if_ctx_t); struct mtx *iflib_qset_lock_get(if_ctx_t, uint16_t); void iflib_led_create(if_ctx_t ctx); void iflib_add_int_delay_sysctl(if_ctx_t, const char *, const char *, if_int_delay_info_t, int, int); /* * Pseudo device support */ if_pseudo_t iflib_clone_register(if_shared_ctx_t); void iflib_clone_deregister(if_pseudo_t); #endif /* __IFLIB_H_ */
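
Tying the header together: a driver that plugs cleanly into newbus forwards its device methods to the iflib_device_* entry points declared above and supplies a device_register method that returns its if_shared_ctx. A minimal hypothetical module skeleton (xx_register and struct xx_softc are assumed names):

static device_method_t xx_methods[] = {
	/* xx_register returns the driver's if_shared_ctx */
	DEVMETHOD(device_register, xx_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

static driver_t xx_driver = {
	"xx", xx_methods, sizeof(struct xx_softc)
};

static devclass_t xx_devclass;
DRIVER_MODULE(xx, pci, xx_driver, xx_devclass, 0, 0);
MODULE_DEPEND(xx, pci, 1, 1, 1);
MODULE_DEPEND(xx, ether, 1, 1, 1);
MODULE_DEPEND(xx, iflib, 1, 1, 1);
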