diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c index cea62517b2e6..d8995be7f1e9 100644 --- a/sys/dev/netmap/if_ptnet.c +++ b/sys/dev/netmap/if_ptnet.c @@ -1,1981 +1,1979 @@ /*- * Copyright (c) 2016, Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for ptnet paravirtualized network device. */ #include +#include "opt_inet.h" +#include "opt_inet6.h" -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include "opt_inet.h" -#include "opt_inet6.h" - #include #include #include #include #include #include #ifdef WITH_PTNETMAP #ifndef INET #error "INET not defined, cannot support offloadings" #endif static uint64_t ptnet_get_counter(if_t, ift_counter); //#define PTNETMAP_STATS //#define DEBUG #ifdef DEBUG #define DBG(x) x #else /* !DEBUG */ #define DBG(x) #endif /* !DEBUG */ extern int ptnet_vnet_hdr; /* Tunable parameter */ struct ptnet_softc; struct ptnet_queue_stats { uint64_t packets; /* if_[io]packets */ uint64_t bytes; /* if_[io]bytes */ uint64_t errors; /* if_[io]errors */ uint64_t iqdrops; /* if_iqdrops */ uint64_t mcasts; /* if_[io]mcasts */ #ifdef PTNETMAP_STATS uint64_t intrs; uint64_t kicks; #endif /* PTNETMAP_STATS */ }; struct ptnet_queue { struct ptnet_softc *sc; struct resource *irq; void *cookie; int kring_id; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ struct ptnet_queue_stats stats; #ifdef PTNETMAP_STATS struct ptnet_queue_stats last_stats; #endif /* PTNETMAP_STATS */ struct taskqueue *taskq; struct task task; char lock_name[16]; }; #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) struct ptnet_softc { device_t dev; if_t ifp; struct ifmedia media; struct mtx lock; char lock_name[16]; char hwaddr[ETHER_ADDR_LEN]; /* Mirror of PTFEAT register. */ uint32_t ptfeatures; unsigned int vnet_hdr_len; /* PCI BARs support. 
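 * Two BARs are used: sc->iomem maps the PTNETMAP_IO_PCI_BAR register
 * window (feature negotiation, ring counts, CSB setup and the
 * per-queue kick registers), while sc->msix_mem maps the
 * PTNETMAP_MSIX_PCI_BAR that holds the MSI-X table.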
*/ struct resource *iomem; struct resource *msix_mem; unsigned int num_rings; unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; struct nm_csb_atok *csb_gh; struct nm_csb_ktoa *csb_hg; unsigned int min_tx_space; struct netmap_pt_guest_adapter *ptna; struct callout tick; #ifdef PTNETMAP_STATS struct timeval last_ts; #endif /* PTNETMAP_STATS */ }; #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) static int ptnet_probe(device_t); static int ptnet_attach(device_t); static int ptnet_detach(device_t); static int ptnet_suspend(device_t); static int ptnet_resume(device_t); static int ptnet_shutdown(device_t); static void ptnet_init(void *opaque); static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); static int ptnet_init_locked(struct ptnet_softc *sc); static int ptnet_stop(struct ptnet_softc *sc); static int ptnet_transmit(if_t ifp, struct mbuf *m); static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_qflush(if_t ifp); static void ptnet_tx_task(void *context, int pending); static int ptnet_media_change(if_t ifp); static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); #ifdef PTNETMAP_STATS static void ptnet_tick(void *opaque); #endif static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); static int ptnet_nm_register(struct netmap_adapter *na, int onoff); static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); static void ptnet_tx_intr(void *opaque); static void ptnet_rx_intr(void *opaque); static unsigned ptnet_rx_discard(struct netmap_kring *kring, unsigned int head); static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_rx_task(void *context, int pending); #ifdef DEVICE_POLLING static poll_handler_t ptnet_poll; #endif static device_method_t ptnet_methods[] = { DEVMETHOD(device_probe, ptnet_probe), DEVMETHOD(device_attach, ptnet_attach), DEVMETHOD(device_detach, ptnet_detach), DEVMETHOD(device_suspend, ptnet_suspend), DEVMETHOD(device_resume, ptnet_resume), DEVMETHOD(device_shutdown, ptnet_shutdown), DEVMETHOD_END }; static driver_t ptnet_driver = { "ptnet", ptnet_methods, sizeof(struct ptnet_softc) }; /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
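 * (The intent, presumably, is to have this driver initialize only
 * after the netmap core module, which registers at an earlier
 * SI_ORDER, is up.)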
*/ DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL, SI_ORDER_MIDDLE + 2); static int ptnet_probe(device_t dev) { if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { return (ENXIO); } device_set_desc(dev, "ptnet network adapter"); return (BUS_PROBE_DEFAULT); } static inline void ptnet_kick(struct ptnet_queue *pq) { #ifdef PTNETMAP_STATS pq->stats.kicks ++; #endif /* PTNETMAP_STATS */ bus_write_4(pq->sc->iomem, pq->kick, 0); } #define PTNET_BUF_RING_SIZE 4096 #define PTNET_RX_BUDGET 512 #define PTNET_RX_BATCH 1 #define PTNET_TX_BUDGET 512 #define PTNET_TX_BATCH 64 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) #define PTNET_MAX_PKT_SIZE 65536 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ PTNET_CSUM_OFFLOAD_IPV6) static int ptnet_attach(device_t dev) { uint32_t ptfeatures = 0; unsigned int num_rx_rings, num_tx_rings; struct netmap_adapter na_arg; unsigned int nifp_offset; struct ptnet_softc *sc; if_t ifp; uint32_t macreg; int err, rid; int i; sc = device_get_softc(dev); sc->dev = dev; /* Setup PCI resources. */ pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->iomem == NULL) { device_printf(dev, "Failed to map I/O BAR\n"); return (ENXIO); } /* Negotiate features with the hypervisor. */ if (ptnet_vnet_hdr) { ptfeatures |= PTNETMAP_F_VNET_HDR; } bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ sc->ptfeatures = ptfeatures; num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { device_printf(dev, "CSB cannot handle that many rings (%u)\n", sc->num_rings); err = ENOMEM; goto err_path; } /* Allocate CSB and carry out CSB allocation protocol. */ sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (sc->csb_gh == NULL) { device_printf(dev, "Failed to allocate CSB\n"); err = ENOMEM; goto err_path; } sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); { /* * We use uint64_t rather than vm_paddr_t since we * need 64 bit addresses even on 32 bit platforms. */ uint64_t paddr = vtophys(sc->csb_gh); /* CSB allocation protocol: write to BAH first, then * to BAL (for both GH and HG sections). */ bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, paddr & 0xffffffff); paddr = vtophys(sc->csb_hg); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, paddr & 0xffffffff); } /* Allocate and initialize per-queue data structures. 
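 * Layout of the array allocated below: entries [0, num_tx_rings-1]
 * are TX queues, entries [num_tx_rings, num_rings-1] are RX queues,
 * also reachable through the sc->rxqueues alias. Only TX queues get
 * a buf_ring; RX queues instead have their kring_id rebased to
 * start from zero.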
*/ sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->queues == NULL) { err = ENOMEM; goto err_path; } sc->rxqueues = sc->queues + num_tx_rings; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; pq->atok = sc->csb_gh + i; pq->ktoa = sc->csb_hg + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); if (i >= num_tx_rings) { /* RX queue: fix kring_id. */ pq->kring_id -= num_tx_rings; } else { /* TX queue: allocate buf_ring. */ pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, M_DEVBUF, M_NOWAIT, &pq->lock); if (pq->bufring == NULL) { err = ENOMEM; goto err_path; } } } sc->min_tx_space = 64; /* Safe initial value. */ err = ptnet_irqs_init(sc); if (err) { goto err_path; } /* Setup Ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setbaudrate(ifp, IF_Gbps(10)); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); if_setinitfn(ifp, ptnet_init); if_setioctlfn(ifp, ptnet_ioctl); if_setget_counter(ifp, ptnet_get_counter); if_settransmitfn(ifp, ptnet_transmit); if_setqflushfn(ifp, ptnet_qflush); ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, ptnet_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); sc->hwaddr[0] = (macreg >> 8) & 0xff; sc->hwaddr[1] = macreg & 0xff; macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); sc->hwaddr[2] = (macreg >> 24) & 0xff; sc->hwaddr[3] = (macreg >> 16) & 0xff; sc->hwaddr[4] = (macreg >> 8) & 0xff; sc->hwaddr[5] = macreg & 0xff; ether_ifattach(ifp, sc->hwaddr); if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0); if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { /* Similarly to what the vtnet driver does, we can emulate * VLAN offloadings by inserting and removing the 802.1Q * header during transmit and receive. We are then able * to do checksum offloading of VLAN frames. */ if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWTAGGING, 0); } if_setcapenable(ifp, if_getcapabilities(ifp)); #ifdef DEVICE_POLLING /* Don't enable polling by default. */ if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); #endif snprintf(sc->lock_name, sizeof(sc->lock_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); callout_init_mtx(&sc->tick, &sc->lock, 0); /* Prepare a netmap_adapter struct instance to do netmap_attach(). 
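 * na_arg is only an on-stack template: netmap_pt_guest_attach()
 * copies what it needs and allocates the persistent adapter, which
 * is then retrieved through NA(ifp) below.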
*/ nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); memset(&na_arg, 0, sizeof(na_arg)); na_arg.ifp = ifp; na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); na_arg.num_tx_rings = num_tx_rings; na_arg.num_rx_rings = num_rx_rings; na_arg.nm_config = ptnet_nm_config; na_arg.nm_krings_create = ptnet_nm_krings_create; na_arg.nm_krings_delete = ptnet_nm_krings_delete; na_arg.nm_dtor = ptnet_nm_dtor; na_arg.nm_intr = ptnet_nm_intr; na_arg.nm_register = ptnet_nm_register; na_arg.nm_txsync = ptnet_nm_txsync; na_arg.nm_rxsync = ptnet_nm_rxsync; netmap_pt_guest_attach(&na_arg, nifp_offset, bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); /* Now a netmap adapter for this ifp has been allocated, and it * can be accessed through NA(ifp). We also have to initialize the CSB * pointer. */ sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); /* If virtio-net header was negotiated, set the virt_hdr_len field in * the netmap adapter, to inform users that this netmap adapter requires * the application to deal with the headers. */ ptnet_update_vnet_hdr(sc); device_printf(dev, "%s() completed\n", __func__); return (0); err_path: ptnet_detach(dev); return err; } /* Stop host sync-kloop if it was running. */ static void ptnet_device_shutdown(struct ptnet_softc *sc) { ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); } static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; ptnet_device_shutdown(sc); #ifdef DEVICE_POLLING if (if_getcapenable(sc->ifp) & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); } #endif callout_drain(&sc->tick); if (sc->queues) { /* Drain taskqueues before calling if_detach. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } } if (sc->ifp) { ether_ifdetach(sc->ifp); /* Uninitialize netmap adapters for this device. 
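 * netmap_detach() tears down the adapters that were installed by
 * netmap_pt_guest_attach() at attach time.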
*/ netmap_detach(sc->ifp); ifmedia_removeall(&sc->media); if_free(sc->ifp); sc->ifp = NULL; } ptnet_irqs_fini(sc); if (sc->csb_gh) { contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); sc->csb_gh = NULL; sc->csb_hg = NULL; } if (sc->queues) { for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (mtx_initialized(&pq->lock)) { mtx_destroy(&pq->lock); } if (pq->bufring != NULL) { buf_ring_free(pq->bufring, M_DEVBUF); } } free(sc->queues, M_DEVBUF); sc->queues = NULL; } if (sc->iomem) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); sc->iomem = NULL; } mtx_destroy(&sc->lock); device_printf(dev, "%s() completed\n", __func__); return (0); } static int ptnet_suspend(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_resume(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_shutdown(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); ptnet_device_shutdown(sc); return (0); } static int ptnet_irqs_init(struct ptnet_softc *sc) { int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); int nvecs = sc->num_rings; device_t dev = sc->dev; int err = ENOSPC; int cpu_cur; int i; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { device_printf(dev, "Could not find MSI-X capability\n"); return (ENXIO); } sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_mem == NULL) { device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); return (ENXIO); } if (pci_msix_count(dev) < nvecs) { device_printf(dev, "Not enough MSI-X vectors\n"); goto err_path; } err = pci_alloc_msix(dev, &nvecs); if (err) { device_printf(dev, "Failed to allocate MSI-X vectors\n"); goto err_path; } for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; rid = i + 1; pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (pq->irq == NULL) { device_printf(dev, "Failed to allocate interrupt " "for queue #%d\n", i); err = ENOSPC; goto err_path; } } cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; void (*handler)(void *) = ptnet_tx_intr; if (i >= sc->num_tx_rings) { handler = ptnet_rx_intr; } err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL /* intr_filter */, handler, pq, &pq->cookie); if (err) { device_printf(dev, "Failed to register intr handler " "for queue #%d\n", i); goto err_path; } bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); #if 0 bus_bind_intr(sc->dev, pq->irq, cpu_cur); #endif cpu_cur = CPU_NEXT(cpu_cur); } device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; if (i < sc->num_tx_rings) TASK_INIT(&pq->task, 0, ptnet_tx_task, pq); else NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq); pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, taskqueue_thread_enqueue, &pq->taskq); taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", device_get_nameunit(sc->dev), cpu_cur); cpu_cur = CPU_NEXT(cpu_cur); } return 0; err_path: ptnet_irqs_fini(sc); return err; } static void ptnet_irqs_fini(struct ptnet_softc *sc) { device_t dev = sc->dev; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_free(pq->taskq); pq->taskq = NULL; } if (pq->cookie) { bus_teardown_intr(dev, pq->irq, pq->cookie); pq->cookie = NULL; } if (pq->irq) { bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); 
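/* rid i + 1 matches the per-queue rid used for the allocation in
 * ptnet_irqs_init(). */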
pq->irq = NULL; } } if (sc->msix_mem) { pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), sc->msix_mem); sc->msix_mem = NULL; } } static void ptnet_init(void *opaque) { struct ptnet_softc *sc = opaque; PTNET_CORE_LOCK(sc); ptnet_init_locked(sc); PTNET_CORE_UNLOCK(sc); } static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct ptnet_softc *sc = if_getsoftc(ifp); device_t dev = sc->dev; struct ifreq *ifr = (struct ifreq *)data; int mask __unused, err = 0; switch (cmd) { case SIOCSIFFLAGS: device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp)); PTNET_CORE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* Network stack wants the iff to be up. */ err = ptnet_init_locked(sc); } else { /* Network stack wants the iff to be down. */ err = ptnet_stop(sc); } /* We don't need to do nothing to support IFF_PROMISC, * since that is managed by the backend port. */ PTNET_CORE_UNLOCK(sc); break; case SIOCSIFCAP: device_printf(dev, "SIOCSIFCAP %x %x\n", ifr->ifr_reqcap, if_getcapenable(ifp)); mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { struct ptnet_queue *pq; int i; if (ifr->ifr_reqcap & IFCAP_POLLING) { err = ether_poll_register(ptnet_poll, ifp); if (err) { break; } /* Stop queues and sync with taskqueues. */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); } else { err = ether_poll_deregister(ifp); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 1; PTNET_Q_UNLOCK(pq); } } } #endif /* DEVICE_POLLING */ if_setcapenable(ifp, ifr->ifr_reqcap); break; case SIOCSIFMTU: /* We support any reasonable MTU. */ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { err = EINVAL; } else { PTNET_CORE_LOCK(sc); if_setmtu(ifp, ifr->ifr_mtu); PTNET_CORE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return err; } static int ptnet_init_locked(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; unsigned int nm_buf_size; int ret; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { return 0; /* nothing to do */ } device_printf(sc->dev, "%s\n", __func__); /* Translate offload capabilities according to if_capenable. */ if_sethwassist(ifp, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); /* * Prepare the interface for netmap mode access. 
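 * The sequence below replicates, roughly, the steps netmap itself
 * performs when a port is opened: finalize the memory allocator,
 * create krings and rings (only for the first user), fetch the
 * buffer lookup table, and finally switch the passed-through
 * adapter to netmap mode.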
*/ netmap_update_config(na_dr); ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_finalize() failed\n"); return ret; } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " "failed\n"); goto err_mem_finalize; } ret = netmap_mem_rings_create(na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_rings_create() " "failed\n"); goto err_rings_create; } ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); if (ret) { device_printf(sc->dev, "netmap_mem_get_lut() " "failed\n"); goto err_get_lut; } } ret = ptnet_nm_register(na_dr, 1 /* on */); if (ret) { goto err_register; } nm_buf_size = NETMAP_BUF_SIZE(na_dr); KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, sc->min_tx_space); #ifdef PTNETMAP_STATS callout_reset(&sc->tick, hz, ptnet_tick, sc); #endif if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); return 0; err_register: memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); err_get_lut: netmap_mem_rings_delete(na_dr); err_rings_create: ptnet_nm_krings_delete(na_nm); err_mem_finalize: netmap_mem_deref(na_dr->nm_mem, na_dr); return ret; } /* To be called under core lock. */ static int ptnet_stop(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; int i; device_printf(sc->dev, "%s\n", __func__); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { return 0; /* nothing to do */ } /* Clear the driver-ready flag, and synchronize with all the queues, * so that after this loop we are sure nobody is working anymore with * the device. This scheme is taken from the vtnet driver. */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); callout_stop(&sc->tick); for (i = 0; i < sc->num_rings; i++) { PTNET_Q_LOCK(sc->queues + i); PTNET_Q_UNLOCK(sc->queues + i); } ptnet_nm_register(na_dr, 0 /* off */); if (sc->ptna->backend_users == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } netmap_mem_deref(na_dr->nm_mem, na_dr); return 0; } static void ptnet_qflush(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); int i; /* Flush all the bufrings and do the interface flush. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct mbuf *m; PTNET_Q_LOCK(pq); if (pq->bufring) { while ((m = buf_ring_dequeue_sc(pq->bufring))) { m_freem(m); } } PTNET_Q_UNLOCK(pq); } if_qflush(ifp); } static int ptnet_media_change(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ifmedia *ifm = &sc->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return EINVAL; } return 0; } static uint64_t ptnet_get_counter(if_t ifp, ift_counter cnt) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue_stats stats[2]; int i; /* Accumulate statistics over the queues. */ memset(stats, 0, sizeof(stats)); for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; int idx = (i < sc->num_tx_rings) ? 
0 : 1; stats[idx].packets += pq->stats.packets; stats[idx].bytes += pq->stats.bytes; stats[idx].errors += pq->stats.errors; stats[idx].iqdrops += pq->stats.iqdrops; stats[idx].mcasts += pq->stats.mcasts; } switch (cnt) { case IFCOUNTER_IPACKETS: return (stats[1].packets); case IFCOUNTER_IQDROPS: return (stats[1].iqdrops); case IFCOUNTER_IERRORS: return (stats[1].errors); case IFCOUNTER_OPACKETS: return (stats[0].packets); case IFCOUNTER_OBYTES: return (stats[0].bytes); case IFCOUNTER_OMCASTS: return (stats[0].mcasts); default: return (if_get_counter_default(ifp, cnt)); } } #ifdef PTNETMAP_STATS /* Called under core lock. */ static void ptnet_tick(void *opaque) { struct ptnet_softc *sc = opaque; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct ptnet_queue_stats cur = pq->stats; struct timeval now; unsigned int delta; microtime(&now); delta = now.tv_usec - sc->last_ts.tv_usec + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; delta /= 1000; /* in milliseconds */ if (delta == 0) continue; device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " "intr %lu\n", i, delta, (cur.packets - pq->last_stats.packets), (cur.kicks - pq->last_stats.kicks), (cur.intrs - pq->last_stats.intrs)); pq->last_stats = cur; } microtime(&sc->last_ts); callout_schedule(&sc->tick, hz); } #endif /* PTNETMAP_STATS */ static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) { /* We are always active, as the backend netmap port is * always open in netmap mode. */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; } static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) { /* * Write a command and read back error status, * with zero meaning success. */ bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); return bus_read_4(sc->iomem, PTNET_IO_PTCTL); } static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) { struct ptnet_softc *sc = if_getsoftc(na->ifp); info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", info->num_tx_rings, info->num_rx_rings, info->num_tx_descs, info->num_rx_descs, info->rx_buf_maxsize); return 0; } static void ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) { int i; /* Sync krings from the host, reading from * CSB. */ for (i = 0; i < sc->num_rings; i++) { struct nm_csb_atok *atok = sc->queues[i].atok; struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; struct netmap_kring *kring; if (i < na->num_tx_rings) { kring = na->tx_rings[i]; } else { kring = na->rx_rings[i - na->num_tx_rings]; } kring->rhead = kring->ring->head = atok->head; kring->rcur = kring->ring->cur = atok->cur; kring->nr_hwcur = ktoa->hwcur; kring->nr_hwtail = kring->rtail = kring->ring->tail = ktoa->hwtail; nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, ktoa->hwcur, atok->head, atok->cur, ktoa->hwtail); nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, kring->rtail, kring->ring->tail); } } static void ptnet_update_vnet_hdr(struct ptnet_softc *sc) { unsigned int wanted_hdr_len = ptnet_vnet_hdr ? 
PTNET_HDR_SIZE : 0; bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; } static int ptnet_nm_register(struct netmap_adapter *na, int onoff) { /* device-specific */ if_t ifp = na->ifp; struct ptnet_softc *sc = if_getsoftc(ifp); int native = (na == &sc->ptna->hwup.up); struct ptnet_queue *pq; int ret = 0; int i; if (!onoff) { sc->ptna->backend_users--; } /* If this is the last netmap client, guest interrupt enable flags may * be in arbitrary state. Since these flags are going to be used also * by the netdevice driver, we have to make sure to start with * notifications enabled. Also, schedule NAPI to flush pending packets * in the RX rings, since we will not receive further interrupts * until these will be processed. */ if (native && !onoff && na->active_fds == 0) { nm_prinf("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->atok->appl_need_kick = 1; } } if (onoff) { if (sc->ptna->backend_users == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ktoa->kern_need_kick = 1; pq->atok->appl_need_kick = (!(if_getcapenable(ifp) & IFCAP_POLLING) && i >= sc->num_tx_rings); } /* Set the virtio-net header length. */ ptnet_update_vnet_hdr(sc); /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } /* Align the guest krings and rings to the state stored * in the CSB. */ ptnet_sync_from_csb(sc, na); } /* If not native, don't call nm_set_native_flags, since we don't want * to replace if_transmit method, nor set NAF_NETMAP_ON */ if (native) { netmap_krings_mode_commit(na, onoff); nm_set_native_flags(na); } } else { if (native) { nm_clear_native_flags(na); netmap_krings_mode_commit(na, onoff); } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); } } if (onoff) { sc->ptna->backend_users++; } return ret; } static int ptnet_nm_txsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static void ptnet_nm_intr(struct netmap_adapter *na, int onoff) { struct ptnet_softc *sc = if_getsoftc(na->ifp); int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->atok->appl_need_kick = onoff; } } static void ptnet_tx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { return; } /* Schedule the tasqueue to flush process transmissions requests. * However, vtnet, if_em and if_igb just call ptnet_transmit() here, * at least when using MSI-X interrupts. The if_em driver, instead * schedule taskqueue when using legacy interrupts. 
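 * In this driver the equivalent direct call would be the following
 * (sketch only, not enabled):
 *
 *	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);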
*/ taskqueue_enqueue(pq->taskq, &pq->task); } static void ptnet_rx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; unsigned int unused; DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { return; } /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, * receive-side processing is executed directly in the interrupt * service routine. Alternatively, we may schedule the taskqueue. */ ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur * and head in the CSB. */ ring->head = ring->cur = head; /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); /* Kick the host if needed. */ if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { atok->sync_flags = sync_flags; ptnet_kick(pq); } } #define PTNET_TX_NOSPACE(_h, _k, _min) \ ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ (_k)->rtail - (_h)) < (_min) /* This function may be called by the network stack, or by * by the taskqueue thread. */ static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; unsigned int count = 0; unsigned int minspace; unsigned int head; unsigned int lim; struct mbuf *mhead; struct mbuf *mf; int nmbuf_bytes; uint8_t *nmbuf; if (!PTNET_Q_TRYLOCK(pq)) { /* We failed to acquire the lock, schedule the taskqueue. */ nm_prlim(1, "Deferring TX work"); if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } return 0; } if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { PTNET_Q_UNLOCK(pq); nm_prlim(1, "Interface is down"); return ENETDOWN; } atok = pq->atok; ktoa = pq->ktoa; kring = na->tx_rings[pq->kring_id]; ring = kring->ring; lim = kring->nkr_num_slots - 1; head = ring->head; minspace = sc->min_tx_space; while (count < budget) { if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ atok->appl_need_kick = 1; /* Double check. We need a full barrier to * prevent the store to atok->appl_need_kick * to be reordered with the load from * ktoa->hwcur and ktoa->hwtail (store-load * barrier). 
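 * The interleaving being guarded against is, roughly:
 *
 *	guest				host
 *	-----				----
 *	store appl_need_kick = 1	store hwtail (slots freed)
 *	load  ktoa->hwtail		load  atok->appl_need_kick
 *
 * If the guest's store were allowed to pass its load, the guest
 * could read a stale hwtail (no space) while the host read
 * appl_need_kick == 0 (no notification wanted), and the queue
 * would stall with neither side waking the other.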
*/ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; } nm_prlim(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ atok->appl_need_kick = 0; } } mhead = drbr_peek(ifp, pq->bufring); if (!mhead) { break; } /* Initialize transmission state variables. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; /* If needed, prepare the virtio-net header at the beginning * of the first slot. */ if (have_vnet_hdr) { struct virtio_net_hdr *vh = (struct virtio_net_hdr *)nmbuf; /* For performance, we could replace this memset() with * two 8-bytes-wide writes. */ memset(nmbuf, 0, PTNET_HDR_SIZE); if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { mhead = virtio_net_tx_offload(ifp, mhead, false, vh); if (unlikely(!mhead)) { /* Packet dropped because errors * occurred while preparing the vnet * header. Let's go ahead with the next * packet. */ pq->stats.errors ++; drbr_advance(ifp, pq->bufring); continue; } } nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x " "csum_start %u csum_ofs %u hdr_len = %u " "gso_size %u gso_type %x", __func__, mhead->m_pkthdr.csum_flags, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_bytes += PTNET_HDR_SIZE; } for (mf = mhead; mf; mf = mf->m_next) { uint8_t *mdata = mf->m_data; int mlen = mf->m_len; for (;;) { int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; if (mlen < copy) { copy = mlen; } memcpy(nmbuf, mdata, copy); mdata += copy; mlen -= copy; nmbuf += copy; nmbuf_bytes += copy; if (!mlen) { break; } slot->len = nmbuf_bytes; slot->flags = NS_MOREFRAG; head = nm_next(head, lim); KASSERT(head != ring->tail, ("Unexpectedly run out of TX space")); slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; } } /* Complete last slot and update head. */ slot->len = nmbuf_bytes; slot->flags = 0; head = nm_next(head, lim); /* Consume the packet just processed. */ drbr_advance(ifp, pq->bufring); /* Copy the packet to listeners. */ ETHER_BPF_MTAP(ifp, mhead); pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; if (mhead->m_flags & M_MCAST) { pq->stats.mcasts ++; } m_freem(mhead); count ++; if (++batch_count == PTNET_TX_BATCH) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); batch_count = 0; } } if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); } if (count >= budget && may_resched) { DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n", drbr_inuse(ifp, pq->bufring))); taskqueue_enqueue(pq->taskq, &pq->task); } PTNET_Q_UNLOCK(pq); return count; } static int ptnet_transmit(if_t ifp, struct mbuf *m) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue *pq; unsigned int queue_idx; int err; DBG(device_printf(sc->dev, "transmit %p\n", m)); /* Insert 802.1Q header if needed. */ if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if (m == NULL) { return ENOBUFS; } m->m_flags &= ~M_VLANTAG; } /* Get the flow-id if available. */ queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 
m->m_pkthdr.flowid : curcpu; if (unlikely(queue_idx >= sc->num_tx_rings)) { queue_idx %= sc->num_tx_rings; } pq = sc->queues + queue_idx; err = drbr_enqueue(ifp, pq->bufring, m); if (err) { /* ENOBUFS when the bufring is full */ nm_prlim(1, "%s: drbr_enqueue() failed %d\n", __func__, err); pq->stats.errors ++; return err; } if (if_getcapenable(ifp) & IFCAP_POLLING) { /* If polling is on, the transmit queues will be * drained by the poller. */ return 0; } err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); return (err < 0) ? err : 0; } static unsigned int ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot = ring->slot + head; for (;;) { head = nm_next(head, kring->nkr_num_slots - 1); if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { break; } slot = ring->slot + head; } return head; } static inline struct mbuf * ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) { uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; do { unsigned int copy; if (mtail->m_len == MCLBYTES) { struct mbuf *mf; mf = m_getcl(M_NOWAIT, MT_DATA, 0); if (unlikely(!mf)) { return NULL; } mtail->m_next = mf; mtail = mf; mdata = mtod(mtail, uint8_t *); mtail->m_len = 0; } copy = MCLBYTES - mtail->m_len; if (nmbuf_len < copy) { copy = nmbuf_len; } memcpy(mdata, nmbuf, copy); nmbuf += copy; nmbuf_len -= copy; mdata += copy; mtail->m_len += copy; } while (nmbuf_len); return mtail; } static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; struct netmap_adapter *na = &sc->ptna->dr.up; struct netmap_kring *kring = na->rx_rings[pq->kring_id]; struct netmap_ring *ring = kring->ring; unsigned int const lim = kring->nkr_num_slots - 1; unsigned int batch_count = 0; if_t ifp = sc->ifp; unsigned int count = 0; uint32_t head; PTNET_Q_LOCK(pq); if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { goto unlock; } kring->nr_kflags &= ~NKR_PENDINTR; head = ring->head; while (count < budget) { uint32_t prev_head = head; struct mbuf *mhead, *mtail; struct virtio_net_hdr *vh; struct netmap_slot *slot; unsigned int nmbuf_len; uint8_t *nmbuf; int deliver = 1; /* the mbuf to the network stack. */ host_sync: if (head == ring->tail) { /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ atok->appl_need_kick = 1; /* Double check for more completed RX slots. * We need a full barrier to prevent the store * to atok->appl_need_kick to be reordered with * the load from ktoa->hwcur and ktoa->hwtail * (store-load barrier). */ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(head == ring->tail)) { break; } atok->appl_need_kick = 0; } } /* Initialize ring state variables, possibly grabbing the * virtio-net header. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; vh = (struct virtio_net_hdr *)nmbuf; if (have_vnet_hdr) { if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { /* There is no good reason why host should * put the header in multiple netmap slots. * If this is the case, discard. 
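 * ptnet_rx_discard() walks past any NS_MOREFRAG continuation
 * slots, leaving head at the beginning of the next packet.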
*/ nm_prlim(1, "Fragmented vnet-hdr: dropping"); head = ptnet_rx_discard(kring, head); pq->stats.iqdrops ++; deliver = 0; goto skip; } nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u " "csum_ofs %u hdr_len = %u gso_size %u " "gso_type %x", __func__, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_len -= PTNET_HDR_SIZE; } /* Allocate the head of a new mbuf chain. * We use m_getcl() to allocate an mbuf with standard cluster * size (MCLBYTES). In the future we could use m_getjcl() * to choose different sizes. */ mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(mhead == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf " "head\n", __func__); pq->stats.errors ++; break; } /* Initialize the mbuf state variables. */ mhead->m_pkthdr.len = nmbuf_len; mtail->m_len = 0; /* Scan all the netmap slots containing the current packet. */ for (;;) { DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " "len %u, flags %u\n", __func__, head, ring->tail, slot->len, slot->flags)); mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); if (unlikely(!mtail)) { /* Ouch. We ran out of memory while processing * a packet. We have to restore the previous * head position, free the mbuf chain, and * schedule the taskqueue to give the packet * another chance. */ device_printf(sc->dev, "%s: failed to allocate" " mbuf frag, reset head %u --> %u\n", __func__, head, prev_head); head = prev_head; m_freem(mhead); pq->stats.errors ++; if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } goto escape; } /* We have to increment head irrespective of the * NS_MOREFRAG being set or not. */ head = nm_next(head, lim); if (!(slot->flags & NS_MOREFRAG)) { break; } if (unlikely(head == ring->tail)) { /* The very last slot prepared by the host has * the NS_MOREFRAG set. Drop it and continue * the outer cycle (to do the double-check). */ nm_prlim(1, "Incomplete packet: dropping"); m_freem(mhead); pq->stats.iqdrops ++; goto host_sync; } slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; mhead->m_pkthdr.len += nmbuf_len; } mhead->m_pkthdr.rcvif = ifp; mhead->m_pkthdr.csum_flags = 0; /* Store the queue idx in the packet header. */ mhead->m_pkthdr.flowid = pq->kring_id; M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh; eh = mtod(mhead, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ptnet_vlan_tag_remove(mhead); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) vh->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) { m_freem(mhead); nm_prlim(1, "Csum offload error: dropping"); pq->stats.iqdrops ++; deliver = 0; } skip: count ++; if (++batch_count >= PTNET_RX_BATCH) { /* Some packets have been (or will be) pushed to the network * stack. We need to update the CSB to tell the host about * the new ring->cur and ring->head (RX buffer refill). */ ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); batch_count = 0; } if (likely(deliver)) { pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; PTNET_Q_UNLOCK(pq); if_input(ifp, mhead); PTNET_Q_LOCK(pq); /* The ring->head index (and related indices) are * updated under pq lock by ptnet_ring_update(). * Since we dropped the lock to call if_input(), we * must reload ring->head and restart processing the * ring from there. 
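 * (The lock is dropped around if_input() so that a driver mutex is
 * not held while the packet travels up the network stack.)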
*/ head = ring->head; if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { /* The interface has gone down while we didn't * have the lock. Stop any processing and exit. */ goto unlock; } } } escape: if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); } if (count >= budget && may_resched) { /* If we ran out of budget or the double-check found new * slots to process, schedule the taskqueue. */ DBG(nm_prlim(1, "out of budget: resched h %u t %u\n", head, ring->tail)); taskqueue_enqueue(pq->taskq, &pq->task); } unlock: PTNET_Q_UNLOCK(pq); return count; } static void ptnet_rx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_tx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); } #ifdef DEVICE_POLLING /* We don't need to handle differently POLL_AND_CHECK_STATUS and * POLL_ONLY, since we don't have an Interrupt Status Register. */ static int ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) { struct ptnet_softc *sc = if_getsoftc(ifp); unsigned int queue_budget; unsigned int count = 0; bool borrow = false; int i; KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); queue_budget = MAX(budget / sc->num_rings, 1); nm_prlim(1, "Per-queue budget is %d", queue_budget); while (budget) { unsigned int rcnt = 0; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (borrow) { queue_budget = MIN(queue_budget, budget); if (queue_budget == 0) { break; } } if (i < sc->num_tx_rings) { rcnt += ptnet_drain_transmit_queue(pq, queue_budget, false); } else { rcnt += ptnet_rx_eof(pq, queue_budget, false); } } if (!rcnt) { /* A scan of the queues gave no result, we can * stop here. */ break; } if (rcnt > budget) { /* This may happen when initial budget < sc->num_rings, * since one packet budget is given to each queue * anyway. Just pretend we didn't eat "so much". */ rcnt = budget; } count += rcnt; budget -= rcnt; borrow = true; } return count; } #endif /* DEVICE_POLLING */ #endif /* WITH_PTNETMAP */ diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c index 479c34b4f2ee..bc31957d492c 100644 --- a/sys/dev/virtio/network/if_vtnet.c +++ b/sys/dev/virtio/network/if_vtnet.c @@ -1,4478 +1,4478 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO network devices. */ #include +#include "opt_inet.h" +#include "opt_inet6.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" -#include "opt_inet.h" -#include "opt_inet6.h" - #if defined(INET) || defined(INET6) #include #endif #ifdef __NO_STRICT_ALIGNMENT #define VTNET_ETHER_ALIGN 0 #else /* Strict alignment */ #define VTNET_ETHER_ALIGN ETHER_ALIGN #endif static int vtnet_modevent(module_t, int, void *); static int vtnet_probe(device_t); static int vtnet_attach(device_t); static int vtnet_detach(device_t); static int vtnet_suspend(device_t); static int vtnet_resume(device_t); static int vtnet_shutdown(device_t); static int vtnet_attach_completed(device_t); static int vtnet_config_change(device_t); static int vtnet_negotiate_features(struct vtnet_softc *); static int vtnet_setup_features(struct vtnet_softc *); static int vtnet_init_rxq(struct vtnet_softc *, int); static int vtnet_init_txq(struct vtnet_softc *, int); static int vtnet_alloc_rxtx_queues(struct vtnet_softc *); static void vtnet_free_rxtx_queues(struct vtnet_softc *); static int vtnet_alloc_rx_filters(struct vtnet_softc *); static void vtnet_free_rx_filters(struct vtnet_softc *); static int vtnet_alloc_virtqueues(struct vtnet_softc *); static void vtnet_alloc_interface(struct vtnet_softc *); static int vtnet_setup_interface(struct vtnet_softc *); static int vtnet_ioctl_mtu(struct vtnet_softc *, u_int); static int vtnet_ioctl_ifflags(struct vtnet_softc *); static int vtnet_ioctl_multi(struct vtnet_softc *); static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *); static int vtnet_ioctl(if_t, u_long, caddr_t); static uint64_t vtnet_get_counter(if_t, ift_counter); static int vtnet_rxq_populate(struct vtnet_rxq *); static void vtnet_rxq_free_mbufs(struct vtnet_rxq *); static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **); static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_new_buf(struct vtnet_rxq *); static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *, uint16_t, int, struct virtio_net_hdr *); static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *, uint16_t, int, struct virtio_net_hdr *); static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct 
mbuf *, int); static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_rxq_eof(struct vtnet_rxq *); static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries); static void vtnet_rx_vq_intr(void *); static void vtnet_rxq_tq_intr(void *, int); static int vtnet_txq_intr_threshold(struct vtnet_txq *); static int vtnet_txq_below_threshold(struct vtnet_txq *); static int vtnet_txq_notify(struct vtnet_txq *); static void vtnet_txq_free_mbufs(struct vtnet_txq *); static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *, int *, int *, int *); static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int, int, struct virtio_net_hdr *); static struct mbuf * vtnet_txq_offload(struct vtnet_txq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **, struct vtnet_tx_header *); static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int); #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *, if_t); static void vtnet_start(if_t); #else static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *); static int vtnet_txq_mq_start(if_t, struct mbuf *); static void vtnet_txq_tq_deferred(void *, int); #endif static void vtnet_txq_start(struct vtnet_txq *); static void vtnet_txq_tq_intr(void *, int); static int vtnet_txq_eof(struct vtnet_txq *); static void vtnet_tx_vq_intr(void *); static void vtnet_tx_start_all(struct vtnet_softc *); #ifndef VTNET_LEGACY_TX static void vtnet_qflush(if_t); #endif static int vtnet_watchdog(struct vtnet_txq *); static void vtnet_accum_stats(struct vtnet_softc *, struct vtnet_rxq_stats *, struct vtnet_txq_stats *); static void vtnet_tick(void *); static void vtnet_start_taskqueues(struct vtnet_softc *); static void vtnet_free_taskqueues(struct vtnet_softc *); static void vtnet_drain_taskqueues(struct vtnet_softc *); static void vtnet_drain_rxtx_queues(struct vtnet_softc *); static void vtnet_stop_rendezvous(struct vtnet_softc *); static void vtnet_stop(struct vtnet_softc *); static int vtnet_virtio_reinit(struct vtnet_softc *); static void vtnet_init_rx_filters(struct vtnet_softc *); static int vtnet_init_rx_queues(struct vtnet_softc *); static int vtnet_init_tx_queues(struct vtnet_softc *); static int vtnet_init_rxtx_queues(struct vtnet_softc *); static void vtnet_set_active_vq_pairs(struct vtnet_softc *); static void vtnet_update_rx_offloads(struct vtnet_softc *); static int vtnet_reinit(struct vtnet_softc *); static void vtnet_init_locked(struct vtnet_softc *, int); static void vtnet_init(void *); static void vtnet_free_ctrl_vq(struct vtnet_softc *); static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, struct sglist *, int, int); static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t); static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t); static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, bool); static int vtnet_set_promisc(struct vtnet_softc *, bool); static int vtnet_set_allmulti(struct vtnet_softc *, bool); static void vtnet_rx_filter(struct vtnet_softc *); static void vtnet_rx_filter_mac(struct vtnet_softc *); static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_rx_filter_vlan(struct vtnet_softc *); static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_register_vlan(void *, if_t, uint16_t); static void vtnet_unregister_vlan(void *, 
if_t, uint16_t); static void vtnet_update_speed_duplex(struct vtnet_softc *); static int vtnet_is_link_up(struct vtnet_softc *); static void vtnet_update_link_status(struct vtnet_softc *); static int vtnet_ifmedia_upd(if_t); static void vtnet_ifmedia_sts(if_t, struct ifmediareq *); static void vtnet_get_macaddr(struct vtnet_softc *); static void vtnet_set_macaddr(struct vtnet_softc *); static void vtnet_attached_set_macaddr(struct vtnet_softc *); static void vtnet_vlan_tag_remove(struct mbuf *); static void vtnet_set_rx_process_limit(struct vtnet_softc *); static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_rxq *); static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_txq *); static void vtnet_setup_queue_sysctl(struct vtnet_softc *); static void vtnet_load_tunables(struct vtnet_softc *); static void vtnet_setup_sysctl(struct vtnet_softc *); static int vtnet_rxq_enable_intr(struct vtnet_rxq *); static void vtnet_rxq_disable_intr(struct vtnet_rxq *); static int vtnet_txq_enable_intr(struct vtnet_txq *); static void vtnet_txq_disable_intr(struct vtnet_txq *); static void vtnet_enable_rx_interrupts(struct vtnet_softc *); static void vtnet_enable_tx_interrupts(struct vtnet_softc *); static void vtnet_enable_interrupts(struct vtnet_softc *); static void vtnet_disable_rx_interrupts(struct vtnet_softc *); static void vtnet_disable_tx_interrupts(struct vtnet_softc *); static void vtnet_disable_interrupts(struct vtnet_softc *); static int vtnet_tunable_int(struct vtnet_softc *, const char *, int); DEBUGNET_DEFINE(vtnet); #define vtnet_htog16(_sc, _val) virtio_htog16(vtnet_modern(_sc), _val) #define vtnet_htog32(_sc, _val) virtio_htog32(vtnet_modern(_sc), _val) #define vtnet_htog64(_sc, _val) virtio_htog64(vtnet_modern(_sc), _val) #define vtnet_gtoh16(_sc, _val) virtio_gtoh16(vtnet_modern(_sc), _val) #define vtnet_gtoh32(_sc, _val) virtio_gtoh32(vtnet_modern(_sc), _val) #define vtnet_gtoh64(_sc, _val) virtio_gtoh64(vtnet_modern(_sc), _val) /* Tunables. 
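 * All of the knobs below are CTLFLAG_RDTUN, so they can also be set
 * as loader(8) tunables, e.g. (illustrative values):
 *
 *	hw.vtnet.csum_disable=1
 *	hw.vtnet.mq_max_pairs=4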
*/ static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "VirtIO Net driver parameters"); static int vtnet_csum_disable = 0; SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN, &vtnet_csum_disable, 0, "Disables receive and send checksum offload"); static int vtnet_fixup_needs_csum = 0; SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN, &vtnet_fixup_needs_csum, 0, "Calculate valid checksum for NEEDS_CSUM packets"); static int vtnet_tso_disable = 0; SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable, 0, "Disables TSO"); static int vtnet_lro_disable = 0; SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable, 0, "Disables hardware LRO"); static int vtnet_mq_disable = 0; SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable, 0, "Disables multiqueue support"); static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS; SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN, &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs"); static int vtnet_tso_maxlen = IP_MAXPACKET; SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN, &vtnet_tso_maxlen, 0, "TSO burst limit"); static int vtnet_rx_process_limit = 1024; SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &vtnet_rx_process_limit, 0, "Number of RX segments processed in one pass"); static int vtnet_lro_entry_count = 128; SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN, &vtnet_lro_entry_count, 0, "Software LRO entry count"); /* Enable sorted LRO, and the depth of the mbuf queue. */ static int vtnet_lro_mbufq_depth = 0; SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN, &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue"); static uma_zone_t vtnet_tx_header_zone; static struct virtio_feature_desc vtnet_feature_desc[] = { { VIRTIO_NET_F_CSUM, "TxChecksum" }, { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" }, { VIRTIO_NET_F_MAC, "MAC" }, { VIRTIO_NET_F_GSO, "TxGSO" }, { VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" }, { VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" }, { VIRTIO_NET_F_GUEST_ECN, "RxLROECN" }, { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, { VIRTIO_NET_F_STATUS, "Status" }, { VIRTIO_NET_F_CTRL_VQ, "CtrlVq" }, { VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" }, { VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" }, { VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" }, { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, { VIRTIO_NET_F_MQ, "Multiqueue" }, { VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" }, { VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" }, { 0, NULL } }; static device_method_t vtnet_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtnet_probe), DEVMETHOD(device_attach, vtnet_attach), DEVMETHOD(device_detach, vtnet_detach), DEVMETHOD(device_suspend, vtnet_suspend), DEVMETHOD(device_resume, vtnet_resume), DEVMETHOD(device_shutdown, vtnet_shutdown), /* VirtIO methods. 
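 * config_change runs when the host raises a configuration interrupt
 * (e.g. a link state change); attach_completed runs once the VirtIO
 * bus has finished attaching the device.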
*/ DEVMETHOD(virtio_attach_completed, vtnet_attach_completed), DEVMETHOD(virtio_config_change, vtnet_config_change), DEVMETHOD_END }; #ifdef DEV_NETMAP #include #endif static driver_t vtnet_driver = { .name = "vtnet", .methods = vtnet_methods, .size = sizeof(struct vtnet_softc) }; VIRTIO_DRIVER_MODULE(vtnet, vtnet_driver, vtnet_modevent, NULL); MODULE_VERSION(vtnet, 1); MODULE_DEPEND(vtnet, virtio, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(vtnet, netmap, 1, 1, 1); #endif VIRTIO_SIMPLE_PNPINFO(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter"); static int vtnet_modevent(module_t mod __unused, int type, void *unused __unused) { int error = 0; static int loaded = 0; switch (type) { case MOD_LOAD: if (loaded++ == 0) { vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", sizeof(struct vtnet_tx_header), NULL, NULL, NULL, NULL, 0, 0); #ifdef DEBUGNET /* * We need to allocate from this zone in the transmit path, so ensure * that we have at least one item per header available. * XXX add a separate zone like we do for mbufs? otherwise we may alloc * buckets */ uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2); uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2); #endif } break; case MOD_QUIESCE: if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) error = EBUSY; break; case MOD_UNLOAD: if (--loaded == 0) { uma_zdestroy(vtnet_tx_header_zone); vtnet_tx_header_zone = NULL; } break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtnet_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, vtnet)); } static int vtnet_attach(device_t dev) { struct vtnet_softc *sc; int error; sc = device_get_softc(dev); sc->vtnet_dev = dev; virtio_set_feature_desc(dev, vtnet_feature_desc); VTNET_CORE_LOCK_INIT(sc); callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0); vtnet_load_tunables(sc); vtnet_alloc_interface(sc); vtnet_setup_sysctl(sc); error = vtnet_setup_features(sc); if (error) { device_printf(dev, "cannot setup features\n"); goto fail; } error = vtnet_alloc_rx_filters(sc); if (error) { device_printf(dev, "cannot allocate Rx filters\n"); goto fail; } error = vtnet_alloc_rxtx_queues(sc); if (error) { device_printf(dev, "cannot allocate queues\n"); goto fail; } error = vtnet_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtnet_setup_interface(sc); if (error) { device_printf(dev, "cannot setup interface\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_NET); if (error) { device_printf(dev, "cannot setup interrupts\n"); ether_ifdetach(sc->vtnet_ifp); goto fail; } #ifdef DEV_NETMAP vtnet_netmap_attach(sc); #endif vtnet_start_taskqueues(sc); fail: if (error) vtnet_detach(dev); return (error); } static int vtnet_detach(device_t dev) { struct vtnet_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; if (device_is_attached(dev)) { VTNET_CORE_LOCK(sc); vtnet_stop(sc); VTNET_CORE_UNLOCK(sc); callout_drain(&sc->vtnet_tick_ch); vtnet_drain_taskqueues(sc); ether_ifdetach(ifp); } #ifdef DEV_NETMAP netmap_detach(ifp); #endif if (sc->vtnet_pfil != NULL) { pfil_head_unregister(sc->vtnet_pfil); sc->vtnet_pfil = NULL; } vtnet_free_taskqueues(sc); if (sc->vtnet_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); sc->vtnet_vlan_attach = NULL; } if (sc->vtnet_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); sc->vtnet_vlan_detach = NULL; } ifmedia_removeall(&sc->vtnet_media); if (ifp != NULL) { 
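		/*
		 * The ifnet is allocated early in attach, so release it only
		 * after every consumer above has been torn down.
		 */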
if_free(ifp); sc->vtnet_ifp = NULL; } vtnet_free_rxtx_queues(sc); vtnet_free_rx_filters(sc); if (sc->vtnet_ctrl_vq != NULL) vtnet_free_ctrl_vq(sc); VTNET_CORE_LOCK_DESTROY(sc); return (0); } static int vtnet_suspend(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_stop(sc); sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_resume(device_t dev) { struct vtnet_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) vtnet_init_locked(sc, 0); sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (vtnet_suspend(dev)); } static int vtnet_attach_completed(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_attached_set_macaddr(sc); VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_config_change(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_update_link_status(sc); if (sc->vtnet_link_active != 0) vtnet_tx_start_all(sc); VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_negotiate_features(struct vtnet_softc *sc) { device_t dev; uint64_t features, negotiated_features; int no_csum; dev = sc->vtnet_dev; features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES : VTNET_LEGACY_FEATURES; /* * TSO and LRO are only available when their corresponding checksum * offload feature is also negotiated. */ no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable); if (no_csum) features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM); if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable)) features &= ~VTNET_TSO_FEATURES; if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable)) features &= ~VTNET_LRO_FEATURES; #ifndef VTNET_LEGACY_TX if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable)) features &= ~VIRTIO_NET_F_MQ; #else features &= ~VIRTIO_NET_F_MQ; #endif negotiated_features = virtio_negotiate_features(dev, features); if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) { uint16_t mtu; mtu = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, mtu)); if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) { device_printf(dev, "Invalid MTU value: %d. " "MTU feature disabled.\n", mtu); features &= ~VIRTIO_NET_F_MTU; negotiated_features = virtio_negotiate_features(dev, features); } } if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) { uint16_t npairs; npairs = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) { device_printf(dev, "Invalid max_virtqueue_pairs value: " "%d. Multiqueue feature disabled.\n", npairs); features &= ~VIRTIO_NET_F_MQ; negotiated_features = virtio_negotiate_features(dev, features); } } if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { /* * LRO without mergeable buffers requires special care. This * is not ideal because every receive buffer must be large * enough to hold the maximum TCP packet, the Ethernet header, * and the header. This requires up to 34 descriptors with * MCLBYTES clusters. If we do not have indirect descriptors, * LRO is disabled since the virtqueue will not contain very * many receive buffers. 
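 * (A 65535 byte IP datagram plus the Ethernet framing rounds up to
 * 33 MCLBYTES clusters, and the separate virtio-net header segment
 * brings the chain to 34 descriptors.)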
*/ if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { device_printf(dev, "Host LRO disabled since both mergeable buffers " "and indirect descriptors were not negotiated\n"); features &= ~VTNET_LRO_FEATURES; negotiated_features = virtio_negotiate_features(dev, features); } else sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; } sc->vtnet_features = negotiated_features; sc->vtnet_negotiated_features = negotiated_features; return (virtio_finalize_features(dev)); } static int vtnet_setup_features(struct vtnet_softc *sc) { device_t dev; int error; dev = sc->vtnet_dev; error = vtnet_negotiate_features(sc); if (error) return (error); if (virtio_with_feature(dev, VIRTIO_F_VERSION_1)) sc->vtnet_flags |= VTNET_FLAG_MODERN; if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtnet_flags |= VTNET_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX; if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { /* This feature should always be negotiated. */ sc->vtnet_flags |= VTNET_FLAG_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) { sc->vtnet_max_mtu = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, mtu)); } else sc->vtnet_max_mtu = VTNET_MAX_MTU; if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else if (vtnet_modern(sc)) { /* This is identical to the mergeable header. */ sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1); } else sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE; else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG; else sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE; /* * Favor "hardware" LRO if negotiated, but support software LRO as * a fallback; there is usually little benefit (or worse) with both. */ if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 && virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0) sc->vtnet_flags |= VTNET_FLAG_SW_LRO; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX; else sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN; sc->vtnet_req_vq_pairs = 1; sc->vtnet_max_vq_pairs = 1; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) { sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); } } if (sc->vtnet_max_vq_pairs > 1) { int req; /* * Limit the maximum number of requested queue pairs to the * number of CPUs and the configured maximum. 
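 * A tunable value of zero selects one pair per CPU, a negative value
 * forces a single pair, and the result is clamped below to both the
 * device and CPU limits.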
*/ req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); if (req < 0) req = 1; if (req == 0) req = mp_ncpus; if (req > sc->vtnet_max_vq_pairs) req = sc->vtnet_max_vq_pairs; if (req > mp_ncpus) req = mp_ncpus; if (req > 1) { sc->vtnet_req_vq_pairs = req; sc->vtnet_flags |= VTNET_FLAG_MQ; } } return (0); } static int vtnet_init_rxq(struct vtnet_softc *sc, int id) { struct vtnet_rxq *rxq; rxq = &sc->vtnet_rxqs[id]; snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); rxq->vtnrx_sc = sc; rxq->vtnrx_id = id; rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); if (rxq->vtnrx_sg == NULL) return (ENOMEM); #if defined(INET) || defined(INET6) if (vtnet_software_lro(sc)) { if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp, sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0) return (ENOMEM); } #endif NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, taskqueue_thread_enqueue, &rxq->vtnrx_tq); return (rxq->vtnrx_tq == NULL ? ENOMEM : 0); } static int vtnet_init_txq(struct vtnet_softc *sc, int id) { struct vtnet_txq *txq; txq = &sc->vtnet_txqs[id]; snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF); txq->vtntx_sc = sc; txq->vtntx_id = id; txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT); if (txq->vtntx_sg == NULL) return (ENOMEM); #ifndef VTNET_LEGACY_TX txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, &txq->vtntx_mtx); if (txq->vtntx_br == NULL) return (ENOMEM); TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq); #endif TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq); txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT, taskqueue_thread_enqueue, &txq->vtntx_tq); if (txq->vtntx_tq == NULL) return (ENOMEM); return (0); } static int vtnet_alloc_rxtx_queues(struct vtnet_softc *sc) { int i, npairs, error; npairs = sc->vtnet_max_vq_pairs; sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL) return (ENOMEM); for (i = 0; i < npairs; i++) { error = vtnet_init_rxq(sc, i); if (error) return (error); error = vtnet_init_txq(sc, i); if (error) return (error); } vtnet_set_rx_process_limit(sc); vtnet_setup_queue_sysctl(sc); return (0); } static void vtnet_destroy_rxq(struct vtnet_rxq *rxq) { rxq->vtnrx_sc = NULL; rxq->vtnrx_id = -1; #if defined(INET) || defined(INET6) tcp_lro_free(&rxq->vtnrx_lro); #endif if (rxq->vtnrx_sg != NULL) { sglist_free(rxq->vtnrx_sg); rxq->vtnrx_sg = NULL; } if (mtx_initialized(&rxq->vtnrx_mtx) != 0) mtx_destroy(&rxq->vtnrx_mtx); } static void vtnet_destroy_txq(struct vtnet_txq *txq) { txq->vtntx_sc = NULL; txq->vtntx_id = -1; if (txq->vtntx_sg != NULL) { sglist_free(txq->vtntx_sg); txq->vtntx_sg = NULL; } #ifndef VTNET_LEGACY_TX if (txq->vtntx_br != NULL) { buf_ring_free(txq->vtntx_br, M_DEVBUF); txq->vtntx_br = NULL; } #endif if (mtx_initialized(&txq->vtntx_mtx) != 0) mtx_destroy(&txq->vtntx_mtx); } static void vtnet_free_rxtx_queues(struct vtnet_softc *sc) { int i; if (sc->vtnet_rxqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_rxq(&sc->vtnet_rxqs[i]); free(sc->vtnet_rxqs, M_DEVBUF); sc->vtnet_rxqs = 
NULL; } if (sc->vtnet_txqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_txq(&sc->vtnet_txqs[i]); free(sc->vtnet_txqs, M_DEVBUF); sc->vtnet_txqs = NULL; } } static int vtnet_alloc_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_mac_filter == NULL) return (ENOMEM); } if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) * VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_vlan_filter == NULL) return (ENOMEM); } return (0); } static void vtnet_free_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_mac_filter != NULL) { free(sc->vtnet_mac_filter, M_DEVBUF); sc->vtnet_mac_filter = NULL; } if (sc->vtnet_vlan_filter != NULL) { free(sc->vtnet_vlan_filter, M_DEVBUF); sc->vtnet_vlan_filter = NULL; } } static int vtnet_alloc_virtqueues(struct vtnet_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, idx, flags, nvqs, error; dev = sc->vtnet_dev; flags = 0; nvqs = sc->vtnet_max_vq_pairs * 2; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) nvqs++; info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); if (info == NULL) return (ENOMEM); for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) { rxq = &sc->vtnet_rxqs[i]; VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id); txq = &sc->vtnet_txqs[i]; VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, vtnet_tx_vq_intr, txq, &txq->vtntx_vq, "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id); } /* These queues will not be used so allocate the minimum resources. */ for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) { rxq = &sc->vtnet_rxqs[i]; VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq, "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id); txq = &sc->vtnet_txqs[i]; VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq, "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id); } if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); } /* * TODO: Enable interrupt binding if this is multiqueue. This will * only matter when per-virtqueue MSIX is available. 
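 * The OR with zero below is a deliberate no-op placeholder for such
 * a binding flag.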
*/ if (sc->vtnet_flags & VTNET_FLAG_MQ) flags |= 0; error = virtio_alloc_virtqueues(dev, flags, nvqs, info); free(info, M_TEMP); return (error); } static void vtnet_alloc_interface(struct vtnet_softc *sc) { device_t dev; if_t ifp; dev = sc->vtnet_dev; ifp = if_alloc(IFT_ETHER); sc->vtnet_ifp = ifp; if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); } static int vtnet_setup_interface(struct vtnet_softc *sc) { device_t dev; struct pfil_head_args pa; if_t ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setbaudrate(ifp, IF_Gbps(10)); if_setinitfn(ifp, vtnet_init); if_setioctlfn(ifp, vtnet_ioctl); if_setgetcounterfn(ifp, vtnet_get_counter); #ifndef VTNET_LEGACY_TX if_settransmitfn(ifp, vtnet_txq_mq_start); if_setqflushfn(ifp, vtnet_qflush); #else struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq; if_setstartfn(ifp, vtnet_start); if_setsendqlen(ifp, virtqueue_size(vq) - 1); if_setsendqready(ifp); #endif vtnet_get_macaddr(sc); if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0); ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts); ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO); if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { int gso; if_setcapabilitiesbit(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0); gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO); if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; if (if_getcapabilities(ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { int tso_maxlen; if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen", vtnet_tso_maxlen); if_sethwtsomax(ifp, tso_maxlen - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); if_sethwtsomaxsegcount(ifp, sc->vtnet_tx_nsegs - 1); if_sethwtsomaxsegsize(ifp, PAGE_SIZE); } } if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0); #ifdef notyet /* BMV: Rx checksums not distinguished between IPv4 and IPv6. */ if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0); #endif if (vtnet_tunable_int(sc, "fixup_needs_csum", vtnet_fixup_needs_csum) != 0) sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM; /* Support either "hardware" or software LRO. */ if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); } if (if_getcapabilities(ifp) & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) { /* * VirtIO does not support VLAN tagging, but we can fake * it by inserting and removing the 802.1Q header during * transmit and receive. We are then able to do checksum * offloading of VLAN frames. */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0); } if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO) if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); /* * Capabilities after here are not enabled by default. 
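 * if_setcapenable() below snapshots everything advertised so far, so
 * IFCAP_VLAN_HWFILTER is advertised afterwards and stays disabled
 * until explicitly enabled.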
*/ if_setcapenable(ifp, if_getcapabilities(ifp)); if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); } ether_ifattach(ifp, sc->vtnet_hwaddr); /* Tell the upper layer(s) we support long frames. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); DEBUGNET_SET(ifp, vtnet); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = if_name(ifp); sc->vtnet_pfil = pfil_head_register(&pa); return (0); } static int vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu) { int framesz; if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) return (MJUMPAGESIZE); else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) return (MCLBYTES); /* * Try to scale the receive mbuf cluster size from the MTU. We * could also use the VQ size to influence the selected size, * but that would only matter for very small queues. */ if (vtnet_modern(sc)) { MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1)); framesz = sizeof(struct virtio_net_hdr_v1); } else framesz = sizeof(struct vtnet_rx_header); framesz += sizeof(struct ether_vlan_header) + mtu; /* * Account for the offsetting we'll do elsewhere so we allocate the * right size for the mtu. */ if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) { framesz += VTNET_ETHER_ALIGN; } if (framesz <= MCLBYTES) return (MCLBYTES); else if (framesz <= MJUMPAGESIZE) return (MJUMPAGESIZE); else if (framesz <= MJUM9BYTES) return (MJUM9BYTES); /* Sane default; avoid 16KB clusters. */ return (MCLBYTES); } static int vtnet_ioctl_mtu(struct vtnet_softc *sc, u_int mtu) { if_t ifp; int clustersz; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (if_getmtu(ifp) == mtu) return (0); else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu) return (EINVAL); if_setmtu(ifp, mtu); clustersz = vtnet_rx_cluster_size(sc, mtu); if (clustersz != sc->vtnet_rx_clustersz && if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); vtnet_init_locked(sc, 0); } return (0); } static int vtnet_ioctl_ifflags(struct vtnet_softc *sc) { if_t ifp; int drv_running; ifp = sc->vtnet_ifp; drv_running = (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; VTNET_CORE_LOCK_ASSERT(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { if (drv_running) vtnet_stop(sc); goto out; } if (!drv_running) { vtnet_init_locked(sc, 0); goto out; } if ((if_getflags(ifp) ^ sc->vtnet_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) vtnet_rx_filter(sc); else { /* * We don't support filtering out multicast, so * ALLMULTI is always set. 
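 * The same reasoning applies to promiscuous mode, so both flags are
 * forced on here rather than silently ignored.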
*/ if_setflagbits(ifp, IFF_ALLMULTI, 0); if_setflagbits(ifp, IFF_PROMISC, 0); } } out: sc->vtnet_if_flags = if_getflags(ifp); return (0); } static int vtnet_ioctl_multi(struct vtnet_softc *sc) { if_t ifp; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX && if_getdrvflags(ifp) & IFF_DRV_RUNNING) vtnet_rx_filter_mac(sc); return (0); } static int vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr) { if_t ifp; int mask, reinit, update; ifp = sc->vtnet_ifp; mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^ if_getcapenable(ifp); reinit = update = 0; VTNET_CORE_LOCK_ASSERT(sc); if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if (mask & IFCAP_TXCSUM_IPV6) if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); if (mask & IFCAP_TSO4) if_togglecapenable(ifp, IFCAP_TSO4); if (mask & IFCAP_TSO6) if_togglecapenable(ifp, IFCAP_TSO6); if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) { /* * These Rx features require the negotiated features to * be updated. Avoid a full reinit if possible. */ if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) update = 1; else reinit = 1; /* BMV: Avoid needless renegotiation for just software LRO. */ if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) == IFCAP_LRO && vtnet_software_lro(sc)) reinit = update = 0; if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_RXCSUM_IPV6) if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); if (mask & IFCAP_LRO) if_togglecapenable(ifp, IFCAP_LRO); /* * VirtIO does not distinguish between IPv4 and IPv6 checksums * so treat them as a pair. Guest TSO (LRO) requires receive * checksums. */ if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { if_setcapenablebit(ifp, IFCAP_RXCSUM, 0); #ifdef notyet if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0); #endif } else if_setcapenablebit(ifp, 0, (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)); } if (mask & IFCAP_VLAN_HWFILTER) { /* These Rx features require renegotiation. 
*/ reinit = 1; if (mask & IFCAP_VLAN_HWFILTER) if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER); } if (mask & IFCAP_VLAN_HWTSO) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if (mask & IFCAP_VLAN_HWTAGGING) if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if (reinit) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); vtnet_init_locked(sc, 0); } else if (update) vtnet_update_rx_offloads(sc); } return (0); } static int vtnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct vtnet_softc *sc; struct ifreq *ifr; int error; sc = if_getsoftc(ifp); ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFMTU: VTNET_CORE_LOCK(sc); error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu); VTNET_CORE_UNLOCK(sc); break; case SIOCSIFFLAGS: VTNET_CORE_LOCK(sc); error = vtnet_ioctl_ifflags(sc); VTNET_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VTNET_CORE_LOCK(sc); error = vtnet_ioctl_multi(sc); VTNET_CORE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); break; case SIOCSIFCAP: VTNET_CORE_LOCK(sc); error = vtnet_ioctl_ifcap(sc, ifr); VTNET_CORE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); return (error); } static int vtnet_rxq_populate(struct vtnet_rxq *rxq) { struct virtqueue *vq; int nbufs, error; #ifdef DEV_NETMAP error = vtnet_netmap_rxq_populate(rxq); if (error >= 0) return (error); #endif /* DEV_NETMAP */ vq = rxq->vtnrx_vq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtnet_rxq_new_buf(rxq); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); /* * EMSGSIZE signifies the virtqueue did not have enough * entries available to hold the last mbuf. This is not * an error. */ if (error == EMSGSIZE) error = 0; } return (error); } static void vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) { struct virtqueue *vq; struct mbuf *m; int last; #ifdef DEV_NETMAP struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp), rxq->vtnrx_id, NR_RX); #else /* !DEV_NETMAP */ void *kring = NULL; #endif /* !DEV_NETMAP */ vq = rxq->vtnrx_vq; last = 0; while ((m = virtqueue_drain(vq, &last)) != NULL) { if (kring == NULL) m_freem(m); } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in rx queue %p", __func__, rxq)); } static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) { struct mbuf *m_head, *m_tail, *m; int i, size; m_head = NULL; size = sc->vtnet_rx_clustersz; KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs)); for (i = 0; i < nbufs; i++) { m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size); if (m == NULL) { sc->vtnet_stats.mbuf_alloc_failed++; m_freem(m_head); return (NULL); } m->m_len = size; /* * Need to offset the mbuf if the header we're going to add * will misalign. */ if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) { m_adj(m, VTNET_ETHER_ALIGN); } if (m_head != NULL) { m_tail->m_next = m; m_tail = m; } else m_head = m_tail = m; } if (m_tailp != NULL) *m_tailp = m_tail; return (m_head); } /* * Slow path for when LRO without mergeable buffers is negotiated. 
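 * Every posted buffer must then be an mbuf chain large enough for a
 * maximal LRO frame, so only the prefix the host actually consumed
 * is replaced rather than reallocating the whole chain.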
 */
static int
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev, *m_new, *m_tail;
	int len, clustersz, nreplace, error;

	sc = rxq->vtnrx_sc;
	clustersz = sc->vtnet_rx_clustersz;
	/*
	 * Need to offset the mbuf if the header we're going to add will
	 * misalign, account for that here.
	 */
	if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0)
		clustersz -= VTNET_ETHER_ALIGN;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, avoid allocating a complete
	 * replacement when the received frame did not consume the entire
	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
	 */
	while (len > 0) {
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/*
		 * Every mbuf should have the expected cluster size since that
		 * is also used to allocate the replacements.
		 */
		KASSERT(m->m_len == clustersz,
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
		    m->m_len, clustersz));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
	    nreplace, sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clustersz;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received mbuf chain onto the
	 * end of the replacement chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * The replacement is supposed to be a copy of the one
		 * dequeued, so this is a very unexpected error.
		 *
		 * Restore the m0 chain to the original state if it was
		 * modified so we can then discard it.
		 */
		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}
		m_prev->m_len = clustersz;
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc;
	struct mbuf *m_new;
	int error;

	sc = rxq->vtnrx_sc;

	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));

	MPASS(m->m_next == NULL);
	if (m->m_len < len)
		return (EMSGSIZE);

	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
	if (m_new == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	} else
		m->m_len = len;

	return (error);
}

static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	int header_inlined, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;

	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
	VTNET_RXQ_LOCK_ASSERT(rxq);

	sglist_reset(sg);
	header_inlined = vtnet_modern(sc) ||
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */

	/*
	 * Note: The mbuf has already been adjusted when we allocate it if we
	 * have to do strict alignment.
	 */
	if (header_inlined)
		error = sglist_append_mbuf(sg, m);
	else {
		struct vtnet_rx_header *rxhdr =
		    mtod(m, struct vtnet_rx_header *);
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));

		/* Append the header and remaining mbuf data.
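		 * The legacy header sits inside the vtnet_rx_header at the
		 * front of the cluster and the packet data begins at
		 * rxhdr[1], so the two are appended as separate
		 * scatter-gather segments.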
		 */
		error = sglist_append(sg, &rxhdr->vrh_hdr,
		    sc->vtnet_hdr_size);
		if (error)
			return (error);
		error = sglist_append(sg, &rxhdr[1],
		    m->m_len - sizeof(struct vtnet_rx_header));
		if (error)
			return (error);

		if (m->m_next != NULL)
			error = sglist_append_mbuf(sg, m->m_next);
	}

	if (error)
		return (error);

	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}

static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}

static int
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int error;

	sc = rxq->vtnrx_sc;

	/*
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
	 * not have an analogous CSUM flag. The checksum has been validated,
	 * but is incomplete (TCP/UDP pseudo header).
	 *
	 * The packet is likely from another VM on the same host that itself
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
	 * the checksum has little value.
	 *
	 * Default to receiving the packet as-is for performance reasons, but
	 * this can cause issues if the packet is to be forwarded because it
	 * does not contain a valid checksum. This patch may be helpful:
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
	 * compute the checksum if requested.
	 *
	 * BMV: Need to add a CSUM_PARTIAL flag?
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
		return (error);
	}

	/*
	 * Compute the checksum in the driver so the packet will contain a
	 * valid checksum. The checksum is at csum_offset from csum_start.
	 */
	switch (etype) {
#if defined(INET) || defined(INET6)
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6: {
		int csum_off, csum_end;
		uint16_t csum;

		csum_off = hdr->csum_start + hdr->csum_offset;
		csum_end = csum_off + sizeof(uint16_t);

		/* Assume checksum will be in the first mbuf. */
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
			return (1);

		/*
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
		 * checksum and write it at the specified offset. We could
		 * try to verify the packet: csum_start should probably
		 * correspond to the start of the TCP/UDP header.
		 *
		 * BMV: Need to properly handle UDP with zero checksum. Is
		 * the IPv4 header checksum implicitly validated?
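		 * in_cksum_skip(m, len, skip) sums the bytes from offset
		 * 'skip' up to 'len', so the call below checksums everything
		 * from csum_start through the end of the packet.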
*/ csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); *(uint16_t *)(mtodo(m, csum_off)) = csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; } #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } return (0); } static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused) { #if 0 struct vtnet_softc *sc; #endif int protocol; #if 0 sc = rxq->vtnrx_sc; #endif switch (etype) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(m->m_len < hoff + sizeof(struct ip))) protocol = IPPROTO_DONE; else { struct ip *ip = (struct ip *)(m->m_data + hoff); protocol = ip->ip_p; } break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) protocol = IPPROTO_DONE; break; #endif default: protocol = IPPROTO_DONE; break; } switch (protocol) { case IPPROTO_TCP: case IPPROTO_UDP: m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; default: /* * FreeBSD does not support checksum offloading of this * protocol. Let the stack re-verify the checksum later * if the protocol is supported. */ #if 0 if_printf(sc->vtnet_ifp, "%s: checksum offload of unsupported protocol " "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n", __func__, etype, protocol, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } static int vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { const struct ether_header *eh; int hoff; uint16_t etype; eh = mtod(m, const struct ether_header *); etype = ntohs(eh->ether_type); if (etype == ETHERTYPE_VLAN) { /* TODO BMV: Handle QinQ. */ const struct ether_vlan_header *evh = mtod(m, const struct ether_vlan_header *); etype = ntohs(evh->evl_proto); hoff = sizeof(struct ether_vlan_header); } else hoff = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr)); else /* VIRTIO_NET_HDR_F_DATA_VALID */ return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr)); } static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) { struct mbuf *m; while (--nbufs > 0) { m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); if (m == NULL) break; vtnet_rxq_discard_buf(rxq, m); } } static void vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) { int error __diagused; /* * Requeue the discarded mbuf. This should always be successful * since it was just dequeued. 
*/ error = vtnet_rxq_enqueue_buf(rxq, m); KASSERT(error == 0, ("%s: cannot requeue discarded mbuf %d", __func__, error)); } static int vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m_tail; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; m_tail = m_head; while (--nbufs > 0) { struct mbuf *m; uint32_t len; m = virtqueue_dequeue(vq, &len); if (m == NULL) { rxq->vtnrx_stats.vrxs_ierrors++; goto fail; } if (vtnet_rxq_new_buf(rxq) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); goto fail; } if (m->m_len < len) len = m->m_len; m->m_len = len; m->m_flags &= ~M_PKTHDR; m_head->m_pkthdr.len += len; m_tail->m_next = m; m_tail = m; } return (0); fail: sc->vtnet_stats.rx_mergeable_failed++; m_freem(m_head); return (1); } #if defined(INET) || defined(INET6) static int vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m) { struct lro_ctrl *lro; lro = &rxq->vtnrx_lro; if (lro->lro_mbuf_max != 0) { tcp_lro_queue_mbuf(lro, m); return (0); } return (tcp_lro_rx(lro, m, 0)); } #endif static void vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; if_t ifp; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { vtnet_vlan_tag_remove(m); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; } } m->m_pkthdr.flowid = rxq->vtnrx_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { if (vtnet_rxq_csum(rxq, m, hdr) == 0) rxq->vtnrx_stats.vrxs_csum++; else rxq->vtnrx_stats.vrxs_csum_failed++; } if (hdr->gso_size != 0) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: case VIRTIO_NET_HDR_GSO_TCPV6: m->m_pkthdr.lro_nsegs = howmany(m->m_pkthdr.len, hdr->gso_size); rxq->vtnrx_stats.vrxs_host_lro++; break; } } rxq->vtnrx_stats.vrxs_ipackets++; rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; #if defined(INET) || defined(INET6) if (vtnet_software_lro(sc) && if_getcapenable(ifp) & IFCAP_LRO) { if (vtnet_lro_rx(rxq, m) == 0) return; } #endif if_input(ifp, m); } static int vtnet_rxq_eof(struct vtnet_rxq *rxq) { struct virtio_net_hdr lhdr, *hdr; struct vtnet_softc *sc; if_t ifp; struct virtqueue *vq; int deq, count; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; ifp = sc->vtnet_ifp; deq = 0; count = sc->vtnet_rx_process_limit; VTNET_RXQ_LOCK_ASSERT(rxq); CURVNET_SET_QUIET(if_getvnet(ifp)); while (count-- > 0) { struct mbuf *m; uint32_t len, nbufs, adjsz; m = virtqueue_dequeue(vq, &len); if (m == NULL) break; deq++; if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { rxq->vtnrx_stats.vrxs_ierrors++; vtnet_rxq_discard_buf(rxq, m); continue; } if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) { struct virtio_net_hdr_mrg_rxbuf *mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); kmsan_mark(mhdr, sizeof(*mhdr), KMSAN_STATE_INITED); nbufs = vtnet_htog16(sc, mhdr->num_buffers); adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else if (vtnet_modern(sc)) { nbufs = 1; /* num_buffers is always 1 */ adjsz = sizeof(struct virtio_net_hdr_v1); } else { nbufs = 1; adjsz = sizeof(struct vtnet_rx_header); /* * Account for our gap between the header and start of * data to keep the 
segments separated. */ len += VTNET_RX_HEADER_PAD; } if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); continue; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags = 0; if (nbufs > 1) { /* Dequeue the rest of chain. */ if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) continue; } kmsan_mark_mbuf(m, KMSAN_STATE_INITED); /* * Save an endian swapped version of the header prior to it * being stripped. The header is always at the start of the * mbuf data. num_buffers was already saved (and not needed) * so use the standard header. */ hdr = mtod(m, struct virtio_net_hdr *); lhdr.flags = hdr->flags; lhdr.gso_type = hdr->gso_type; lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len); lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size); lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start); lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset); m_adj(m, adjsz); if (PFIL_HOOKED_IN(sc->vtnet_pfil)) { pfil_return_t pfil; pfil = pfil_mbuf_in(sc->vtnet_pfil, &m, ifp, NULL); switch (pfil) { case PFIL_DROPPED: case PFIL_CONSUMED: continue; default: KASSERT(pfil == PFIL_PASS, ("Filter returned %d!", pfil)); } } vtnet_rxq_input(rxq, m, &lhdr); } if (deq > 0) { #if defined(INET) || defined(INET6) if (vtnet_software_lro(sc)) tcp_lro_flush_all(&rxq->vtnrx_lro); #endif virtqueue_notify(vq); } CURVNET_RESTORE(); return (count > 0 ? 0 : EAGAIN); } static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries) { struct vtnet_softc *sc; if_t ifp; u_int more; #ifdef DEV_NETMAP int nmirq; #endif /* DEV_NETMAP */ sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_rxq_disable_intr(rxq); return; } VTNET_RXQ_LOCK(rxq); #ifdef DEV_NETMAP /* * We call netmap_rx_irq() under lock to prevent concurrent calls. * This is not necessary to serialize the access to the RX vq, but * rather to avoid races that may happen if this interface is * attached to a VALE switch, which would cause received packets * to stall in the RX queue (nm_kr_tryget() could find the kring * busy when called from netmap_bwrap_intr_notify()). */ nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more); if (nmirq != NM_IRQ_PASS) { VTNET_RXQ_UNLOCK(rxq); if (nmirq == NM_IRQ_RESCHED) { taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } return; } #endif /* DEV_NETMAP */ again: if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); /* * This is an occasional condition or race (when !more), * so retry a few times before scheduling the taskqueue. 
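	 * (The interrupt handler passes VTNET_INTR_DISABLE_RETRIES here;
	 * the taskqueue handler passes zero, so it always defers rather
	 * than spinning.)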
*/ if (tries-- > 0) goto again; rxq->vtnrx_stats.vrxs_rescheduled++; VTNET_RXQ_UNLOCK(rxq); taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } else VTNET_RXQ_UNLOCK(rxq); } static void vtnet_rx_vq_intr(void *xrxq) { struct vtnet_rxq *rxq; rxq = xrxq; vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES); } static void vtnet_rxq_tq_intr(void *xrxq, int pending __unused) { struct vtnet_rxq *rxq; rxq = xrxq; vtnet_rx_vq_process(rxq, 0); } static int vtnet_txq_intr_threshold(struct vtnet_txq *txq) { struct vtnet_softc *sc; int threshold; sc = txq->vtntx_sc; /* * The Tx interrupt is disabled until the queue free count falls * below our threshold. Completed frames are drained from the Tx * virtqueue before transmitting new frames and in the watchdog * callout, so the frequency of Tx interrupts is greatly reduced, * at the cost of not freeing mbufs as quickly as they otherwise * would be. */ threshold = virtqueue_size(txq->vtntx_vq) / 4; /* * Without indirect descriptors, leave enough room for the most * segments we handle. */ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && threshold < sc->vtnet_tx_nsegs) threshold = sc->vtnet_tx_nsegs; return (threshold); } static int vtnet_txq_below_threshold(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold); } static int vtnet_txq_notify(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; txq->vtntx_watchdog = VTNET_TX_TIMEOUT; virtqueue_notify(vq); if (vtnet_txq_enable_intr(txq) == 0) return (0); /* * Drain frames that were completed since last checked. If this * causes the queue to go above the threshold, the caller should * continue transmitting. */ if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { virtqueue_disable_intr(vq); return (1); } return (0); } static void vtnet_txq_free_mbufs(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; int last; #ifdef DEV_NETMAP struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp), txq->vtntx_id, NR_TX); #else /* !DEV_NETMAP */ void *kring = NULL; #endif /* !DEV_NETMAP */ vq = txq->vtntx_vq; last = 0; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { if (kring == NULL) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in tx queue %p", __func__, txq)); } /* * BMV: This can go away once we finally have offsets in the mbuf header. */ static int vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, int *proto, int *start) { struct vtnet_softc *sc; struct ether_vlan_header *evh; #if defined(INET) || defined(INET6) int offset; #endif sc = txq->vtntx_sc; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. 
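		 * A nested tag leaves ETHERTYPE_VLAN in evl_proto, which
		 * then falls into the default case below and is counted as
		 * an unknown ethtype.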
		 */
		*etype = ntohs(evh->evl_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_vlan_header);
#endif
	} else {
		*etype = ntohs(evh->evl_encap_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_header);
#endif
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;

		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
		return (EINVAL);
	}

	return (0);
}

static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;

	sc = txq->vtntx_sc;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;

	if (__predict_false(tcp->th_flags & TH_CWR)) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
		 * FreeBSD, ECN support is not on a per-interface basis,
		 * but globally via the net.inet.tcp.ecn.enable sysctl
		 * knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	txq->vtntx_stats.vtxs_tso++;

	return (0);
}

static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;

	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;

	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
		/* Sanity check the parsed mbuf matches the offload flags. */
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
		    && etype != ETHERTYPE_IPV6))) {
			sc->vtnet_stats.tx_csum_proto_mismatch++;
			goto drop;
		}

		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
		txq->vtntx_stats.vtxs_csum++;
	}

	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		/*
		 * Sanity check that the parsed mbuf IP protocol is TCP;
		 * VirtIO TSO requires the checksum offloading above.
		 */
		if (__predict_false(proto != IPPROTO_TCP)) {
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		} else if (__predict_false((hdr->flags &
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
			sc->vtnet_stats.tx_tso_without_csum++;
			goto drop;
		}

		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}

static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;

	sglist_reset(sg);
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	if (error != 0 || sg->sg_nseg != 1) {
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
		    __func__, error, sg->sg_nseg));
		goto fail;
	}

	error = sglist_append_mbuf(sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;
	M_ASSERTPKTHDR(m);

	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header, regardless of whether
	 * mergeable headers were negotiated, because for transmit
	 * num_buffers is always zero. The vtnet_hdr_size is used to
	 * enqueue a header segment of the right size.
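	 * (vth_uhdr is a union of the possible virtio-net header layouts;
	 * vtnet_txq_enqueue_buf() maps only the first vtnet_hdr_size bytes
	 * of it into the descriptor.)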
*/ hdr = &txhdr->vth_uhdr.hdr; if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } m->m_flags &= ~M_VLANTAG; } if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) { m = vtnet_txq_offload(txq, m, hdr); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } } error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); fail: if (error) uma_zfree(vtnet_tx_header_zone, txhdr); return (error); } #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *txq, if_t ifp) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m0; int tries, enq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; tries = 0; VTNET_TXQ_LOCK_ASSERT(txq); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) return; vtnet_txq_eof(txq); again: enq = 0; while (!if_sendq_empty(ifp)) { if (virtqueue_full(vq)) break; m0 = if_dequeue(ifp); if (m0 == NULL) break; if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) { if (m0 != NULL) if_sendq_prepend(ifp, m0); break; } enq++; ETHER_BPF_MTAP(ifp, m0); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } } static void vtnet_start(if_t ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; sc = if_getsoftc(ifp); txq = &sc->vtnet_txqs[0]; VTNET_TXQ_LOCK(txq); vtnet_start_locked(txq, ifp); VTNET_TXQ_UNLOCK(txq); } #else /* !VTNET_LEGACY_TX */ static int vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) { struct vtnet_softc *sc; struct virtqueue *vq; struct buf_ring *br; if_t ifp; int enq, tries, error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; br = txq->vtntx_br; ifp = sc->vtnet_ifp; tries = 0; error = 0; VTNET_TXQ_LOCK_ASSERT(txq); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error) return (error); } vtnet_txq_eof(txq); again: enq = 0; while ((m = drbr_peek(ifp, br)) != NULL) { if (virtqueue_full(vq)) { drbr_putback(ifp, br, m); break; } if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) { if (m != NULL) drbr_putback(ifp, br, m); else drbr_advance(ifp, br); break; } drbr_advance(ifp, br); enq++; ETHER_BPF_MTAP(ifp, m); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } return (0); } static int vtnet_txq_mq_start(if_t ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int i, npairs, error; sc = if_getsoftc(ifp); npairs = sc->vtnet_act_vq_pairs; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; txq = &sc->vtnet_txqs[i]; if (VTNET_TXQ_TRYLOCK(txq) != 0) { error = vtnet_txq_mq_start_locked(txq, m); VTNET_TXQ_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->vtntx_br, m); taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); } return (error); } static void vtnet_txq_tq_deferred(void *xtxq, int pending __unused) { struct vtnet_softc *sc; struct vtnet_txq *txq; txq = xtxq; sc = txq->vtntx_sc; VTNET_TXQ_LOCK(txq); if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); VTNET_TXQ_UNLOCK(txq); } #endif /* VTNET_LEGACY_TX */ static void vtnet_txq_start(struct vtnet_txq *txq) { struct vtnet_softc *sc; if_t ifp; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; 
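	/*
	 * Kick whichever backlog this build uses: the legacy path drains
	 * the ifnet send queue, while the multiqueue path drains this
	 * queue's buf_ring.
	 */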
#ifdef VTNET_LEGACY_TX if (!if_sendq_empty(ifp)) vtnet_start_locked(txq, ifp); #else if (!drbr_empty(ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); #endif } static void vtnet_txq_tq_intr(void *xtxq, int pending __unused) { struct vtnet_softc *sc; struct vtnet_txq *txq; if_t ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static int vtnet_txq_eof(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; struct mbuf *m; int deq; vq = txq->vtntx_vq; deq = 0; VTNET_TXQ_LOCK_ASSERT(txq); while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { m = txhdr->vth_mbuf; deq++; txq->vtntx_stats.vtxs_opackets++; txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) txq->vtntx_stats.vtxs_omcasts++; m_freem(m); uma_zfree(vtnet_tx_header_zone, txhdr); } if (virtqueue_empty(vq)) txq->vtntx_watchdog = 0; return (deq); } static void vtnet_tx_vq_intr(void *xtxq) { struct vtnet_softc *sc; struct vtnet_txq *txq; if_t ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_txq_disable_intr(txq); return; } #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS) return; #endif /* DEV_NETMAP */ VTNET_TXQ_LOCK(txq); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static void vtnet_tx_start_all(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } } #ifndef VTNET_LEGACY_TX static void vtnet_qflush(if_t ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct mbuf *m; int i; sc = if_getsoftc(ifp); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) m_freem(m); VTNET_TXQ_UNLOCK(txq); } if_qflush(ifp); } #endif static int vtnet_watchdog(struct vtnet_txq *txq) { if_t ifp; ifp = txq->vtntx_sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if (txq->vtntx_watchdog == 1) { /* * Only drain completed frames if the watchdog is about to * expire. If any frames were drained, there may be enough * free descriptors now available to transmit queued frames. * In that case, the timer will immediately be decremented * below, but the timeout is generous enough that this should * not be a problem.
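* (vtnet_tick() runs this check for every active queue once per second, so * vtntx_watchdog is effectively the number of seconds remaining before the * queue is declared hung.)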
*/ if (vtnet_txq_eof(txq) != 0) vtnet_txq_start(txq); } if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { VTNET_TXQ_UNLOCK(txq); return (0); } VTNET_TXQ_UNLOCK(txq); if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); return (1); } static void vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, struct vtnet_txq_stats *txacc) { bzero(rxacc, sizeof(struct vtnet_rxq_stats)); bzero(txacc, sizeof(struct vtnet_txq_stats)); for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq_stats *rxst; struct vtnet_txq_stats *txst; rxst = &sc->vtnet_rxqs[i].vtnrx_stats; rxacc->vrxs_ipackets += rxst->vrxs_ipackets; rxacc->vrxs_ibytes += rxst->vrxs_ibytes; rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; rxacc->vrxs_csum += rxst->vrxs_csum; rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; txst = &sc->vtnet_txqs[i].vtntx_stats; txacc->vtxs_opackets += txst->vtxs_opackets; txacc->vtxs_obytes += txst->vtxs_obytes; txacc->vtxs_csum += txst->vtxs_csum; txacc->vtxs_tso += txst->vtxs_tso; txacc->vtxs_rescheduled += txst->vtxs_rescheduled; } } static uint64_t vtnet_get_counter(if_t ifp, ift_counter cnt) { struct vtnet_softc *sc; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; sc = if_getsoftc(ifp); vtnet_accum_stats(sc, &rxaccum, &txaccum); switch (cnt) { case IFCOUNTER_IPACKETS: return (rxaccum.vrxs_ipackets); case IFCOUNTER_IQDROPS: return (rxaccum.vrxs_iqdrops); case IFCOUNTER_IERRORS: return (rxaccum.vrxs_ierrors); case IFCOUNTER_OPACKETS: return (txaccum.vtxs_opackets); #ifndef VTNET_LEGACY_TX case IFCOUNTER_OBYTES: return (txaccum.vtxs_obytes); case IFCOUNTER_OMCASTS: return (txaccum.vtxs_omcasts); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void vtnet_tick(void *xsc) { struct vtnet_softc *sc; if_t ifp; int i, timedout; sc = xsc; ifp = sc->vtnet_ifp; timedout = 0; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); if (timedout != 0) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); vtnet_init_locked(sc, 0); } else callout_schedule(&sc->vtnet_tick_ch, hz); } static void vtnet_start_taskqueues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, error; dev = sc->vtnet_dev; /* * Errors here are very difficult to recover from - we cannot * easily fail because, if this is during boot, we will hang * when freeing any successfully started taskqueues because * the scheduler isn't up yet. * * Most drivers just ignore the return value - it only fails * with ENOMEM so an error is not likely. 
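* Accordingly, a failure below is only reported via device_printf() and * then ignored, leaving that queue without a service thread.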
*/ for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); if (error) { device_printf(dev, "failed to start rx taskq %d\n", rxq->vtnrx_id); } txq = &sc->vtnet_txqs[i]; error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); if (error) { device_printf(dev, "failed to start tx taskq %d\n", txq->vtntx_id); } } } static void vtnet_free_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) { taskqueue_free(rxq->vtnrx_tq); rxq->vtnrx_tq = NULL; } txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_free(txq->vtntx_tq); txq->vtntx_tq = NULL; } } } static void vtnet_drain_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); #ifndef VTNET_LEGACY_TX taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); #endif } } } static void vtnet_drain_rxtx_queues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; vtnet_rxq_free_mbufs(rxq); txq = &sc->vtnet_txqs[i]; vtnet_txq_free_mbufs(txq); } } static void vtnet_stop_rendezvous(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; VTNET_CORE_LOCK_ASSERT(sc); /* * Lock and unlock the per-queue mutex so we know the stop * state is visible. Doing only the active queues should be * sufficient, but it does not cost much extra to do all the * queues. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; VTNET_RXQ_LOCK(rxq); VTNET_RXQ_UNLOCK(rxq); txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); VTNET_TXQ_UNLOCK(txq); } } static void vtnet_stop(struct vtnet_softc *sc) { device_t dev; if_t ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); sc->vtnet_link_active = 0; callout_stop(&sc->vtnet_tick_ch); /* Only advisory. */ vtnet_disable_interrupts(sc); #ifdef DEV_NETMAP /* Stop any pending txsync/rxsync and disable them. */ netmap_disable_all_rings(ifp); #endif /* DEV_NETMAP */ /* * Stop the host adapter. This resets it to the pre-initialized * state. It will not generate any interrupts until after it is * reinitialized. */ virtio_stop(dev); vtnet_stop_rendezvous(sc); vtnet_drain_rxtx_queues(sc); sc->vtnet_act_vq_pairs = 1; } static int vtnet_virtio_reinit(struct vtnet_softc *sc) { device_t dev; if_t ifp; uint64_t features; int error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; features = sc->vtnet_negotiated_features; /* * Re-negotiate with the host, removing any disabled receive * features. Transmit features are disabled only on our side * via if_capenable and if_hwassist.
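* Note that feature bits are only ever cleared from the previously * negotiated set here; nothing the host did not already offer can be * enabled.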
*/ if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0) features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES); if ((if_getcapenable(ifp) & IFCAP_LRO) == 0) features &= ~VTNET_LRO_FEATURES; if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0) features &= ~VIRTIO_NET_F_CTRL_VLAN; error = virtio_reinit(dev, features); if (error) { device_printf(dev, "virtio reinit error %d\n", error); return (error); } sc->vtnet_features = features; virtio_reinit_complete(dev); return (0); } static void vtnet_init_rx_filters(struct vtnet_softc *sc) { if_t ifp; ifp = sc->vtnet_ifp; if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { vtnet_rx_filter(sc); vtnet_rx_filter_mac(sc); } if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) vtnet_rx_filter_vlan(sc); } static int vtnet_init_rx_queues(struct vtnet_softc *sc) { device_t dev; if_t ifp; struct vtnet_rxq *rxq; int i, clustersz, error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; clustersz = vtnet_rx_cluster_size(sc, if_getmtu(ifp)); sc->vtnet_rx_clustersz = clustersz; if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) { sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, clustersz); KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, ("%s: too many rx mbufs %d for %d segments", __func__, sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); } else sc->vtnet_rx_nmbufs = 1; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; /* Hold the lock to satisfy asserts. */ VTNET_RXQ_LOCK(rxq); error = vtnet_rxq_populate(rxq); VTNET_RXQ_UNLOCK(rxq); if (error) { device_printf(dev, "cannot populate Rx queue %d\n", i); return (error); } } return (0); } static int vtnet_init_tx_queues(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; txq->vtntx_watchdog = 0; txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq); #ifdef DEV_NETMAP netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0); #endif /* DEV_NETMAP */ } return (0); } static int vtnet_init_rxtx_queues(struct vtnet_softc *sc) { int error; error = vtnet_init_rx_queues(sc); if (error) return (error); error = vtnet_init_tx_queues(sc); if (error) return (error); return (0); } static void vtnet_set_active_vq_pairs(struct vtnet_softc *sc) { device_t dev; int npairs; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) { sc->vtnet_act_vq_pairs = 1; return; } npairs = sc->vtnet_req_vq_pairs; if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { device_printf(dev, "cannot set active queue pairs to %d, " "falling back to 1 queue pair\n", npairs); npairs = 1; } sc->vtnet_act_vq_pairs = npairs; } static void vtnet_update_rx_offloads(struct vtnet_softc *sc) { if_t ifp; uint64_t features; int error; ifp = sc->vtnet_ifp; features = sc->vtnet_features; VTNET_CORE_LOCK_ASSERT(sc); if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) features |= VIRTIO_NET_F_GUEST_CSUM; else features &= ~VIRTIO_NET_F_GUEST_CSUM; } if (if_getcapabilities(ifp) & IFCAP_LRO && !vtnet_software_lro(sc)) { if (if_getcapenable(ifp) & IFCAP_LRO) features |= VTNET_LRO_FEATURES; else features &= ~VTNET_LRO_FEATURES; } error = vtnet_ctrl_guest_offloads(sc, features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN | VIRTIO_NET_F_GUEST_UFO)); if (error) { device_printf(sc->vtnet_dev, "%s: cannot update Rx features\n", __func__); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 
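/* Reinitialize the interface to return it to a consistent offload state. */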
vtnet_init_locked(sc, 0); } } else sc->vtnet_features = features; } static int vtnet_reinit(struct vtnet_softc *sc) { if_t ifp; int error; ifp = sc->vtnet_ifp; bcopy(if_getlladdr(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); error = vtnet_virtio_reinit(sc); if (error) return (error); vtnet_set_macaddr(sc); vtnet_set_active_vq_pairs(sc); if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) vtnet_init_rx_filters(sc); if_sethwassist(ifp, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD_IPV6, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); error = vtnet_init_rxtx_queues(sc); if (error) return (error); return (0); } static void vtnet_init_locked(struct vtnet_softc *sc, int init_mode) { if_t ifp; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; vtnet_stop(sc); #ifdef DEV_NETMAP /* Once stopped we can update the netmap flags, if necessary. */ switch (init_mode) { case VTNET_INIT_NETMAP_ENTER: nm_set_native_flags(NA(ifp)); break; case VTNET_INIT_NETMAP_EXIT: nm_clear_native_flags(NA(ifp)); break; } #endif /* DEV_NETMAP */ if (vtnet_reinit(sc) != 0) { vtnet_stop(sc); return; } if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); vtnet_update_link_status(sc); vtnet_enable_interrupts(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); #ifdef DEV_NETMAP /* Re-enable txsync/rxsync. */ netmap_enable_all_rings(ifp); #endif /* DEV_NETMAP */ } static void vtnet_init(void *xsc) { struct vtnet_softc *sc; sc = xsc; VTNET_CORE_LOCK(sc); vtnet_init_locked(sc, 0); VTNET_CORE_UNLOCK(sc); } static void vtnet_free_ctrl_vq(struct vtnet_softc *sc) { /* * The control virtqueue is only polled and therefore it should * already be empty. */ KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq)); } static void vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, struct sglist *sg, int readable, int writable) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ); VTNET_CORE_LOCK_ASSERT(sc); if (!virtqueue_empty(vq)) return; /* * Poll for the response, but the command is likely completed before * returning from the notify. */ if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) { virtqueue_notify(vq); virtqueue_poll(vq, NULL); } } static int vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr __aligned(2); uint8_t pad1; uint8_t addr[ETHER_ADDR_LEN] __aligned(8); uint8_t pad2; uint8_t ack; } s; int error; error = 0; MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC); s.hdr.class = VIRTIO_NET_CTRL_MAC; s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN); s.ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); } static int vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr __aligned(2); uint8_t pad1; uint64_t offloads __aligned(8); uint8_t pad2; uint8_t ack; } s; int error; error = 0; MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS; s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET; s.offloads = vtnet_gtoh64(sc, offloads); s.ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr __aligned(2); uint8_t pad1; struct virtio_net_ctrl_mq mq __aligned(2); uint8_t pad2; uint8_t ack; } s; int error; error = 0; MPASS(sc->vtnet_flags & VTNET_FLAG_MQ); s.hdr.class = VIRTIO_NET_CTRL_MQ; s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs); s.ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, bool on) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr __aligned(2); uint8_t pad1; uint8_t onoff; uint8_t pad2; uint8_t ack; } s; int error; error = 0; MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); s.hdr.class = VIRTIO_NET_CTRL_RX; s.hdr.cmd = cmd; s.onoff = on; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_set_promisc(struct vtnet_softc *sc, bool on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); } static int vtnet_set_allmulti(struct vtnet_softc *sc, bool on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); } static void vtnet_rx_filter(struct vtnet_softc *sc) { device_t dev; if_t ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (vtnet_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC) != 0) { device_printf(dev, "cannot %s promiscuous mode\n", if_getflags(ifp) & IFF_PROMISC ? "enable" : "disable"); } if (vtnet_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI) != 0) { device_printf(dev, "cannot %s all-multicast mode\n", if_getflags(ifp) & IFF_ALLMULTI ? 
"enable" : "disable"); } } static u_int vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt) { struct vtnet_softc *sc = arg; if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) return (0); if (ucnt < VTNET_MAX_MAC_ENTRIES) bcopy(LLADDR(sdl), &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); return (1); } static u_int vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { struct vtnet_mac_filter *filter = arg; if (mcnt < VTNET_MAX_MAC_ENTRIES) bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); return (1); } static void vtnet_rx_filter_mac(struct vtnet_softc *sc) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct vtnet_mac_filter *filter; struct sglist_seg segs[4]; struct sglist sg; if_t ifp; bool promisc, allmulti; u_int ucnt, mcnt; int error; uint8_t ack; ifp = sc->vtnet_ifp; filter = sc->vtnet_mac_filter; error = 0; MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); VTNET_CORE_LOCK_ASSERT(sc); /* Unicast MAC addresses: */ ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc); promisc = (ucnt > VTNET_MAX_MAC_ENTRIES); if (promisc) { ucnt = 0; if_printf(ifp, "more than %d MAC addresses assigned, " "falling back to promiscuous mode\n", VTNET_MAX_MAC_ENTRIES); } /* Multicast MAC addresses: */ mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter); allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES); if (allmulti) { mcnt = 0; if_printf(ifp, "more than %d multicast MAC addresses " "assigned, falling back to all-multicast mode\n", VTNET_MAX_MAC_ENTRIES); } if (promisc && allmulti) goto out; filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt); filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt); hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &filter->vmf_unicast, sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN); error |= sglist_append(&sg, &filter->vmf_multicast, sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); if (ack != VIRTIO_NET_OK) if_printf(ifp, "error setting host MAC filter table\n"); out: if (promisc && vtnet_set_promisc(sc, true) != 0) if_printf(ifp, "cannot enable promiscuous mode\n"); if (allmulti && vtnet_set_allmulti(sc, true) != 0) if_printf(ifp, "cannot enable all-multicast mode\n"); } static int vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr __aligned(2); uint8_t pad1; uint16_t tag __aligned(2); uint8_t pad2; uint8_t ack; } s; int error; error = 0; MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); s.hdr.class = VIRTIO_NET_CTRL_VLAN; s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; s.tag = vtnet_gtoh16(sc, tag); s.ack = VIRTIO_NET_ERR; sglist_init(&sg, nitems(segs), segs); error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); MPASS(error == 0 && sg.sg_nseg == nitems(segs)); if (error == 0) vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); } static void vtnet_rx_filter_vlan(struct vtnet_softc *sc) { int i, bit; uint32_t w; uint16_t tag; MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); VTNET_CORE_LOCK_ASSERT(sc); /* Enable the filter for each configured VLAN. */ for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { w = sc->vtnet_vlan_filter[i]; while ((bit = ffs(w) - 1) != -1) { w &= ~(1 << bit); tag = sizeof(w) * CHAR_BIT * i + bit; if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { device_printf(sc->vtnet_dev, "cannot enable VLAN %d filter\n", tag); } } } } static void vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { if_t ifp; int idx, bit; ifp = sc->vtnet_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; if (tag == 0 || tag > 4095) return; VTNET_CORE_LOCK(sc); if (add) sc->vtnet_vlan_filter[idx] |= (1 << bit); else sc->vtnet_vlan_filter[idx] &= ~(1 << bit); if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER && if_getdrvflags(ifp) & IFF_DRV_RUNNING && vtnet_exec_vlan_filter(sc, add, tag) != 0) { device_printf(sc->vtnet_dev, "cannot %s VLAN %d %s the host filter table\n", add ? "add" : "remove", tag, add ? "to" : "from"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_register_vlan(void *arg, if_t ifp, uint16_t tag) { if (if_getsoftc(ifp) != arg) return; vtnet_update_vlan_filter(arg, 1, tag); } static void vtnet_unregister_vlan(void *arg, if_t ifp, uint16_t tag) { if (if_getsoftc(ifp) != arg) return; vtnet_update_vlan_filter(arg, 0, tag); } static void vtnet_update_speed_duplex(struct vtnet_softc *sc) { if_t ifp; uint32_t speed; ifp = sc->vtnet_ifp; if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0) return; /* BMV: Ignore duplex. */ speed = virtio_read_dev_config_4(sc->vtnet_dev, offsetof(struct virtio_net_config, speed)); if (speed != UINT32_MAX) if_setbaudrate(ifp, IF_Mbps(speed)); } static int vtnet_is_link_up(struct vtnet_softc *sc) { uint16_t status; if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0) return (1); status = virtio_read_dev_config_2(sc->vtnet_dev, offsetof(struct virtio_net_config, status)); return ((status & VIRTIO_NET_S_LINK_UP) != 0); } static void vtnet_update_link_status(struct vtnet_softc *sc) { if_t ifp; int link; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); link = vtnet_is_link_up(sc); /* Notify if the link status has changed. */ if (link != 0 && sc->vtnet_link_active == 0) { vtnet_update_speed_duplex(sc); sc->vtnet_link_active = 1; if_link_state_change(ifp, LINK_STATE_UP); } else if (link == 0 && sc->vtnet_link_active != 0) { sc->vtnet_link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } static int vtnet_ifmedia_upd(if_t ifp __unused) { return (EOPNOTSUPP); } static void vtnet_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct vtnet_softc *sc; sc = if_getsoftc(ifp); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VTNET_CORE_LOCK(sc); if (vtnet_is_link_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_10G_T | IFM_FDX; } else ifmr->ifm_active |= IFM_NONE; VTNET_CORE_UNLOCK(sc); } static void vtnet_get_macaddr(struct vtnet_softc *sc) { if (sc->vtnet_flags & VTNET_FLAG_MAC) { virtio_read_device_config_array(sc->vtnet_dev, offsetof(struct virtio_net_config, mac), &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN); } else { /* Generate a random locally administered unicast address. 
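* (0xB2 has the locally administered bit set and the multicast bit clear, * so the result is a valid unicast address that cannot collide with a * vendor-assigned one.)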
*/ sc->vtnet_hwaddr[0] = 0xB2; arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); } } static void vtnet_set_macaddr(struct vtnet_softc *sc) { device_t dev; int error; dev = sc->vtnet_dev; if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr); if (error) device_printf(dev, "unable to set MAC address\n"); return; } /* MAC in config is read-only in modern VirtIO. */ if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) { for (int i = 0; i < ETHER_ADDR_LEN; i++) { virtio_write_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i, sc->vtnet_hwaddr[i]); } } } static void vtnet_attached_set_macaddr(struct vtnet_softc *sc) { /* Assign MAC address if it was generated. */ if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) vtnet_set_macaddr(sc); } static void vtnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void vtnet_set_rx_process_limit(struct vtnet_softc *sc) { int limit; limit = vtnet_tunable_int(sc, "rx_process_limit", vtnet_rx_process_limit); if (limit < 0) limit = INT_MAX; sc->vtnet_rx_process_limit = limit; } static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_rxq *rxq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_rxq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue"); list = SYSCTL_CHILDREN(node); stats = &rxq->vtnrx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, &stats->vrxs_ipackets, "Receive packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, &stats->vrxs_ibytes, "Receive bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, &stats->vrxs_iqdrops, "Receive drops"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, &stats->vrxs_ierrors, "Receive errors"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vrxs_csum, "Receive checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, &stats->vrxs_csum_failed, "Receive checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD, &stats->vrxs_host_lro, "Receive host segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vrxs_rescheduled, "Receive interrupt handler rescheduled"); } static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_txq *txq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_txq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue"); list = SYSCTL_CHILDREN(node); stats = &txq->vtntx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, &stats->vtxs_opackets, "Transmit packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, &stats->vtxs_obytes, "Transmit bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, &stats->vtxs_omcasts, "Transmit multicasts"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, 
&stats->vtxs_csum, "Transmit checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, &stats->vtxs_tso, "Transmit TCP segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vtxs_rescheduled, "Transmit interrupt handler rescheduled"); } static void vtnet_setup_queue_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; int i; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); } } static void vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_softc *sc) { struct vtnet_statistics *stats; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; vtnet_accum_stats(sc, &rxaccum, &txaccum); stats = &sc->vtnet_stats; stats->rx_csum_offloaded = rxaccum.vrxs_csum; stats->rx_csum_failed = rxaccum.vrxs_csum_failed; stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled; stats->tx_csum_offloaded = txaccum.vtxs_csum; stats->tx_tso_offloaded = txaccum.vtxs_tso; stats->tx_task_rescheduled = txaccum.vtxs_rescheduled; SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed", CTLFLAG_RD, &stats->mbuf_alloc_failed, "Mbuf cluster allocation failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", CTLFLAG_RD, &stats->rx_frame_too_large, "Received frame larger than the mbuf chain"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", CTLFLAG_RD, &stats->rx_enq_replacement_failed, "Enqueuing the replacement receive mbuf failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", CTLFLAG_RD, &stats->rx_mergeable_failed, "Mergeable buffers receive failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", CTLFLAG_RD, &stats->rx_csum_bad_ethtype, "Received checksum offloaded buffer with unsupported " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", CTLFLAG_RD, &stats->rx_csum_bad_ipproto, "Received checksum offloaded buffer with incorrect IP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", CTLFLAG_RD, &stats->rx_csum_bad_offset, "Received checksum offloaded buffer with incorrect offset"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto", CTLFLAG_RD, &stats->rx_csum_bad_proto, "Received checksum offloaded buffer with incorrect protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", CTLFLAG_RD, &stats->rx_csum_failed, "Received buffer checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", CTLFLAG_RD, &stats->rx_csum_offloaded, "Received buffer checksum offload succeeded"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", CTLFLAG_RD, &stats->rx_task_rescheduled, "Times the receive interrupt task rescheduled itself"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype", CTLFLAG_RD, &stats->tx_csum_unknown_ethtype, "Aborted transmit of checksum offloaded buffer with unknown " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch", CTLFLAG_RD, &stats->tx_csum_proto_mismatch, "Aborted transmit of checksum offloaded buffer because of " "mismatched protocols"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", CTLFLAG_RD, &stats->tx_tso_not_tcp, "Aborted transmit of TSO buffer with
non TCP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum", CTLFLAG_RD, &stats->tx_tso_without_csum, "Aborted transmit of TSO buffer without TCP checksum offload"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", CTLFLAG_RD, &stats->tx_defragged, "Transmit mbufs defragged"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", CTLFLAG_RD, &stats->tx_defrag_failed, "Aborted transmit of buffer because defrag failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", CTLFLAG_RD, &stats->tx_csum_offloaded, "Offloaded checksum of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", CTLFLAG_RD, &stats->tx_tso_offloaded, "Segmentation offload of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", CTLFLAG_RD, &stats->tx_task_rescheduled, "Times the transmit interrupt task rescheduled itself"); } static void vtnet_setup_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, "Number of maximum supported virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs", CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0, "Number of requested virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, "Number of active virtqueue pairs"); vtnet_setup_stat_sysctl(ctx, child, sc); } static void vtnet_load_tunables(struct vtnet_softc *sc) { sc->vtnet_lro_entry_count = vtnet_tunable_int(sc, "lro_entry_count", vtnet_lro_entry_count); if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES) sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES; sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc, "lro_mbufq_depth", vtnet_lro_mbufq_depth); } static int vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) { return (virtqueue_enable_intr(rxq->vtnrx_vq)); } static void vtnet_rxq_disable_intr(struct vtnet_rxq *rxq) { virtqueue_disable_intr(rxq->vtnrx_vq); } static int vtnet_txq_enable_intr(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; if (vtnet_txq_below_threshold(txq) != 0) return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG)); /* * The free count is above our threshold. Keep the Tx interrupt * disabled until the queue is fuller. 
*/ return (0); } static void vtnet_txq_disable_intr(struct vtnet_txq *txq) { virtqueue_disable_intr(txq->vtntx_vq); } static void vtnet_enable_rx_interrupts(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (vtnet_rxq_enable_intr(rxq) != 0) taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } } static void vtnet_enable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_enable_intr(&sc->vtnet_txqs[i]); } static void vtnet_enable_interrupts(struct vtnet_softc *sc) { vtnet_enable_rx_interrupts(sc); vtnet_enable_tx_interrupts(sc); } static void vtnet_disable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_disable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); } static void vtnet_disable_interrupts(struct vtnet_softc *sc) { vtnet_disable_rx_interrupts(sc); vtnet_disable_tx_interrupts(sc); } static int vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def) { char path[64]; snprintf(path, sizeof(path), "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob); TUNABLE_INT_FETCH(path, &def); return (def); } #ifdef DEBUGNET static void vtnet_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { struct vtnet_softc *sc; sc = if_getsoftc(ifp); VTNET_CORE_LOCK(sc); *nrxr = sc->vtnet_req_vq_pairs; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = sc->vtnet_rx_clustersz; VTNET_CORE_UNLOCK(sc); } static void vtnet_debugnet_event(if_t ifp, enum debugnet_ev event) { struct vtnet_softc *sc; static bool sw_lro_enabled = false; /* * Disable software LRO, since it would require entering the network * epoch when calling vtnet_txq_eof() in vtnet_debugnet_poll(). */ sc = if_getsoftc(ifp); switch (event) { case DEBUGNET_START: sw_lro_enabled = (sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0; if (sw_lro_enabled) sc->vtnet_flags &= ~VTNET_FLAG_SW_LRO; break; case DEBUGNET_END: if (sw_lro_enabled) sc->vtnet_flags |= VTNET_FLAG_SW_LRO; break; } } static int vtnet_debugnet_transmit(if_t ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); txq = &sc->vtnet_txqs[0]; error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE); if (error == 0) (void)vtnet_txq_notify(txq); return (error); } static int vtnet_debugnet_poll(if_t ifp, int count) { struct vtnet_softc *sc; int i; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); (void)vtnet_txq_eof(&sc->vtnet_txqs[0]); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]); return (0); } #endif /* DEBUGNET */