Index: stable/12/sys/dev/netmap/if_ptnet.c =================================================================== --- stable/12/sys/dev/netmap/if_ptnet.c (revision 344045) +++ stable/12/sys/dev/netmap/if_ptnet.c (revision 344046) @@ -1,2320 +1,2303 @@ /*- * Copyright (c) 2016, Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* Driver for ptnet paravirtualized network device. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #ifndef INET #error "INET not defined, cannot support offloadings" #endif #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t, ift_counter); #else typedef struct ifnet *if_t; #define if_getsoftc(_ifp) (_ifp)->if_softc #endif //#define PTNETMAP_STATS //#define DEBUG #ifdef DEBUG #define DBG(x) x #else /* !DEBUG */ #define DBG(x) #endif /* !DEBUG */ extern int ptnet_vnet_hdr; /* Tunable parameter */ struct ptnet_softc; struct ptnet_queue_stats { uint64_t packets; /* if_[io]packets */ uint64_t bytes; /* if_[io]bytes */ uint64_t errors; /* if_[io]errors */ uint64_t iqdrops; /* if_iqdrops */ uint64_t mcasts; /* if_[io]mcasts */ #ifdef PTNETMAP_STATS uint64_t intrs; uint64_t kicks; #endif /* PTNETMAP_STATS */ }; struct ptnet_queue { struct ptnet_softc *sc; struct resource *irq; void *cookie; int kring_id; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ struct ptnet_queue_stats stats; #ifdef PTNETMAP_STATS struct ptnet_queue_stats last_stats; #endif /* PTNETMAP_STATS */ struct taskqueue *taskq; struct task task; char lock_name[16]; }; #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) struct ptnet_softc { device_t dev; if_t ifp; struct ifmedia media; struct mtx lock; char lock_name[16]; char hwaddr[ETHER_ADDR_LEN]; /* 
Mirror of PTFEAT register. */ uint32_t ptfeatures; unsigned int vnet_hdr_len; /* PCI BARs support. */ struct resource *iomem; struct resource *msix_mem; unsigned int num_rings; unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; struct nm_csb_atok *csb_gh; struct nm_csb_ktoa *csb_hg; unsigned int min_tx_space; struct netmap_pt_guest_adapter *ptna; struct callout tick; #ifdef PTNETMAP_STATS struct timeval last_ts; #endif /* PTNETMAP_STATS */ }; #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) static int ptnet_probe(device_t); static int ptnet_attach(device_t); static int ptnet_detach(device_t); static int ptnet_suspend(device_t); static int ptnet_resume(device_t); static int ptnet_shutdown(device_t); static void ptnet_init(void *opaque); static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); static int ptnet_init_locked(struct ptnet_softc *sc); static int ptnet_stop(struct ptnet_softc *sc); static int ptnet_transmit(if_t ifp, struct mbuf *m); static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_qflush(if_t ifp); static void ptnet_tx_task(void *context, int pending); static int ptnet_media_change(if_t ifp); static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); #ifdef PTNETMAP_STATS static void ptnet_tick(void *opaque); #endif static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); static int ptnet_nm_register(struct netmap_adapter *na, int onoff); static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); static void ptnet_tx_intr(void *opaque); static void ptnet_rx_intr(void *opaque); static unsigned ptnet_rx_discard(struct netmap_kring *kring, unsigned int head); static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_rx_task(void *context, int pending); #ifdef DEVICE_POLLING static poll_handler_t ptnet_poll; #endif static device_method_t ptnet_methods[] = { DEVMETHOD(device_probe, ptnet_probe), DEVMETHOD(device_attach, ptnet_attach), DEVMETHOD(device_detach, ptnet_detach), DEVMETHOD(device_suspend, ptnet_suspend), DEVMETHOD(device_resume, ptnet_resume), DEVMETHOD(device_shutdown, ptnet_shutdown), DEVMETHOD_END }; static driver_t ptnet_driver = { "ptnet", ptnet_methods, sizeof(struct ptnet_softc) }; /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
*/ static devclass_t ptnet_devclass; DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, NULL, NULL, SI_ORDER_MIDDLE + 2); static int ptnet_probe(device_t dev) { if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { return (ENXIO); } device_set_desc(dev, "ptnet network adapter"); return (BUS_PROBE_DEFAULT); } static inline void ptnet_kick(struct ptnet_queue *pq) { #ifdef PTNETMAP_STATS pq->stats.kicks ++; #endif /* PTNETMAP_STATS */ bus_write_4(pq->sc->iomem, pq->kick, 0); } #define PTNET_BUF_RING_SIZE 4096 #define PTNET_RX_BUDGET 512 #define PTNET_RX_BATCH 1 #define PTNET_TX_BUDGET 512 #define PTNET_TX_BATCH 64 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) #define PTNET_MAX_PKT_SIZE 65536 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\ CSUM_SCTP_IPV6) #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ PTNET_CSUM_OFFLOAD_IPV6) static int ptnet_attach(device_t dev) { uint32_t ptfeatures = 0; unsigned int num_rx_rings, num_tx_rings; struct netmap_adapter na_arg; unsigned int nifp_offset; struct ptnet_softc *sc; if_t ifp; uint32_t macreg; int err, rid; int i; sc = device_get_softc(dev); sc->dev = dev; /* Setup PCI resources. */ pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->iomem == NULL) { device_printf(dev, "Failed to map I/O BAR\n"); return (ENXIO); } /* Negotiate features with the hypervisor. */ if (ptnet_vnet_hdr) { ptfeatures |= PTNETMAP_F_VNET_HDR; } bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ sc->ptfeatures = ptfeatures; num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { device_printf(dev, "CSB cannot handle that many rings (%u)\n", sc->num_rings); err = ENOMEM; goto err_path; } /* Allocate CSB and carry out CSB allocation protocol. */ sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (sc->csb_gh == NULL) { device_printf(dev, "Failed to allocate CSB\n"); err = ENOMEM; goto err_path; } sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); { /* * We use uint64_t rather than vm_paddr_t since we * need 64 bit addresses even on 32 bit platforms. */ uint64_t paddr = vtophys(sc->csb_gh); /* CSB allocation protocol: write to BAH first, then * to BAL (for both GH and HG sections). */ bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, paddr & 0xffffffff); paddr = vtophys(sc->csb_hg); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, paddr & 0xffffffff); } /* Allocate and initialize per-queue data structures. 
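The attach path above publishes the CSB physical address by splitting it into two 32-bit writes, high half (BAH) first and low half (BAL) last, so the host only latches the address once the low half arrives. A minimal user-space sketch of that split, assuming illustrative register offsets and a fake_bus_write_4() stand-in for the real bus_write_4() accessor:

#include <stdint.h>
#include <stdio.h>

/* Illustrative register offsets; the real ones are the PTNET_IO_CSB_*_BAH
 * and PTNET_IO_CSB_*_BAL registers used above. */
#define CSB_BAH 0x30    /* high 32 bits, written first */
#define CSB_BAL 0x34    /* low 32 bits, written last to commit the address */

static void fake_bus_write_4(unsigned int reg, uint32_t val)
{
    printf("write reg 0x%02x <- 0x%08x\n", reg, (unsigned int)val);
}

/* Same split as done in ptnet_attach(): BAH first, then BAL. */
static void write_csb_addr(uint64_t paddr)
{
    fake_bus_write_4(CSB_BAH, (uint32_t)((paddr >> 32) & 0xffffffff));
    fake_bus_write_4(CSB_BAL, (uint32_t)(paddr & 0xffffffff));
}

int main(void)
{
    write_csb_addr(0x123456789abcULL);
    return 0;
}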
*/ sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->queues == NULL) { err = ENOMEM; goto err_path; } sc->rxqueues = sc->queues + num_tx_rings; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; pq->atok = sc->csb_gh + i; pq->ktoa = sc->csb_hg + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); if (i >= num_tx_rings) { /* RX queue: fix kring_id. */ pq->kring_id -= num_tx_rings; } else { /* TX queue: allocate buf_ring. */ pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, M_DEVBUF, M_NOWAIT, &pq->lock); if (pq->bufring == NULL) { err = ENOMEM; goto err_path; } } } sc->min_tx_space = 64; /* Safe initial value. */ err = ptnet_irqs_init(sc); if (err) { goto err_path; } /* Setup Ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Failed to allocate ifnet\n"); err = ENOMEM; goto err_path; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; ifp->if_init = ptnet_init; ifp->if_ioctl = ptnet_ioctl; #if __FreeBSD_version >= 1100000 ifp->if_get_counter = ptnet_get_counter; #endif ifp->if_transmit = ptnet_transmit; ifp->if_qflush = ptnet_qflush; ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, ptnet_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); sc->hwaddr[0] = (macreg >> 8) & 0xff; sc->hwaddr[1] = macreg & 0xff; macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); sc->hwaddr[2] = (macreg >> 24) & 0xff; sc->hwaddr[3] = (macreg >> 16) & 0xff; sc->hwaddr[4] = (macreg >> 8) & 0xff; sc->hwaddr[5] = macreg & 0xff; ether_ifattach(ifp, sc->hwaddr); ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { /* Similarly to what the vtnet driver does, we can emulate * VLAN offloadings by inserting and removing the 802.1Q * header during transmit and receive. We are then able * to do checksum offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWTAGGING; } ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Don't enable polling by default. */ ifp->if_capabilities |= IFCAP_POLLING; #endif snprintf(sc->lock_name, sizeof(sc->lock_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); callout_init_mtx(&sc->tick, &sc->lock, 0); /* Prepare a netmap_adapter struct instance to do netmap_attach(). 
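The two reads just above recover the six-byte station address from a pair of 32-bit registers: PTNET_IO_MAC_HI carries bytes 0-1 in its low 16 bits, PTNET_IO_MAC_LO carries bytes 2-5. A standalone sketch of the same extraction, with made-up register contents standing in for the bus reads:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t hwaddr[6];
    /* Assumed register contents for MAC 00:a0:98:12:34:56. */
    uint32_t mac_hi = 0x000000a0;   /* bytes 0-1 in the low 16 bits */
    uint32_t mac_lo = 0x98123456;   /* bytes 2-5 */

    hwaddr[0] = (mac_hi >> 8) & 0xff;
    hwaddr[1] = mac_hi & 0xff;
    hwaddr[2] = (mac_lo >> 24) & 0xff;
    hwaddr[3] = (mac_lo >> 16) & 0xff;
    hwaddr[4] = (mac_lo >> 8) & 0xff;
    hwaddr[5] = mac_lo & 0xff;

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n", hwaddr[0], hwaddr[1],
        hwaddr[2], hwaddr[3], hwaddr[4], hwaddr[5]);
    return 0;
}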
*/ nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); memset(&na_arg, 0, sizeof(na_arg)); na_arg.ifp = ifp; na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); na_arg.num_tx_rings = num_tx_rings; na_arg.num_rx_rings = num_rx_rings; na_arg.nm_config = ptnet_nm_config; na_arg.nm_krings_create = ptnet_nm_krings_create; na_arg.nm_krings_delete = ptnet_nm_krings_delete; na_arg.nm_dtor = ptnet_nm_dtor; na_arg.nm_intr = ptnet_nm_intr; na_arg.nm_register = ptnet_nm_register; na_arg.nm_txsync = ptnet_nm_txsync; na_arg.nm_rxsync = ptnet_nm_rxsync; netmap_pt_guest_attach(&na_arg, nifp_offset, bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); /* Now a netmap adapter for this ifp has been allocated, and it * can be accessed through NA(ifp). We also have to initialize the CSB * pointer. */ sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); /* If virtio-net header was negotiated, set the virt_hdr_len field in * the netmap adapter, to inform users that this netmap adapter requires * the application to deal with the headers. */ ptnet_update_vnet_hdr(sc); device_printf(dev, "%s() completed\n", __func__); return (0); err_path: ptnet_detach(dev); return err; } /* Stop host sync-kloop if it was running. */ static void ptnet_device_shutdown(struct ptnet_softc *sc) { ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); } static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; ptnet_device_shutdown(sc); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); } #endif callout_drain(&sc->tick); if (sc->queues) { /* Drain taskqueues before calling if_detach. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } } if (sc->ifp) { ether_ifdetach(sc->ifp); /* Uninitialize netmap adapters for this device. 
*/ netmap_detach(sc->ifp); ifmedia_removeall(&sc->media); if_free(sc->ifp); sc->ifp = NULL; } ptnet_irqs_fini(sc); if (sc->csb_gh) { contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); sc->csb_gh = NULL; sc->csb_hg = NULL; } if (sc->queues) { for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (mtx_initialized(&pq->lock)) { mtx_destroy(&pq->lock); } if (pq->bufring != NULL) { buf_ring_free(pq->bufring, M_DEVBUF); } } free(sc->queues, M_DEVBUF); sc->queues = NULL; } if (sc->iomem) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); sc->iomem = NULL; } mtx_destroy(&sc->lock); device_printf(dev, "%s() completed\n", __func__); return (0); } static int ptnet_suspend(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_resume(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_shutdown(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); ptnet_device_shutdown(sc); return (0); } static int ptnet_irqs_init(struct ptnet_softc *sc) { int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); int nvecs = sc->num_rings; device_t dev = sc->dev; int err = ENOSPC; int cpu_cur; int i; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { device_printf(dev, "Could not find MSI-X capability\n"); return (ENXIO); } sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_mem == NULL) { device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); return (ENXIO); } if (pci_msix_count(dev) < nvecs) { device_printf(dev, "Not enough MSI-X vectors\n"); goto err_path; } err = pci_alloc_msix(dev, &nvecs); if (err) { device_printf(dev, "Failed to allocate MSI-X vectors\n"); goto err_path; } for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; rid = i + 1; pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (pq->irq == NULL) { device_printf(dev, "Failed to allocate interrupt " "for queue #%d\n", i); err = ENOSPC; goto err_path; } } cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; void (*handler)(void *) = ptnet_tx_intr; if (i >= sc->num_tx_rings) { handler = ptnet_rx_intr; } err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL /* intr_filter */, handler, pq, &pq->cookie); if (err) { device_printf(dev, "Failed to register intr handler " "for queue #%d\n", i); goto err_path; } bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); #if 0 bus_bind_intr(sc->dev, pq->irq, cpu_cur); #endif cpu_cur = CPU_NEXT(cpu_cur); } device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; static void (*handler)(void *context, int pending); handler = (i < sc->num_tx_rings) ? 
ptnet_tx_task : ptnet_rx_task; TASK_INIT(&pq->task, 0, handler, pq); pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, taskqueue_thread_enqueue, &pq->taskq); taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", device_get_nameunit(sc->dev), cpu_cur); cpu_cur = CPU_NEXT(cpu_cur); } return 0; err_path: ptnet_irqs_fini(sc); return err; } static void ptnet_irqs_fini(struct ptnet_softc *sc) { device_t dev = sc->dev; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_free(pq->taskq); pq->taskq = NULL; } if (pq->cookie) { bus_teardown_intr(dev, pq->irq, pq->cookie); pq->cookie = NULL; } if (pq->irq) { bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); pq->irq = NULL; } } if (sc->msix_mem) { pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), sc->msix_mem); sc->msix_mem = NULL; } } static void ptnet_init(void *opaque) { struct ptnet_softc *sc = opaque; PTNET_CORE_LOCK(sc); ptnet_init_locked(sc); PTNET_CORE_UNLOCK(sc); } static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct ptnet_softc *sc = if_getsoftc(ifp); device_t dev = sc->dev; struct ifreq *ifr = (struct ifreq *)data; int mask __unused, err = 0; switch (cmd) { case SIOCSIFFLAGS: device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); PTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* Network stack wants the iff to be up. */ err = ptnet_init_locked(sc); } else { /* Network stack wants the iff to be down. */ err = ptnet_stop(sc); } /* We don't need to do nothing to support IFF_PROMISC, * since that is managed by the backend port. */ PTNET_CORE_UNLOCK(sc); break; case SIOCSIFCAP: device_printf(dev, "SIOCSIFCAP %x %x\n", ifr->ifr_reqcap, ifp->if_capenable); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { struct ptnet_queue *pq; int i; if (ifr->ifr_reqcap & IFCAP_POLLING) { err = ether_poll_register(ptnet_poll, ifp); if (err) { break; } /* Stop queues and sync with taskqueues. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } ifp->if_drv_flags |= IFF_DRV_RUNNING; } else { err = ether_poll_deregister(ifp); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 1; PTNET_Q_UNLOCK(pq); } } } #endif /* DEVICE_POLLING */ ifp->if_capenable = ifr->ifr_reqcap; break; case SIOCSIFMTU: /* We support any reasonable MTU. */ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { err = EINVAL; } else { PTNET_CORE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; PTNET_CORE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return err; } static int ptnet_init_locked(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; unsigned int nm_buf_size; int ret; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return 0; /* nothing to do */ } device_printf(sc->dev, "%s\n", __func__); /* Translate offload capabilities according to if_capenable. 
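The code that follows rebuilds if_hwassist from whatever is currently enabled in if_capenable. A toy, user-space version of that translation; the CAP_ and ASSIST_ constants are illustrative stand-ins for the real IFCAP_ and CSUM_ flag values:

#include <stdio.h>

/* Illustrative stand-ins for the IFCAP_ and CSUM_ flag families found in
 * net/if.h and sys/mbuf.h. */
#define CAP_TXCSUM  0x01
#define CAP_TSO4    0x02
#define ASSIST_CSUM 0x10
#define ASSIST_TSO  0x20

/* Rebuild the hardware-assist mask from the enabled capabilities, mirroring
 * the translation at the top of ptnet_init_locked(). */
static int capenable_to_hwassist(int capenable)
{
    int hwassist = 0;

    if (capenable & CAP_TXCSUM)
        hwassist |= ASSIST_CSUM;
    if (capenable & CAP_TSO4)
        hwassist |= ASSIST_TSO;
    return hwassist;
}

int main(void)
{
    printf("hwassist 0x%x\n", capenable_to_hwassist(CAP_TXCSUM));
    printf("hwassist 0x%x\n", capenable_to_hwassist(CAP_TXCSUM | CAP_TSO4));
    return 0;
}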
*/ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; /* * Prepare the interface for netmap mode access. */ netmap_update_config(na_dr); ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_finalize() failed\n"); return ret; } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " "failed\n"); goto err_mem_finalize; } ret = netmap_mem_rings_create(na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_rings_create() " "failed\n"); goto err_rings_create; } ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); if (ret) { device_printf(sc->dev, "netmap_mem_get_lut() " "failed\n"); goto err_get_lut; } } ret = ptnet_nm_register(na_dr, 1 /* on */); if (ret) { goto err_register; } nm_buf_size = NETMAP_BUF_SIZE(na_dr); KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, sc->min_tx_space); #ifdef PTNETMAP_STATS callout_reset(&sc->tick, hz, ptnet_tick, sc); #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; return 0; err_register: memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); err_get_lut: netmap_mem_rings_delete(na_dr); err_rings_create: ptnet_nm_krings_delete(na_nm); err_mem_finalize: netmap_mem_deref(na_dr->nm_mem, na_dr); return ret; } /* To be called under core lock. */ static int ptnet_stop(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; int i; device_printf(sc->dev, "%s\n", __func__); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { return 0; /* nothing to do */ } /* Clear the driver-ready flag, and synchronize with all the queues, * so that after this loop we are sure nobody is working anymore with * the device. This scheme is taken from the vtnet driver. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->tick); for (i = 0; i < sc->num_rings; i++) { PTNET_Q_LOCK(sc->queues + i); PTNET_Q_UNLOCK(sc->queues + i); } ptnet_nm_register(na_dr, 0 /* off */); if (sc->ptna->backend_users == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } netmap_mem_deref(na_dr->nm_mem, na_dr); return 0; } static void ptnet_qflush(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); int i; /* Flush all the bufrings and do the interface flush. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct mbuf *m; PTNET_Q_LOCK(pq); if (pq->bufring) { while ((m = buf_ring_dequeue_sc(pq->bufring))) { m_freem(m); } } PTNET_Q_UNLOCK(pq); } if_qflush(ifp); } static int ptnet_media_change(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ifmedia *ifm = &sc->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return EINVAL; } return 0; } #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t ifp, ift_counter cnt) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue_stats stats[2]; int i; /* Accumulate statistics over the queues. */ memset(stats, 0, sizeof(stats)); for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; int idx = (i < sc->num_tx_rings) ? 
0 : 1; stats[idx].packets += pq->stats.packets; stats[idx].bytes += pq->stats.bytes; stats[idx].errors += pq->stats.errors; stats[idx].iqdrops += pq->stats.iqdrops; stats[idx].mcasts += pq->stats.mcasts; } switch (cnt) { case IFCOUNTER_IPACKETS: return (stats[1].packets); case IFCOUNTER_IQDROPS: return (stats[1].iqdrops); case IFCOUNTER_IERRORS: return (stats[1].errors); case IFCOUNTER_OPACKETS: return (stats[0].packets); case IFCOUNTER_OBYTES: return (stats[0].bytes); case IFCOUNTER_OMCASTS: return (stats[0].mcasts); default: return (if_get_counter_default(ifp, cnt)); } } #endif #ifdef PTNETMAP_STATS /* Called under core lock. */ static void ptnet_tick(void *opaque) { struct ptnet_softc *sc = opaque; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct ptnet_queue_stats cur = pq->stats; struct timeval now; unsigned int delta; microtime(&now); delta = now.tv_usec - sc->last_ts.tv_usec + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; delta /= 1000; /* in milliseconds */ if (delta == 0) continue; device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " "intr %lu\n", i, delta, (cur.packets - pq->last_stats.packets), (cur.kicks - pq->last_stats.kicks), (cur.intrs - pq->last_stats.intrs)); pq->last_stats = cur; } microtime(&sc->last_ts); callout_schedule(&sc->tick, hz); } #endif /* PTNETMAP_STATS */ static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) { /* We are always active, as the backend netmap port is * always open in netmap mode. */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; } static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) { /* * Write a command and read back error status, * with zero meaning success. */ bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); return bus_read_4(sc->iomem, PTNET_IO_PTCTL); } static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) { struct ptnet_softc *sc = if_getsoftc(na->ifp); info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", info->num_tx_rings, info->num_rx_rings, info->num_tx_descs, info->num_rx_descs, info->rx_buf_maxsize); return 0; } static void ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) { int i; /* Sync krings from the host, reading from * CSB. 
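The loop that follows refreshes the guest krings from the shared CSB: head and cur come from the application-to-kernel block written by the guest, hwcur and hwtail from the kernel-to-application block written by the host. A reduced sketch with stand-in structures whose field layout is simplified from nm_csb_atok/nm_csb_ktoa:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for nm_csb_atok / nm_csb_ktoa. */
struct csb_atok { uint32_t head, cur, appl_need_kick; };
struct csb_ktoa { uint32_t hwcur, hwtail, kern_need_kick; };

/* Guest-side view of one kring, reduced to the indices being synced. */
struct kring_state { uint32_t rhead, rcur, nr_hwcur, nr_hwtail; };

static void sync_from_csb(struct kring_state *kr,
    const struct csb_atok *atok, const struct csb_ktoa *ktoa)
{
    kr->rhead = atok->head;      /* guest-published producer state */
    kr->rcur = atok->cur;
    kr->nr_hwcur = ktoa->hwcur;  /* host-published consumer state */
    kr->nr_hwtail = ktoa->hwtail;
}

int main(void)
{
    struct csb_atok atok = { .head = 5, .cur = 5, .appl_need_kick = 1 };
    struct csb_ktoa ktoa = { .hwcur = 5, .hwtail = 3, .kern_need_kick = 1 };
    struct kring_state kr;

    sync_from_csb(&kr, &atok, &ktoa);
    printf("hc %u h %u c %u ht %u\n", kr.nr_hwcur, kr.rhead, kr.rcur,
        kr.nr_hwtail);
    return 0;
}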
*/ for (i = 0; i < sc->num_rings; i++) { struct nm_csb_atok *atok = sc->queues[i].atok; struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; struct netmap_kring *kring; if (i < na->num_tx_rings) { kring = na->tx_rings[i]; } else { kring = na->rx_rings[i - na->num_tx_rings]; } kring->rhead = kring->ring->head = atok->head; kring->rcur = kring->ring->cur = atok->cur; kring->nr_hwcur = ktoa->hwcur; kring->nr_hwtail = kring->rtail = kring->ring->tail = ktoa->hwtail; - ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, + nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, ktoa->hwcur, atok->head, atok->cur, ktoa->hwtail); - ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", + nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, kring->rtail, kring->ring->tail); } } static void ptnet_update_vnet_hdr(struct ptnet_softc *sc) { unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; } static int ptnet_nm_register(struct netmap_adapter *na, int onoff) { /* device-specific */ if_t ifp = na->ifp; struct ptnet_softc *sc = if_getsoftc(ifp); int native = (na == &sc->ptna->hwup.up); struct ptnet_queue *pq; - enum txrx t; int ret = 0; int i; if (!onoff) { sc->ptna->backend_users--; } /* If this is the last netmap client, guest interrupt enable flags may * be in arbitrary state. Since these flags are going to be used also * by the netdevice driver, we have to make sure to start with * notifications enabled. Also, schedule NAPI to flush pending packets * in the RX rings, since we will not receive further interrupts * until these will be processed. */ if (native && !onoff && na->active_fds == 0) { - D("Exit netmap mode, re-enable interrupts"); + nm_prinf("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->atok->appl_need_kick = 1; } } if (onoff) { if (sc->ptna->backend_users == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ktoa->kern_need_kick = 1; pq->atok->appl_need_kick = (!(ifp->if_capenable & IFCAP_POLLING) && i >= sc->num_tx_rings); } /* Set the virtio-net header length. */ ptnet_update_vnet_hdr(sc); /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } /* Align the guest krings and rings to the state stored * in the CSB. 
*/ ptnet_sync_from_csb(sc, na); } /* If not native, don't call nm_set_native_flags, since we don't want * to replace if_transmit method, nor set NAF_NETMAP_ON */ if (native) { - for_rx_tx(t) { - for (i = 0; i <= nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_on(kring)) { - kring->nr_mode = NKR_NETMAP_ON; - } - } - } + netmap_krings_mode_commit(na, onoff); nm_set_native_flags(na); } } else { if (native) { nm_clear_native_flags(na); - for_rx_tx(t) { - for (i = 0; i <= nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_off(kring)) { - kring->nr_mode = NKR_NETMAP_OFF; - } - } - } + netmap_krings_mode_commit(na, onoff); } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); } } if (onoff) { sc->ptna->backend_users++; } return ret; } static int ptnet_nm_txsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static void ptnet_nm_intr(struct netmap_adapter *na, int onoff) { struct ptnet_softc *sc = if_getsoftc(na->ifp); int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->atok->appl_need_kick = onoff; } } static void ptnet_tx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { return; } /* Schedule the tasqueue to flush process transmissions requests. * However, vtnet, if_em and if_igb just call ptnet_transmit() here, * at least when using MSI-X interrupts. The if_em driver, instead * schedule taskqueue when using legacy interrupts. */ taskqueue_enqueue(pq->taskq, &pq->task); } static void ptnet_rx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; unsigned int unused; DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { return; } /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, * receive-side processing is executed directly in the interrupt * service routine. Alternatively, we may schedule the taskqueue. */ ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } /* The following offloadings-related functions are taken from the vtnet * driver, but the same functionality is required for the ptnet driver. * As a temporary solution, I copied this code from vtnet and I started * to generalize it (taking away driver-specific statistic accounting), * making as little modifications as possible. * In the future we need to share these functions between vtnet and ptnet. 
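ptnet_tx_offload_ctx() below works out the ethertype and the offset of the L3 header, skipping a single 802.1Q tag if one is present. A reduced user-space sketch of that parse over a hand-built frame (constants and frame contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>  /* ntohs */

#define ETHERTYPE_VLAN 0x8100
#define ETHERTYPE_IP   0x0800
#define ETH_HDR_LEN    14
#define VLAN_HDR_LEN   18

/* Return the ethertype and store the L3 offset, skipping one 802.1Q tag. */
static int parse_etype(const uint8_t *frame, int *l3_off)
{
    uint16_t etype;

    memcpy(&etype, frame + 12, sizeof(etype));
    if (ntohs(etype) == ETHERTYPE_VLAN) {
        memcpy(&etype, frame + 16, sizeof(etype));
        *l3_off = VLAN_HDR_LEN;
    } else {
        *l3_off = ETH_HDR_LEN;
    }
    return ntohs(etype);
}

int main(void)
{
    uint8_t frame[64] = { 0 };
    int off, etype;

    /* Tagged IPv4 frame: 802.1Q TPID at offset 12, inner type at 16. */
    frame[12] = 0x81; frame[13] = 0x00;
    frame[16] = 0x08; frame[17] = 0x00;
    etype = parse_etype(frame, &off);
    printf("etype 0x%04x (%s), l3 offset %d\n", etype,
        etype == ETHERTYPE_IP ? "IPv4" : "other", off);
    return 0;
}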
*/ static int ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) { struct ether_vlan_header *evh; int offset; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: /* Here we should increment the tx_csum_bad_ethtype counter. */ return (EINVAL); } return (0); } static int ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, int offset, bool allow_ecn, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct tcphdr *tcp, tcphdr; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if (!allow_ecn) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } /* Here we should increment tx_tso counter. */ return (0); } static struct mbuf * ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, struct virtio_net_hdr *hdr) { int flags, etype, csum_start, proto, error; flags = m->m_pkthdr.csum_flags; error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; /* Here we should increment the tx_csum counter. */ } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. * Here we should increment the tx_tso_not_tcp * counter. 
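ptnet_tx_offload_tso() above fills the TSO fields of the virtio-net header: hdr_len spans the headers up to and including TCP, gso_size is the advertised segment size, gso_type selects TCPv4 or TCPv6. A reduced sketch using a local copy of the header layout; the GSO_TCPV4 value is a stand-in for VIRTIO_NET_HDR_GSO_TCPV4:

#include <stdint.h>
#include <stdio.h>

/* Local copy of the legacy virtio-net header layout, only for illustration;
 * the real definition lives in dev/virtio/network/virtio_net.h. */
struct vnet_hdr {
    uint8_t flags;
    uint8_t gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

#define GSO_TCPV4 1 /* stand-in value */

/* Fill the TSO-related fields the way ptnet_tx_offload_tso() does:
 * hdr_len covers everything up to and including the TCP header. */
static void fill_tso(struct vnet_hdr *hdr, unsigned int l4_offset,
    unsigned int tcp_hdr_words, unsigned int mss)
{
    hdr->hdr_len = l4_offset + (tcp_hdr_words << 2);
    hdr->gso_size = mss;
    hdr->gso_type = GSO_TCPV4;
}

int main(void)
{
    struct vnet_hdr hdr = { 0 };

    /* Ethernet (14) + IPv4 (20) = 34, TCP header of 5 words, MSS 1448. */
    fill_tso(&hdr, 34, 5, 1448);
    printf("hdr_len %u gso_size %u gso_type %u\n", hdr.hdr_len,
        hdr.gso_size, hdr.gso_type);
    return 0;
}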
*/ goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, allow_ecn, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static void ptnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. */ static int ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. */ return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. */ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* Here we should increment the rx_csum_bad_offset counter. */ return (1); } return (0); } static int ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { int offset, proto; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. */ return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. 
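ptnet_rx_csum_by_offset() above keys the CSUM_ flag choice off the checksum field offset inside the L4 header, exploiting the fact that the UDP, TCP and SCTP checksum offsets differ. A self-contained sketch of that offsetof() dispatch, using minimal local header layouts instead of the real netinet structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal header layouts, just enough for the offsetof() dispatch; the real
 * definitions live in netinet/udp.h, netinet/tcp.h and netinet/sctp.h. */
struct udp_hdr { uint16_t sport, dport, len, sum; };
struct tcp_hdr {
    uint16_t sport, dport;
    uint32_t seq, ack;
    uint16_t off_flags, win, sum, urp;
};
struct sctp_hdr { uint16_t sport, dport; uint32_t vtag, checksum; };

/* Classify a virtio-net csum_offset the way ptnet_rx_csum_by_offset() does:
 * the checksum field offsets happen to be distinct across protocols. */
static const char *classify(uint16_t csum_offset)
{
    switch (csum_offset) {
    case offsetof(struct udp_hdr, sum):         /* 6 */
    case offsetof(struct tcp_hdr, sum):         /* 16 */
        return "L4 checksum valid (pseudo-header included)";
    case offsetof(struct sctp_hdr, checksum):   /* 8 */
        return "SCTP checksum valid";
    default:
        return "unrecognized offset";
    }
}

int main(void)
{
    printf("offset 6: %s\n", classify(6));
    printf("offset 8: %s\n", classify(8));
    printf("offset 16: %s\n", classify(16));
    printf("offset 10: %s\n", classify(10));
    return 0;
}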
*/ #if 0 if_printf(ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. */ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); else error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); return (error); } /* End of offloading-related functions to be shared with vtnet. */ static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur * and head in the CSB. */ ring->head = ring->cur = head; /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); /* Kick the host if needed. */ if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { atok->sync_flags = sync_flags; ptnet_kick(pq); } } #define PTNET_TX_NOSPACE(_h, _k, _min) \ ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ (_k)->rtail - (_h)) < (_min) /* This function may be called by the network stack, or by * by the taskqueue thread. */ static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; unsigned int count = 0; unsigned int minspace; unsigned int head; unsigned int lim; struct mbuf *mhead; struct mbuf *mf; int nmbuf_bytes; uint8_t *nmbuf; if (!PTNET_Q_TRYLOCK(pq)) { /* We failed to acquire the lock, schedule the taskqueue. 
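The PTNET_TX_NOSPACE macro defined above computes how many slots remain between the producer index and the reclaim point of a circular ring and compares the result with min_tx_space. The same arithmetic as a small standalone function:

#include <stdio.h>

/* Free slots between the producer (head) and the reclaim point (tail) in a
 * circular ring of num_slots entries, as in the PTNET_TX_NOSPACE macro. */
static unsigned int ring_space(unsigned int head, unsigned int tail,
    unsigned int num_slots)
{
    return (head < tail ? 0 : num_slots) + tail - head;
}

int main(void)
{
    /* 256-slot ring: plenty of space, then nearly full. */
    printf("space %u\n", ring_space(10, 200, 256));  /* 190 */
    printf("space %u\n", ring_space(200, 10, 256));  /* 66 */
    return 0;
}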
*/ - RD(1, "Deferring TX work"); + nm_prlim(1, "Deferring TX work"); if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } return 0; } if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { PTNET_Q_UNLOCK(pq); - RD(1, "Interface is down"); + nm_prlim(1, "Interface is down"); return ENETDOWN; } atok = pq->atok; ktoa = pq->ktoa; kring = na->tx_rings[pq->kring_id]; ring = kring->ring; lim = kring->nkr_num_slots - 1; head = ring->head; minspace = sc->min_tx_space; while (count < budget) { if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ atok->appl_need_kick = 1; /* Double check. We need a full barrier to * prevent the store to atok->appl_need_kick * to be reordered with the load from * ktoa->hwcur and ktoa->hwtail (store-load * barrier). */ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; } - RD(1, "Found more slots by doublecheck"); + nm_prlim(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ atok->appl_need_kick = 0; } } mhead = drbr_peek(ifp, pq->bufring); if (!mhead) { break; } /* Initialize transmission state variables. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; /* If needed, prepare the virtio-net header at the beginning * of the first slot. */ if (have_vnet_hdr) { struct virtio_net_hdr *vh = (struct virtio_net_hdr *)nmbuf; /* For performance, we could replace this memset() with * two 8-bytes-wide writes. */ memset(nmbuf, 0, PTNET_HDR_SIZE); if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { mhead = ptnet_tx_offload(ifp, mhead, false, vh); if (unlikely(!mhead)) { /* Packet dropped because errors * occurred while preparing the vnet * header. Let's go ahead with the next * packet. */ pq->stats.errors ++; drbr_advance(ifp, pq->bufring); continue; } } - ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " + nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x " "csum_start %u csum_ofs %u hdr_len = %u " "gso_size %u gso_type %x", __func__, mhead->m_pkthdr.csum_flags, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_bytes += PTNET_HDR_SIZE; } for (mf = mhead; mf; mf = mf->m_next) { uint8_t *mdata = mf->m_data; int mlen = mf->m_len; for (;;) { int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; if (mlen < copy) { copy = mlen; } memcpy(nmbuf, mdata, copy); mdata += copy; mlen -= copy; nmbuf += copy; nmbuf_bytes += copy; if (!mlen) { break; } slot->len = nmbuf_bytes; slot->flags = NS_MOREFRAG; head = nm_next(head, lim); KASSERT(head != ring->tail, ("Unexpectedly run out of TX space")); slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; } } /* Complete last slot and update head. */ slot->len = nmbuf_bytes; slot->flags = 0; head = nm_next(head, lim); /* Consume the packet just processed. */ drbr_advance(ifp, pq->bufring); /* Copy the packet to listeners. 
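The copy loop above spreads each outgoing mbuf chain across consecutive netmap slots, setting NS_MOREFRAG on every slot except the last one of a packet. A simplified standalone sketch of that splitting, with a toy slot size and a flat buffer instead of an mbuf chain (the vnet header step is omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOT_SIZE   8   /* toy buffer size; real slots are NETMAP_BUF_SIZE */
#define NS_MOREFRAG 0x01
#define NUM_SLOTS   16

struct slot {
    uint8_t buf[SLOT_SIZE];
    unsigned int len;
    unsigned int flags;
};

/* Copy one packet into consecutive slots; every slot except the last carries
 * the "more fragments" flag, mirroring the shape of the TX copy loop. */
static unsigned int emit_packet(struct slot *ring, unsigned int head,
    const uint8_t *data, unsigned int len)
{
    while (len > 0) {
        unsigned int copy = len < SLOT_SIZE ? len : SLOT_SIZE;

        memcpy(ring[head].buf, data, copy);
        ring[head].len = copy;
        ring[head].flags = (len > copy) ? NS_MOREFRAG : 0;
        data += copy;
        len -= copy;
        head = (head + 1) % NUM_SLOTS;
    }
    return head;    /* new head, one past the last fragment */
}

int main(void)
{
    struct slot ring[NUM_SLOTS];
    uint8_t pkt[20];
    unsigned int i, head;

    memset(ring, 0, sizeof(ring));
    memset(pkt, 0xab, sizeof(pkt));
    head = emit_packet(ring, 0, pkt, sizeof(pkt));
    for (i = 0; i < head; i++)
        printf("slot %u: len %u flags %u\n", i, ring[i].len,
            ring[i].flags);
    return 0;
}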
*/ ETHER_BPF_MTAP(ifp, mhead); pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; if (mhead->m_flags & M_MCAST) { pq->stats.mcasts ++; } m_freem(mhead); count ++; if (++batch_count == PTNET_TX_BATCH) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); batch_count = 0; } } if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); } if (count >= budget && may_resched) { - DBG(RD(1, "out of budget: resched, %d mbufs pending\n", + DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n", drbr_inuse(ifp, pq->bufring))); taskqueue_enqueue(pq->taskq, &pq->task); } PTNET_Q_UNLOCK(pq); return count; } static int ptnet_transmit(if_t ifp, struct mbuf *m) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue *pq; unsigned int queue_idx; int err; DBG(device_printf(sc->dev, "transmit %p\n", m)); /* Insert 802.1Q header if needed. */ if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if (m == NULL) { return ENOBUFS; } m->m_flags &= ~M_VLANTAG; } /* Get the flow-id if available. */ queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? m->m_pkthdr.flowid : curcpu; if (unlikely(queue_idx >= sc->num_tx_rings)) { queue_idx %= sc->num_tx_rings; } pq = sc->queues + queue_idx; err = drbr_enqueue(ifp, pq->bufring, m); if (err) { /* ENOBUFS when the bufring is full */ - RD(1, "%s: drbr_enqueue() failed %d\n", + nm_prlim(1, "%s: drbr_enqueue() failed %d\n", __func__, err); pq->stats.errors ++; return err; } if (ifp->if_capenable & IFCAP_POLLING) { /* If polling is on, the transmit queues will be * drained by the poller. */ return 0; } err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); return (err < 0) ? err : 0; } static unsigned int ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot = ring->slot + head; for (;;) { head = nm_next(head, kring->nkr_num_slots - 1); if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { break; } slot = ring->slot + head; } return head; } static inline struct mbuf * ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) { uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; do { unsigned int copy; if (mtail->m_len == MCLBYTES) { struct mbuf *mf; mf = m_getcl(M_NOWAIT, MT_DATA, 0); if (unlikely(!mf)) { return NULL; } mtail->m_next = mf; mtail = mf; mdata = mtod(mtail, uint8_t *); mtail->m_len = 0; } copy = MCLBYTES - mtail->m_len; if (nmbuf_len < copy) { copy = nmbuf_len; } memcpy(mdata, nmbuf, copy); nmbuf += copy; nmbuf_len -= copy; mdata += copy; mtail->m_len += copy; } while (nmbuf_len); return mtail; } static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; struct netmap_adapter *na = &sc->ptna->dr.up; struct netmap_kring *kring = na->rx_rings[pq->kring_id]; struct netmap_ring *ring = kring->ring; unsigned int const lim = kring->nkr_num_slots - 1; unsigned int batch_count = 0; if_t ifp = sc->ifp; unsigned int count = 0; uint32_t head; PTNET_Q_LOCK(pq); if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { goto unlock; } kring->nr_kflags &= ~NKR_PENDINTR; head = ring->head; while (count < budget) { uint32_t prev_head = head; struct mbuf *mhead, *mtail; struct virtio_net_hdr *vh; struct netmap_slot *slot; unsigned int nmbuf_len; uint8_t *nmbuf; int deliver = 1; /* the mbuf to the network stack. 
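ptnet_transmit() above picks the TX queue from the mbuf flow id when a hash type is present, otherwise from the current CPU, and folds the index into the number of TX rings. A standalone sketch of that selection (parameter names are illustrative):

#include <stdio.h>

/* Pick a TX queue from a packet flow id, or from the current CPU when no
 * flow id is available, folding it into the number of TX rings. */
static unsigned int pick_tx_queue(int have_flowid, unsigned int flowid,
    unsigned int curcpu, unsigned int num_tx_rings)
{
    unsigned int queue_idx = have_flowid ? flowid : curcpu;

    if (queue_idx >= num_tx_rings)
        queue_idx %= num_tx_rings;
    return queue_idx;
}

int main(void)
{
    printf("q = %u\n", pick_tx_queue(1, 13, 0, 4)); /* 13 % 4 = 1 */
    printf("q = %u\n", pick_tx_queue(0, 0, 6, 4));  /* cpu 6 -> 2 */
    return 0;
}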
*/ host_sync: if (head == ring->tail) { /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ atok->appl_need_kick = 1; /* Double check for more completed RX slots. * We need a full barrier to prevent the store * to atok->appl_need_kick to be reordered with * the load from ktoa->hwcur and ktoa->hwtail * (store-load barrier). */ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(head == ring->tail)) { break; } atok->appl_need_kick = 0; } } /* Initialize ring state variables, possibly grabbing the * virtio-net header. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; vh = (struct virtio_net_hdr *)nmbuf; if (have_vnet_hdr) { if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { /* There is no good reason why host should * put the header in multiple netmap slots. * If this is the case, discard. */ - RD(1, "Fragmented vnet-hdr: dropping"); + nm_prlim(1, "Fragmented vnet-hdr: dropping"); head = ptnet_rx_discard(kring, head); pq->stats.iqdrops ++; deliver = 0; goto skip; } - ND(1, "%s: vnet hdr: flags %x csum_start %u " + nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u " "csum_ofs %u hdr_len = %u gso_size %u " "gso_type %x", __func__, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_len -= PTNET_HDR_SIZE; } /* Allocate the head of a new mbuf chain. * We use m_getcl() to allocate an mbuf with standard cluster * size (MCLBYTES). In the future we could use m_getjcl() * to choose different sizes. */ mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(mhead == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf " "head\n", __func__); pq->stats.errors ++; break; } /* Initialize the mbuf state variables. */ mhead->m_pkthdr.len = nmbuf_len; mtail->m_len = 0; /* Scan all the netmap slots containing the current packet. */ for (;;) { DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " "len %u, flags %u\n", __func__, head, ring->tail, slot->len, slot->flags)); mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); if (unlikely(!mtail)) { /* Ouch. We ran out of memory while processing * a packet. We have to restore the previous * head position, free the mbuf chain, and * schedule the taskqueue to give the packet * another chance. */ device_printf(sc->dev, "%s: failed to allocate" " mbuf frag, reset head %u --> %u\n", __func__, head, prev_head); head = prev_head; m_freem(mhead); pq->stats.errors ++; if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } goto escape; } /* We have to increment head irrespective of the * NS_MOREFRAG being set or not. */ head = nm_next(head, lim); if (!(slot->flags & NS_MOREFRAG)) { break; } if (unlikely(head == ring->tail)) { /* The very last slot prepared by the host has * the NS_MOREFRAG set. Drop it and continue * the outer cycle (to do the double-check). */ - RD(1, "Incomplete packet: dropping"); + nm_prlim(1, "Incomplete packet: dropping"); m_freem(mhead); pq->stats.iqdrops ++; goto host_sync; } slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; mhead->m_pkthdr.len += nmbuf_len; } mhead->m_pkthdr.rcvif = ifp; mhead->m_pkthdr.csum_flags = 0; /* Store the queue idx in the packet header. 
*/ mhead->m_pkthdr.flowid = pq->kring_id; M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh; eh = mtod(mhead, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ptnet_vlan_tag_remove(mhead); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) vh->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID))) { if (unlikely(ptnet_rx_csum(mhead, vh))) { m_freem(mhead); - RD(1, "Csum offload error: dropping"); + nm_prlim(1, "Csum offload error: dropping"); pq->stats.iqdrops ++; deliver = 0; } } skip: count ++; if (++batch_count >= PTNET_RX_BATCH) { /* Some packets have been (or will be) pushed to the network * stack. We need to update the CSB to tell the host about * the new ring->cur and ring->head (RX buffer refill). */ ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); batch_count = 0; } if (likely(deliver)) { pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; PTNET_Q_UNLOCK(pq); (*ifp->if_input)(ifp, mhead); PTNET_Q_LOCK(pq); /* The ring->head index (and related indices) are * updated under pq lock by ptnet_ring_update(). * Since we dropped the lock to call if_input(), we * must reload ring->head and restart processing the * ring from there. */ head = ring->head; if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { /* The interface has gone down while we didn't * have the lock. Stop any processing and exit. */ goto unlock; } } } escape: if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); } if (count >= budget && may_resched) { /* If we ran out of budget or the double-check found new * slots to process, schedule the taskqueue. */ - DBG(RD(1, "out of budget: resched h %u t %u\n", + DBG(nm_prlim(1, "out of budget: resched h %u t %u\n", head, ring->tail)); taskqueue_enqueue(pq->taskq, &pq->task); } unlock: PTNET_Q_UNLOCK(pq); return count; } static void ptnet_rx_task(void *context, int pending) { struct ptnet_queue *pq = context; - DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); + DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_tx_task(void *context, int pending) { struct ptnet_queue *pq = context; - DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); + DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); } #ifdef DEVICE_POLLING /* We don't need to handle differently POLL_AND_CHECK_STATUS and * POLL_ONLY, since we don't have an Interrupt Status Register. */ static int ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) { struct ptnet_softc *sc = if_getsoftc(ifp); unsigned int queue_budget; unsigned int count = 0; bool borrow = false; int i; KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); queue_budget = MAX(budget / sc->num_rings, 1); - RD(1, "Per-queue budget is %d", queue_budget); + nm_prlim(1, "Per-queue budget is %d", queue_budget); while (budget) { unsigned int rcnt = 0; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (borrow) { queue_budget = MIN(queue_budget, budget); if (queue_budget == 0) { break; } } if (i < sc->num_tx_rings) { rcnt += ptnet_drain_transmit_queue(pq, queue_budget, false); } else { rcnt += ptnet_rx_eof(pq, queue_budget, false); } } if (!rcnt) { /* A scan of the queues gave no result, we can * stop here. 
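ptnet_poll() splits the global polling budget evenly across the queues and, on later passes, lets busier queues borrow whatever budget is left; the clamping of rcnt continues just below. A self-contained sketch of the same scheme driven by a fake per-queue backlog:

#include <stdio.h>

#define NUM_RINGS 4

/* Toy per-queue work function: pretend each queue has a fixed backlog. */
static unsigned int process_queue(unsigned int idx, unsigned int budget)
{
    static unsigned int backlog[NUM_RINGS] = { 10, 3, 0, 25 };
    unsigned int done = backlog[idx] < budget ? backlog[idx] : budget;

    backlog[idx] -= done;
    return done;
}

/* Split a global poll budget evenly across queues, then keep looping and
 * let busier queues borrow the leftover budget. */
static unsigned int poll_all(unsigned int budget)
{
    unsigned int queue_budget = budget / NUM_RINGS;
    unsigned int count = 0;
    int borrow = 0;

    if (queue_budget == 0)
        queue_budget = 1;
    while (budget) {
        unsigned int rcnt = 0, i;

        for (i = 0; i < NUM_RINGS; i++) {
            if (borrow) {
                queue_budget = queue_budget < budget ?
                    queue_budget : budget;
                if (queue_budget == 0)
                    break;
            }
            rcnt += process_queue(i, queue_budget);
        }
        if (rcnt == 0)
            break;
        if (rcnt > budget)
            rcnt = budget;
        count += rcnt;
        budget -= rcnt;
        borrow = 1;
    }
    return count;
}

int main(void)
{
    printf("processed %u packets\n", poll_all(32));
    return 0;
}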
*/ break; } if (rcnt > budget) { /* This may happen when initial budget < sc->num_rings, * since one packet budget is given to each queue * anyway. Just pretend we didn't eat "so much". */ rcnt = budget; } count += rcnt; budget -= rcnt; borrow = true; } return count; } #endif /* DEVICE_POLLING */ Index: stable/12/sys/dev/netmap/if_vtnet_netmap.h =================================================================== --- stable/12/sys/dev/netmap/if_vtnet_netmap.h (revision 344045) +++ stable/12/sys/dev/netmap/if_vtnet_netmap.h (revision 344046) @@ -1,540 +1,506 @@ /* * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ */ #include #include #include #include /* vtophys ? */ #include /* * Return 1 if the queue identified by 't' and 'idx' is in netmap mode. */ static int vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx) { struct netmap_adapter *na = NA(sc->vtnet_ifp); if (!nm_native_on(na)) return 0; if (t == NR_RX) return !!(idx < na->num_rx_rings && na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON); return !!(idx < na->num_tx_rings && na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON); } static void vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx) { void *cookie; int deq = 0; while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) { if (netmap_bufs) { /* These are netmap buffers: there is nothing to do. */ } else { /* These are mbufs that we need to free. */ struct mbuf *m; if (t == NR_TX) { struct vtnet_tx_header *txhdr = cookie; m = txhdr->vth_mbuf; m_freem(m); uma_zfree(vtnet_tx_header_zone, txhdr); } else { m = cookie; m_freem(m); } } deq++; } if (deq) nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)", deq, nm_txrx2str(t), idx, netmap_bufs); } /* Register and unregister. */ static int vtnet_netmap_reg(struct netmap_adapter *na, int state) { struct ifnet *ifp = na->ifp; struct vtnet_softc *sc = ifp->if_softc; int success; - enum txrx t; int i; /* Drain the taskqueues to make sure that there are no worker threads * accessing the virtqueues. */ vtnet_drain_taskqueues(sc); VTNET_CORE_LOCK(sc); /* We need nm_netmap_on() to return true when called by * vtnet_init_locked() below. 
*/ if (state) nm_set_native_flags(na); /* We need to trigger a device reset in order to unexpose guest buffers * published to the host. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* Get pending used buffers. The way they are freed depends on whether * they are netmap buffer or they are mbufs. We can tell apart the two * cases by looking at kring->nr_mode, before this is possibly updated * in the loop below. */ for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { struct vtnet_txq *txq = &sc->vtnet_txqs[i]; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; struct netmap_kring *kring; VTNET_TXQ_LOCK(txq); kring = NMR(na, NR_TX)[i]; vtnet_free_used(txq->vtntx_vq, kring->nr_mode == NKR_NETMAP_ON, NR_TX, i); VTNET_TXQ_UNLOCK(txq); VTNET_RXQ_LOCK(rxq); kring = NMR(na, NR_RX)[i]; vtnet_free_used(rxq->vtnrx_vq, kring->nr_mode == NKR_NETMAP_ON, NR_RX, i); VTNET_RXQ_UNLOCK(rxq); } vtnet_init_locked(sc); success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO; if (state) { - for_rx_tx(t) { - /* Hardware rings. */ - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_on(kring)) - kring->nr_mode = NKR_NETMAP_ON; - } - - /* Host rings. */ - for (i = 0; i < nma_get_host_nrings(na, t); i++) { - struct netmap_kring *kring = - NMR(na, t)[nma_get_nrings(na, t) + i]; - - if (nm_kring_pending_on(kring)) - kring->nr_mode = NKR_NETMAP_ON; - } - } + netmap_krings_mode_commit(na, state); + nm_set_native_flags(na); } else { nm_clear_native_flags(na); - for_rx_tx(t) { - /* Hardware rings. */ - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_off(kring)) - kring->nr_mode = NKR_NETMAP_OFF; - } - - /* Host rings. */ - for (i = 0; i < nma_get_host_nrings(na, t); i++) { - struct netmap_kring *kring = - NMR(na, t)[nma_get_nrings(na, t) + i]; - - if (nm_kring_pending_off(kring)) - kring->nr_mode = NKR_NETMAP_OFF; - } - } + netmap_krings_mode_commit(na, state); } VTNET_CORE_UNLOCK(sc); return success; } /* Reconcile kernel and user view of the transmit ring. */ static int vtnet_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; /* device-specific */ struct vtnet_softc *sc = ifp->if_softc; struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr]; struct virtqueue *vq = txq->vtntx_vq; int interrupts = !(kring->nr_kflags & NKR_NOINTR); u_int n; /* * First part: process new packets to send. */ rmb(); nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ struct sglist *sg = txq->vtntx_sg; for (; nm_i != head; nm_i = nm_next(nm_i, lim)) { /* we use an empty header here */ struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err; NM_CHECK_ADDR_LEN(na, addr, len); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); /* Initialize the scatterlist, expose it to the hypervisor, * and kick the hypervisor (if necessary). 
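* Each slot becomes a 2-element sglist: the shared virtio-net header first, then the netmap buffer itself.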
*/ sglist_reset(sg); // cheap err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size); err |= sglist_append_phys(sg, paddr, len); KASSERT(err == 0, ("%s: cannot append to sglist %d", __func__, err)); err = virtqueue_enqueue(vq, /*cookie=*/txq, sg, /*readable=*/sg->sg_nseg, /*writeable=*/0); if (unlikely(err)) { if (err != ENOSPC) nm_prerr("virtqueue_enqueue(%s) failed: %d", kring->name, err); break; } } virtqueue_notify(vq); /* Update hwcur depending on where we stopped. */ kring->nr_hwcur = nm_i; /* note we migth break early */ } /* Free used slots. We only consider our own used buffers, recognized * by the token we passed to virtqueue_enqueue. */ n = 0; for (;;) { void *token = virtqueue_dequeue(vq, NULL); if (token == NULL) break; if (unlikely(token != (void *)txq)) nm_prerr("BUG: TX token mismatch"); else n++; } if (n > 0) { kring->nr_hwtail += n; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; } if (interrupts && virtqueue_nfree(vq) < 32) virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG); return 0; } static int vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int const lim = kring->nkr_num_slots - 1; /* device-specific */ struct vtnet_softc *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; /* use a local sglist, default might be short */ struct sglist_seg ss[2]; struct sglist sg = { ss, 0, 0, 2 }; for (; nm_i != head; nm_i = nm_next(nm_i, lim)) { struct netmap_slot *slot = &ring->slot[nm_i]; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err; if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */ if (netmap_ring_reinit(kring)) return -1; } slot->flags &= ~NS_BUF_CHANGED; sglist_reset(&sg); err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size); err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na)); KASSERT(err == 0, ("%s: cannot append to sglist %d", __func__, err)); /* writable for the host */ err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg, /*readable=*/0, /*writeable=*/sg.sg_nseg); if (unlikely(err)) { if (err != ENOSPC) nm_prerr("virtqueue_enqueue(%s) failed: %d", kring->name, err); break; } } return nm_i; } /* * Publish netmap buffers on a RX virtqueue. * Returns -1 if this virtqueue is not being opened in netmap mode. * If the virtqueue is being opened in netmap mode, return 0 on success and * a positive error code on failure. */ static int vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq) { struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp); struct netmap_kring *kring; int error; if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings) return -1; kring = na->rx_rings[rxq->vtnrx_id]; if (!(nm_kring_pending_on(kring) || kring->nr_pending_mode == NKR_NETMAP_ON)) return -1; /* Expose all the RX netmap buffers. Note that the number of * netmap slots in the RX ring matches the maximum number of * 2-elements sglist that the RX virtqueue can accommodate. */ error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc); virtqueue_notify(rxq->vtnrx_vq); return error < 0 ? ENXIO : 0; } /* Reconcile kernel and user view of the receive ring. 
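* First import the buffers that the host has completed (advancing hwtail), then return to the host the slots that userspace has released (advancing hwcur toward rhead).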
*/ static int vtnet_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || (kring->nr_kflags & NKR_PENDINTR); int interrupts = !(kring->nr_kflags & NKR_NOINTR); /* device-specific */ struct vtnet_softc *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; rmb(); /* * First part: import newly received packets. * Only accept our own buffers (matching the token). We should only get * matching buffers. We may need to stop early to avoid hwtail to overrun * hwcur. */ if (netmap_no_pendintr || force_update) { uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim); void *token; vtnet_rxq_disable_intr(rxq); nm_i = kring->nr_hwtail; while (nm_i != hwtail_lim) { int len; token = virtqueue_dequeue(vq, &len); if (token == NULL) { if (interrupts && vtnet_rxq_enable_intr(rxq)) { vtnet_rxq_disable_intr(rxq); continue; } break; } if (unlikely(token != (void *)rxq)) { nm_prerr("BUG: RX token mismatch"); } else { /* Skip the virtio-net header. */ len -= sc->vtnet_hdr_size; if (unlikely(len < 0)) { - RD(1, "Truncated virtio-net-header, " + nm_prlim(1, "Truncated virtio-net-header, " "missing %d bytes", -len); len = 0; } ring->slot[nm_i].len = len; ring->slot[nm_i].flags = 0; nm_i = nm_next(nm_i, lim); } } kring->nr_hwtail = nm_i; kring->nr_kflags &= ~NKR_PENDINTR; } - ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur, + nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur, kring->nr_hwcur, kring->nr_hwtail); /* * Second part: skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; /* netmap ring index */ if (nm_i != head) { int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head); if (nm_j < 0) return nm_j; kring->nr_hwcur = nm_j; virtqueue_notify(vq); } - ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur, + nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur, ring->tail, kring->nr_hwcur, kring->nr_hwtail); return 0; } /* Enable/disable interrupts on all virtqueues. */ static void vtnet_netmap_intr(struct netmap_adapter *na, int state) { struct vtnet_softc *sc = na->ifp->if_softc; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; struct vtnet_txq *txq = &sc->vtnet_txqs[i]; struct virtqueue *txvq = txq->vtntx_vq; if (state) { vtnet_rxq_enable_intr(rxq); virtqueue_enable_intr(txvq); } else { vtnet_rxq_disable_intr(rxq); virtqueue_disable_intr(txvq); } } } static int vtnet_netmap_tx_slots(struct vtnet_softc *sc) { int div; /* We need to prepend a virtio-net header to each netmap buffer to be * transmitted, therefore calling virtqueue_enqueue() passing sglist * with 2 elements. * TX virtqueues use indirect descriptors if the feature was negotiated * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect * descriptors, a single virtio descriptor is sufficient to reference * each TX sglist. Without them, we need two separate virtio descriptors * for each TX sglist. We therefore compute the number of netmap TX * slots according to these assumptions. 
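* For example, a 256-entry TX virtqueue provides 256 netmap TX slots when indirect descriptors are in use, and 128 slots otherwise.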
*/ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1) div = 1; else div = 2; return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div; } static int vtnet_netmap_rx_slots(struct vtnet_softc *sc) { int div; /* We need to prepend a virtio-net header to each netmap buffer to be * received, therefore calling virtqueue_enqueue() passing sglist * with 2 elements. * RX virtqueues use indirect descriptors if the feature was negotiated * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect * descriptors, a single virtio descriptor is sufficient to reference * each RX sglist. Without them, we need two separate virtio descriptors * for each RX sglist. We therefore compute the number of netmap RX * slots according to these assumptions. */ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1) div = 1; else div = 2; return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div; } static int vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info) { struct vtnet_softc *sc = na->ifp->if_softc; info->num_tx_rings = sc->vtnet_act_vq_pairs; info->num_rx_rings = sc->vtnet_act_vq_pairs; info->num_tx_descs = vtnet_netmap_tx_slots(sc); info->num_rx_descs = vtnet_netmap_rx_slots(sc); info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); return 0; } static void vtnet_netmap_attach(struct vtnet_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->vtnet_ifp; na.na_flags = 0; na.num_tx_desc = vtnet_netmap_tx_slots(sc); na.num_rx_desc = vtnet_netmap_rx_slots(sc); na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs; na.rx_buf_maxsize = 0; na.nm_register = vtnet_netmap_reg; na.nm_txsync = vtnet_netmap_txsync; na.nm_rxsync = vtnet_netmap_rxsync; na.nm_intr = vtnet_netmap_intr; na.nm_config = vtnet_netmap_config; netmap_attach(&na); nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d", na.num_tx_rings, na.num_tx_desc, na.num_tx_rings, na.num_rx_desc); } /* end of file */ Index: stable/12/sys/dev/netmap/netmap.c =================================================================== --- stable/12/sys/dev/netmap/netmap.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap.c (revision 344046) @@ -1,4189 +1,4208 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi * Copyright (C) 2011-2016 Luigi Rizzo * Copyright (C) 2011-2016 Giuseppe Lettieri * Copyright (C) 2011-2016 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * This module supports memory mapped access to network devices, * see netmap(4). * * The module uses a large, memory pool allocated by the kernel * and accessible as mmapped memory by multiple userspace threads/processes. * The memory pool contains packet buffers and "netmap rings", * i.e. user-accessible copies of the interface's queues. * * Access to the network card works like this: * 1. a process/thread issues one or more open() on /dev/netmap, to create * select()able file descriptor on which events are reported. * 2. on each descriptor, the process issues an ioctl() to identify * the interface that should report events to the file descriptor. * 3. on each descriptor, the process issues an mmap() request to * map the shared memory region within the process' address space. * The list of interesting queues is indicated by a location in * the shared memory region. * 4. using the functions in the netmap(4) userspace API, a process * can look up the occupation state of a queue, access memory buffers, * and retrieve received packets or enqueue packets to transmit. * 5. using some ioctl()s the process can synchronize the userspace view * of the queue with the actual status in the kernel. This includes both * receiving the notification of new packets, and transmitting new * packets on the output interface. * 6. select() or poll() can be used to wait for events on individual * transmit or receive queues (or all queues for a given interface). * SYNCHRONIZATION (USER) The netmap rings and data structures may be shared among multiple user threads or even independent processes. Any synchronization among those threads/processes is delegated to the threads themselves. Only one thread at a time can be in a system call on the same netmap ring. The OS does not enforce this and only guarantees against system crashes in case of invalid usage. LOCKING (INTERNAL) Within the kernel, access to the netmap rings is protected as follows: - a spinlock on each ring, to handle producer/consumer races on RX rings attached to the host stack (against multiple host threads writing from the host stack to the same ring), and on 'destination' rings attached to a VALE switch (i.e. RX rings in VALE ports, and TX rings in NIC/host ports) protecting multiple active senders for the same destination) - an atomic variable to guarantee that there is at most one instance of *_*xsync() on the ring at any time. For rings connected to user file descriptors, an atomic_test_and_set() protects this, and the lock on the ring is not actually used. For NIC RX rings connected to a VALE switch, an atomic_test_and_set() is also used to prevent multiple executions (the driver might indeed already guarantee this). For NIC TX rings connected to a VALE switch, the lock arbitrates access to the queue (both when allocating buffers and when pushing them out). - *xsync() should be protected against initializations of the card. 
On FreeBSD most devices have the reset routine protected by a RING lock (ixgbe, igb, em) or core lock (re). lem is missing the RING protection on rx_reset(), this should be added. On linux there is an external lock on the tx path, which probably also arbitrates access to the reset routine. XXX to be revised - a per-interface core_lock protecting access from the host stack while interfaces may be detached from netmap mode. XXX there should be no need for this lock if we detach the interfaces only while they are down. --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* --- internals ---- * * Roadmap to the code that implements the above. * * > 1. a process/thread issues one or more open() on /dev/netmap, to create * > select()able file descriptor on which events are reported. * * Internally, we allocate a netmap_priv_d structure, that will be * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d * structure for each open(). * * os-specific: * FreeBSD: see netmap_open() (netmap_freebsd.c) * linux: see linux_netmap_open() (netmap_linux.c) * * > 2. on each descriptor, the process issues an ioctl() to identify * > the interface that should report events to the file descriptor. * * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0. * Most important things happen in netmap_get_na() and * netmap_do_regif(), called from there. Additional details can be * found in the comments above those functions. * * In all cases, this action creates/takes-a-reference-to a * netmap_*_adapter describing the port, and allocates a netmap_if * and all necessary netmap rings, filling them with netmap buffers. * * In this phase, the sync callbacks for each ring are set (these are used * in steps 5 and 6 below). The callbacks depend on the type of adapter. * The adapter creation/initialization code puts them in the * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they * are copied from there to the netmap_kring's during netmap_do_regif(), by * the nm_krings_create() callback. All the nm_krings_create callbacks * actually call netmap_krings_create() to perform this and the other * common stuff. netmap_krings_create() also takes care of the host rings, * if needed, by setting their sync callbacks appropriately. * * Additional actions depend on the kind of netmap_adapter that has been * registered: * * - netmap_hw_adapter: [netmap.c] * This is a system netdev/ifp with native netmap support. * The ifp is detached from the host stack by redirecting: * - transmissions (from the network stack) to netmap_transmit() * - receive notifications to the nm_notify() callback for * this adapter. 
The callback is normally netmap_notify(), unless * the ifp is attached to a bridge using bwrap, in which case it * is netmap_bwrap_intr_notify(). * * - netmap_generic_adapter: [netmap_generic.c] * A system netdev/ifp without native netmap support. * * (the decision about native/non native support is taken in * netmap_get_hw_na(), called by netmap_get_na()) * * - netmap_vp_adapter [netmap_vale.c] * Returned by netmap_get_bdg_na(). * This is a persistent or ephemeral VALE port. Ephemeral ports * are created on the fly if they don't already exist, and are * always attached to a bridge. * Persistent VALE ports must must be created separately, and i * then attached like normal NICs. The NIOCREGIF we are examining * will find them only if they had previosly been created and * attached (see VALE_CTL below). * * - netmap_pipe_adapter [netmap_pipe.c] * Returned by netmap_get_pipe_na(). * Both pipe ends are created, if they didn't already exist. * * - netmap_monitor_adapter [netmap_monitor.c] * Returned by netmap_get_monitor_na(). * If successful, the nm_sync callbacks of the monitored adapter * will be intercepted by the returned monitor. * * - netmap_bwrap_adapter [netmap_vale.c] * Cannot be obtained in this way, see VALE_CTL below * * * os-specific: * linux: we first go through linux_netmap_ioctl() to * adapt the FreeBSD interface to the linux one. * * * > 3. on each descriptor, the process issues an mmap() request to * > map the shared memory region within the process' address space. * > The list of interesting queues is indicated by a location in * > the shared memory region. * * os-specific: * FreeBSD: netmap_mmap_single (netmap_freebsd.c). * linux: linux_netmap_mmap (netmap_linux.c). * * > 4. using the functions in the netmap(4) userspace API, a process * > can look up the occupation state of a queue, access memory buffers, * > and retrieve received packets or enqueue packets to transmit. * * these actions do not involve the kernel. * * > 5. using some ioctl()s the process can synchronize the userspace view * > of the queue with the actual status in the kernel. This includes both * > receiving the notification of new packets, and transmitting new * > packets on the output interface. * * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC * cases. They invoke the nm_sync callbacks on the netmap_kring * structures, as initialized in step 2 and maybe later modified * by a monitor. Monitors, however, will always call the original * callback before doing anything else. * * * > 6. select() or poll() can be used to wait for events on individual * > transmit or receive queues (or all queues for a given interface). * * Implemented in netmap_poll(). This will call the same nm_sync() * callbacks as in step 5 above. * * os-specific: * linux: we first go through linux_netmap_poll() to adapt * the FreeBSD interface to the linux one. * * * ---- VALE_CTL ----- * * VALE switches are controlled by issuing a NIOCREGIF with a non-null * nr_cmd in the nmreq structure. These subcommands are handled by * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF * subcommands, respectively. * * Any network interface known to the system (including a persistent VALE * port) can be attached to a VALE switch by issuing the * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports * look exactly like ephemeral VALE ports (as created in step 2 above). 
The * attachment of other interfaces, instead, requires the creation of a * netmap_bwrap_adapter. Moreover, the attached interface must be put in * netmap mode. This may require the creation of a netmap_generic_adapter if * we have no native support for the interface, or if generic adapters have * been forced by sysctl. * * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(), * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach() * callback. In the case of the bwrap, the callback creates the * netmap_bwrap_adapter. The initialization of the bwrap is then * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl() * callback (netmap_bwrap_bdg_ctl in netmap_vale.c). * A generic adapter for the wrapped ifp will be created if needed, when * netmap_get_bdg_na() calls netmap_get_hw_na(). * * * ---- DATAPATHS ----- * * -= SYSTEM DEVICE WITH NATIVE SUPPORT =- * * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_txsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_rxsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from host stack * concurrently: * 1) host stack * netmap_transmit() * na->nm_notify == netmap_notify() * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_rxsync_from_host * netmap_rxsync_from_host(na, NULL, NULL) * - tx to host stack * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_txsync_to_host * netmap_txsync_to_host(na) * nm_os_send_up() * FreeBSD: na->if_input() == ether_input() * linux: netif_rx() with NM_MAGIC_PRIORITY_RX * * * -= SYSTEM DEVICE WITH GENERIC SUPPORT =- * * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_txsync() * nm_os_generic_xmit_frame() * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * gna->save_start_xmit == orig. dev. start_xmit * FreeBSD: na->if_transmit() == orig. 
dev if_transmit * 2) generic_mbuf_destructor() * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_rxsync() * mbq_safe_dequeue() * 2) device driver * generic_rx_handler() * mbq_safe_enqueue() * na->nm_notify() == netmap_notify() * - rx from host stack * FreeBSD: same as native * Linux: same as native except: * 1) host stack * dev_queue_xmit() without NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * netmap_transmit() * na->nm_notify() == netmap_notify() * - tx to host stack (same as native): * * * -= VALE =- * * INCOMING: * * - VALE ports: * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_txsync() * * - system device with native support: * from cable: * interrupt * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == DEVICE_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == DEVICE_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * - system device with generic support: * from device driver: * generic_rx_handler() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == generic_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == generic_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * (all cases) --> nm_bdg_flush() * dest_na->nm_notify() == (see below) * * OUTGOING: * * - VALE ports: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_rxsync() * 2) from nm_bdg_flush() * na->nm_notify() == netmap_notify() * * - system device with native support: * to cable: * na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == DEVICE_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync_locked() * * - system device with generic adapter: * to device driver: * na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == generic_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync() * */ /* * OS-specific code that is used only within this file. 
* Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include /* FIONBIO */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #include /* ETHER_BPF_MTAP */ #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined (_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include /* user-controlled variables */ int netmap_verbose; #ifdef CONFIG_NETMAP_DEBUG int netmap_debug; #endif /* CONFIG_NETMAP_DEBUG */ static int netmap_no_timestamp; /* don't timestamp on rxsync */ int netmap_no_pendintr = 1; int netmap_txsync_retry = 2; static int netmap_fwd = 0; /* force transparent forwarding */ /* * netmap_admode selects the netmap mode to use. * Invalid values are reset to NETMAP_ADMODE_BEST */ enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */ NETMAP_ADMODE_NATIVE, /* either native or none */ NETMAP_ADMODE_GENERIC, /* force generic */ NETMAP_ADMODE_LAST }; static int netmap_admode = NETMAP_ADMODE_BEST; /* netmap_generic_mit controls mitigation of RX notifications for * the generic netmap adapter. The value is a time interval in * nanoseconds. */ int netmap_generic_mit = 100*1000; /* We use by default netmap-aware qdiscs with generic netmap adapters, * even if there can be a little performance hit with hardware NICs. * However, using the qdisc is the safer approach, for two reasons: * 1) it prevents non-fifo qdiscs to break the TX notification * scheme, which is based on mbuf destructors when txqdisc is * not used. * 2) it makes it possible to transmit over software devices that * change skb->dev, like bridge, veth, ... * * Anyway users looking for the best performance should * use native adapters. */ #ifdef linux int netmap_generic_txqdisc = 1; #endif /* Default number of slots and queues for generic adapters. */ int netmap_generic_ringsize = 1024; int netmap_generic_rings = 1; /* Non-zero to enable checksum offloading in NIC drivers */ int netmap_generic_hwcsum = 0; /* Non-zero if ptnet devices are allowed to use virtio-net headers. 
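* This is exported as the dev.netmap.ptnet_vnet_hdr sysctl below.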
*/ int ptnet_vnet_hdr = 1; /* * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated * in some other operating systems */ SYSBEGIN(main_init); SYSCTL_DECL(_dev_netmap); SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); #ifdef CONFIG_NETMAP_DEBUG SYSCTL_INT(_dev_netmap, OID_AUTO, debug, CTLFLAG_RW, &netmap_debug, 0, "Debug messages"); #endif /* CONFIG_NETMAP_DEBUG */ SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW, &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush."); SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0, "Force NR_FORWARD mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0, "Adapter mode. 0 selects the best option available," "1 forces native adapter, 2 forces emulated adapter"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum, 0, "Hardware checksums. 0 to disable checksum generation by the NIC (default)," "1 to enable checksum generation by the NIC"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit, 0, "RX notification interval in nanoseconds"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW, &netmap_generic_ringsize, 0, "Number of per-ring slots for emulated netmap mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW, &netmap_generic_rings, 0, "Number of TX/RX queues for emulated netmap adapters"); #ifdef linux SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW, &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters"); #endif SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr, 0, "Allow ptnet devices to use virtio-net headers"); SYSEND; NMG_LOCK_T netmap_global_lock; /* * mark the ring as stopped, and run through the locks * to make sure other users get to see it. * stopped must be either NR_KR_STOPPED (for unbounded stop) * of NR_KR_LOCKED (brief stop for mutual exclusion purposes) */ static void netmap_disable_ring(struct netmap_kring *kr, int stopped) { nm_kr_stop(kr, stopped); // XXX check if nm_kr_stop is sufficient mtx_lock(&kr->q_lock); mtx_unlock(&kr->q_lock); nm_kr_put(kr); } /* stop or enable a single ring */ void netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped) { if (stopped) netmap_disable_ring(NMR(na, t)[ring_id], stopped); else NMR(na, t)[ring_id]->nkr_stopped = 0; } /* stop or enable all the rings of na */ void netmap_set_all_rings(struct netmap_adapter *na, int stopped) { int i; enum txrx t; if (!nm_netmap_on(na)) return; for_rx_tx(t) { for (i = 0; i < netmap_real_rings(na, t); i++) { netmap_set_ring(na, i, t, stopped); } } } /* * Convenience function used in drivers. Waits for current txsync()s/rxsync()s * to finish and prevents any new one from starting. Call this before turning * netmap mode off, or before removing the hardware rings (e.g., on module * onload). */ void netmap_disable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { netmap_set_all_rings(NA(ifp), NM_KR_STOPPED); } } /* * Convenience function used in drivers. Re-enables rxsync and txsync on the * adapter's rings In linux drivers, this should be placed near each * napi_enable(). 
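* It is the counterpart of netmap_disable_all_rings() above and is typically called once the driver has finished reinitializing its hardware rings.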
*/ void netmap_enable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { netmap_set_all_rings(NA(ifp), 0 /* enabled */); } } void netmap_make_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags |= NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } void netmap_undo_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); if (na->na_flags & NAF_ZOMBIE) { netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags &= ~NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } } /* * generic bound_checking function */ u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg) { u_int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } if (op && msg) nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv); return *v; } /* * packet-dump function, user-supplied or static buffer. * The destination buffer must be at least 30+4*len */ const char * nm_dump_buf(char *p, int len, int lim, char *dst) { static char _dst[8192]; int i, j, i0; static char hex[] ="0123456789abcdef"; char *o; /* output position */ #define P_HI(x) hex[((x) & 0xf0)>>4] #define P_LO(x) hex[((x) & 0xf)] #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.') if (!dst) dst = _dst; if (lim <= 0 || lim > len) lim = len; o = dst; sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim); o += strlen(o); /* hexdump routine */ for (i = 0; i < lim; ) { sprintf(o, "%5d: ", i); o += strlen(o); memset(o, ' ', 48); i0 = i; for (j=0; j < 16 && i < lim; i++, j++) { o[j*3] = P_HI(p[i]); o[j*3+1] = P_LO(p[i]); } i = i0; for (j=0; j < 16 && i < lim; i++, j++) o[j + 48] = P_C(p[i]); o[j+48] = '\n'; o += j+49; } *o = '\0'; #undef P_HI #undef P_LO #undef P_C return dst; } /* * Fetch configuration from the device, to cope with dynamic * reconfigurations after loading the module. 
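* If the driver reports different ring/slot counts and no file descriptors are bound, the new values are adopted; if the port is in use we only warn about the mismatch.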
*/ /* call with NMG_LOCK held */ int netmap_update_config(struct netmap_adapter *na) { struct nm_config_info info; bzero(&info, sizeof(info)); if (na->nm_config == NULL || na->nm_config(na, &info)) { /* take whatever we had at init time */ info.num_tx_rings = na->num_tx_rings; info.num_tx_descs = na->num_tx_desc; info.num_rx_rings = na->num_rx_rings; info.num_rx_descs = na->num_rx_desc; info.rx_buf_maxsize = na->rx_buf_maxsize; } if (na->num_tx_rings == info.num_tx_rings && na->num_tx_desc == info.num_tx_descs && na->num_rx_rings == info.num_rx_rings && na->num_rx_desc == info.num_rx_descs && na->rx_buf_maxsize == info.rx_buf_maxsize) return 0; /* nothing changed */ if (na->active_fds == 0) { na->num_tx_rings = info.num_tx_rings; na->num_tx_desc = info.num_tx_descs; na->num_rx_rings = info.num_rx_rings; na->num_rx_desc = info.num_rx_descs; na->rx_buf_maxsize = info.rx_buf_maxsize; if (netmap_verbose) nm_prinf("configuration changed for %s: txring %d x %d, " "rxring %d x %d, rxbufsz %d", na->name, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); return 0; } nm_prerr("WARNING: configuration changed for %s while active: " "txring %d x %d, rxring %d x %d, rxbufsz %d", na->name, info.num_tx_rings, info.num_tx_descs, info.num_rx_rings, info.num_rx_descs, info.rx_buf_maxsize); return 1; } /* nm_sync callbacks for the host rings */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags); static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags); /* create the krings array and initialize the fields common to all adapters. * The array layout is this: * * +----------+ * na->tx_rings ----->| | \ * | | } na->num_tx_ring * | | / * +----------+ * | | host tx kring * na->rx_rings ----> +----------+ * | | \ * | | } na->num_rx_rings * | | / * +----------+ * | | host rx kring * +----------+ * na->tailroom ----->| | \ * | | } tailroom bytes * | | / * +----------+ * * Note: for compatibility, host krings are created even when not needed. * The tailroom space is currently used by vale ports for allocating leases. */ /* call with NMG_LOCK held */ int netmap_krings_create(struct netmap_adapter *na, u_int tailroom) { u_int i, len, ndesc; struct netmap_kring *kring; u_int n[NR_TXRX]; enum txrx t; if (na->tx_rings != NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("warning: krings were already created"); return 0; } /* account for the (possibly fake) host rings */ n[NR_TX] = netmap_all_rings(na, NR_TX); n[NR_RX] = netmap_all_rings(na, NR_RX); len = (n[NR_TX] + n[NR_RX]) * (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *)) + tailroom; na->tx_rings = nm_os_malloc((size_t)len); if (na->tx_rings == NULL) { nm_prerr("Cannot allocate krings"); return ENOMEM; } na->rx_rings = na->tx_rings + n[NR_TX]; na->tailroom = na->rx_rings + n[NR_RX]; /* link the krings in the krings array */ kring = (struct netmap_kring *)((char *)na->tailroom + tailroom); for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) { na->tx_rings[i] = kring; kring++; } /* * All fields in krings are 0 except the one initialized below. * but better be explicit on important kring fields. */ for_rx_tx(t) { ndesc = nma_get_ndesc(na, t); for (i = 0; i < n[t]; i++) { kring = NMR(na, t)[i]; bzero(kring, sizeof(*kring)); kring->na = na; kring->notify_na = na; kring->ring_id = i; kring->tx = t; kring->nkr_num_slots = ndesc; kring->nr_mode = NKR_NETMAP_OFF; kring->nr_pending_mode = NKR_NETMAP_OFF; if (i < nma_get_nrings(na, t)) { kring->nm_sync = (t == NR_TX ? 
na->nm_txsync : na->nm_rxsync); } else { if (!(na->na_flags & NAF_HOST_RINGS)) kring->nr_kflags |= NKR_FAKERING; kring->nm_sync = (t == NR_TX ? netmap_txsync_to_host: netmap_rxsync_from_host); } kring->nm_notify = na->nm_notify; kring->rhead = kring->rcur = kring->nr_hwcur = 0; /* * IMPORTANT: Always keep one slot empty. */ kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0); snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name, nm_txrx2str(t), i); - ND("ktx %s h %d c %d t %d", + nm_prdis("ktx %s h %d c %d t %d", kring->name, kring->rhead, kring->rcur, kring->rtail); mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF); nm_os_selinfo_init(&kring->si); } nm_os_selinfo_init(&na->si[t]); } return 0; } /* undo the actions performed by netmap_krings_create */ /* call with NMG_LOCK held */ void netmap_krings_delete(struct netmap_adapter *na) { struct netmap_kring **kring = na->tx_rings; enum txrx t; if (na->tx_rings == NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("warning: krings were already deleted"); return; } for_rx_tx(t) nm_os_selinfo_uninit(&na->si[t]); /* we rely on the krings layout described above */ for ( ; kring != na->tailroom; kring++) { mtx_destroy(&(*kring)->q_lock); nm_os_selinfo_uninit(&(*kring)->si); } nm_os_free(na->tx_rings); na->tx_rings = na->rx_rings = na->tailroom = NULL; } /* * Destructor for NIC ports. They also have an mbuf queue * on the rings connected to the host so we need to purge * them first. */ /* call with NMG_LOCK held */ void netmap_hw_krings_delete(struct netmap_adapter *na) { u_int lim = netmap_real_rings(na, NR_RX), i; for (i = nma_get_nrings(na, NR_RX); i < lim; i++) { struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue; - ND("destroy sw mbq with len %d", mbq_len(q)); + nm_prdis("destroy sw mbq with len %d", mbq_len(q)); mbq_purge(q); mbq_safe_fini(q); } netmap_krings_delete(na); } static void netmap_mem_drop(struct netmap_adapter *na) { int last = netmap_mem_deref(na->nm_mem, na); /* if the native allocator had been overrided on regif, * restore it now and drop the temporary one */ if (last && na->nm_mem_prev) { netmap_mem_put(na->nm_mem); na->nm_mem = na->nm_mem_prev; na->nm_mem_prev = NULL; } } /* * Undo everything that was done in netmap_do_regif(). In particular, * call nm_register(ifp,0) to stop netmap mode on the interface and * revert to normal operation. */ /* call with NMG_LOCK held */ static void netmap_unset_ringid(struct netmap_priv_d *); static void netmap_krings_put(struct netmap_priv_d *); void netmap_do_unregif(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; NMG_LOCK_ASSERT(); na->active_fds--; /* unset nr_pending_mode and possibly release exclusive mode */ netmap_krings_put(priv); #ifdef WITH_MONITOR /* XXX check whether we have to do something with monitor * when rings change nr_mode. */ if (na->active_fds <= 0) { /* walk through all the rings and tell any monitor * that the port is going to exit netmap mode */ netmap_monitor_stop(na); } #endif if (na->active_fds <= 0 || nm_kring_pending(priv)) { na->nm_register(na, 0); } /* delete rings and buffers that are no longer needed */ netmap_mem_rings_delete(na); if (na->active_fds <= 0) { /* last instance */ /* * (TO CHECK) We enter here * when the last reference to this file descriptor goes * away. This means we cannot have any pending poll() * or interrupt routine operating on the structure. * XXX The file may be closed in a thread while * another thread is using it. 
* Linux keeps the file opened until the last reference * by any outstanding ioctl/poll or mmap is gone. * FreeBSD does not track mmap()s (but we do) and * wakes up any sleeping poll(). Need to check what * happens if the close() occurs while a concurrent * syscall is running. */ if (netmap_debug & NM_DEBUG_ON) nm_prinf("deleting last instance for %s", na->name); if (nm_netmap_on(na)) { nm_prerr("BUG: netmap on while going to delete the krings"); } na->nm_krings_delete(na); } /* possibily decrement counter of tx_si/rx_si users */ netmap_unset_ringid(priv); /* delete the nifp */ netmap_mem_if_delete(na, priv->np_nifp); /* drop the allocator */ netmap_mem_drop(na); /* mark the priv as unregistered */ priv->np_na = NULL; priv->np_nifp = NULL; } struct netmap_priv_d* netmap_priv_new(void) { struct netmap_priv_d *priv; priv = nm_os_malloc(sizeof(struct netmap_priv_d)); if (priv == NULL) return NULL; priv->np_refs = 1; nm_os_get_module(); return priv; } /* * Destructor of the netmap_priv_d, called when the fd is closed * Action: undo all the things done by NIOCREGIF, * On FreeBSD we need to track whether there are active mmap()s, * and we use np_active_mmaps for that. On linux, the field is always 0. * Return: 1 if we can free priv, 0 otherwise. * */ /* call with NMG_LOCK held */ void netmap_priv_delete(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; /* number of active references to this fd */ if (--priv->np_refs > 0) { return; } nm_os_put_module(); if (na) { netmap_do_unregif(priv); } netmap_unget_na(na, priv->np_ifp); bzero(priv, sizeof(*priv)); /* for safety */ nm_os_free(priv); } /* call with NMG_LOCK *not* held */ void netmap_dtor(void *data) { struct netmap_priv_d *priv = data; NMG_LOCK(); netmap_priv_delete(priv); NMG_UNLOCK(); } /* * Handlers for synchronization of the rings from/to the host stack. * These are associated to a network interface and are just another * ring pair managed by userspace. * * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD * flags): * * - Before releasing buffers on hw RX rings, the application can mark * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they * will be forwarded to the host stack, similarly to what happened if * the application moved them to the host TX ring. * * - Before releasing buffers on the host RX ring, the application can * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(), * they will be forwarded to the hw TX rings, saving the application * from doing the same task in user-space. * * Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD * flag, or globally with the netmap_fwd sysctl. * * The transfer NIC --> host is relatively easy, just encapsulate * into mbufs and we are done. The host --> NIC side is slightly * harder because there might not be room in the tx ring so it * might take a while before releasing the buffer. */ /* * Pass a whole queue of mbufs to the host stack as coming from 'dst' * We do not need to lock because the queue is private. * After this call the queue is empty. */ static void netmap_send_up(struct ifnet *dst, struct mbq *q) { struct mbuf *m; struct mbuf *head = NULL, *prev = NULL; /* Send packets up, outside the lock; head/prev machinery * is only useful for Windows. 
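* (the trailing nm_os_send_up() call with a NULL mbuf passes down whatever chain was accumulated in 'head').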
*/ while ((m = mbq_dequeue(q)) != NULL) { if (netmap_debug & NM_DEBUG_HOST) nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m)); prev = nm_os_send_up(dst, m, prev); if (head == NULL) head = prev; } if (head) nm_os_send_up(dst, NULL, head); mbq_fini(q); } /* * Scan the buffers from hwcur to ring->head, and put a copy of those * marked NS_FORWARD (or all of them if forced) into a queue of mbufs. * Drop remaining packets in the unlikely event * of an mbuf shortage. */ static void netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force) { u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; u_int n; struct netmap_adapter *na = kring->na; for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) { struct mbuf *m; struct netmap_slot *slot = &kring->ring->slot[n]; if ((slot->flags & NS_FORWARD) == 0 && !force) continue; if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) { - RD(5, "bad pkt at %d len %d", n, slot->len); + nm_prlim(5, "bad pkt at %d len %d", n, slot->len); continue; } slot->flags &= ~NS_FORWARD; // XXX needed ? /* XXX TODO: adapt to the case of a multisegment packet */ m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL); if (m == NULL) break; mbq_enqueue(q, m); } } static inline int _nm_may_forward(struct netmap_kring *kring) { return ((netmap_fwd || kring->ring->flags & NR_FORWARD) && kring->na->na_flags & NAF_HOST_RINGS && kring->tx == NR_RX); } static inline int nm_may_forward_up(struct netmap_kring *kring) { return _nm_may_forward(kring) && kring->ring_id != kring->na->num_rx_rings; } static inline int nm_may_forward_down(struct netmap_kring *kring, int sync_flags) { return _nm_may_forward(kring) && (sync_flags & NAF_CAN_FORWARD_DOWN) && kring->ring_id == kring->na->num_rx_rings; } /* * Send to the NIC rings packets marked NS_FORWARD between * kring->nr_hwcur and kring->rhead. * Called under kring->rx_queue.lock on the sw rx ring. * * It can only be called if the user opened all the TX hw rings, * see NAF_CAN_FORWARD_DOWN flag. * We can touch the TX netmap rings (slots, head and cur) since * we are in poll/ioctl system call context, and the application * is not supposed to touch the ring (using a different thread) * during the execution of the system call. */ static u_int netmap_sw_to_nic(struct netmap_adapter *na) { struct netmap_kring *kring = na->rx_rings[na->num_rx_rings]; struct netmap_slot *rxslot = kring->ring->slot; u_int i, rxcur = kring->nr_hwcur; u_int const head = kring->rhead; u_int const src_lim = kring->nkr_num_slots - 1; u_int sent = 0; /* scan rings to find space, then fill as much as possible */ for (i = 0; i < na->num_tx_rings; i++) { struct netmap_kring *kdst = na->tx_rings[i]; struct netmap_ring *rdst = kdst->ring; u_int const dst_lim = kdst->nkr_num_slots - 1; /* XXX do we trust ring or kring->rcur,rtail ? */ for (; rxcur != head && !nm_ring_empty(rdst); rxcur = nm_next(rxcur, src_lim) ) { struct netmap_slot *src, *dst, tmp; u_int dst_head = rdst->head; src = &rxslot[rxcur]; if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd) continue; sent++; dst = &rdst->slot[dst_head]; tmp = *src; src->buf_idx = dst->buf_idx; src->flags = NS_BUF_CHANGED; dst->buf_idx = tmp.buf_idx; dst->len = tmp.len; dst->flags = NS_BUF_CHANGED; rdst->head = rdst->cur = nm_next(dst_head, dst_lim); } /* if (sent) XXX txsync ? it would be just an optimization */ } return sent; } /* * netmap_txsync_to_host() passes packets up. 
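* Slots between hwcur and head are copied into mbufs and queued to the host stack; hwcur then jumps to head and hwtail is placed one slot behind it, so the whole ring is again available to userspace.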
We are called from a * system call in user process context, and the only contention * can be among multiple user threads erroneously calling * this routine concurrently. */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct mbq q; /* Take packets from hwcur to head and pass them up. * Force hwcur = head since netmap_grab_packets() stops at head */ mbq_init(&q); netmap_grab_packets(kring, &q, 1 /* force */); - ND("have %d pkts in queue", mbq_len(&q)); + nm_prdis("have %d pkts in queue", mbq_len(&q)); kring->nr_hwcur = head; kring->nr_hwtail = head + lim; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; netmap_send_up(na->ifp, &q); return 0; } /* * rxsync backend for packets coming from the host stack. * They have been put in kring->rx_queue by netmap_transmit(). * We protect access to the kring using kring->rx_queue.lock * * also moves to the nic hw rings any packet the user has marked * for transparent-mode forwarding, then sets the NR_FORWARD * flag in the kring to let the caller push them out */ static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int ret = 0; struct mbq *q = &kring->rx_queue, fq; mbq_init(&fq); /* fq holds packets to be freed */ mbq_lock(q); /* First part: import newly received packets */ n = mbq_len(q); if (n) { /* grab packets from the queue */ struct mbuf *m; uint32_t stop_i; nm_i = kring->nr_hwtail; stop_i = nm_prev(kring->nr_hwcur, lim); while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) { int len = MBUF_LEN(m); struct netmap_slot *slot = &ring->slot[nm_i]; m_copydata(m, 0, len, NMB(na, slot)); - ND("nm %d len %d", nm_i, len); + nm_prdis("nm %d len %d", nm_i, len); if (netmap_debug & NM_DEBUG_HOST) nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); slot->len = len; slot->flags = 0; nm_i = nm_next(nm_i, lim); mbq_enqueue(&fq, m); } kring->nr_hwtail = nm_i; } /* * Second part: skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* something was released */ if (nm_may_forward_down(kring, flags)) { ret = netmap_sw_to_nic(na); if (ret > 0) { kring->nr_kflags |= NR_FORWARD; ret = 0; } } kring->nr_hwcur = head; } mbq_unlock(q); mbq_purge(&fq); mbq_fini(&fq); return ret; } /* Get a netmap adapter for the port. * * If it is possible to satisfy the request, return 0 * with *na containing the netmap adapter found. * Otherwise return an error code, with *na containing NULL. * * When the port is attached to a bridge, we always return * EBUSY. * Otherwise, if the port is already bound to a file descriptor, * then we unconditionally return the existing adapter into *na. * In all the other cases, we return (into *na) either native, * generic or NULL, according to the following table: * * native_support * active_fds dev.netmap.admode YES NO * ------------------------------------------------------- * >0 * NA(ifp) NA(ifp) * * 0 NETMAP_ADMODE_BEST NATIVE GENERIC * 0 NETMAP_ADMODE_NATIVE NATIVE NULL * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC * */ static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */ int netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na) { /* generic support */ int i = netmap_admode; /* Take a snapshot. 
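* (netmap_admode is a writable sysctl and may change while we run; invalid values are also normalized below).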
*/ struct netmap_adapter *prev_na; int error = 0; *na = NULL; /* default */ /* reset in case of invalid value */ if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST) i = netmap_admode = NETMAP_ADMODE_BEST; if (NM_NA_VALID(ifp)) { prev_na = NA(ifp); /* If an adapter already exists, return it if * there are active file descriptors or if * netmap is not forced to use generic * adapters. */ if (NETMAP_OWNED_BY_ANY(prev_na) || i != NETMAP_ADMODE_GENERIC || prev_na->na_flags & NAF_FORCE_NATIVE #ifdef WITH_PIPES /* ugly, but we cannot allow an adapter switch * if some pipe is referring to this one */ || prev_na->na_next_pipe > 0 #endif ) { *na = prev_na; goto assign_mem; } } /* If there isn't native support and netmap is not allowed * to use generic adapters, we cannot satisfy the request. */ if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE) return EOPNOTSUPP; /* Otherwise, create a generic adapter and return it, * saving the previously used netmap adapter, if any. * * Note that here 'prev_na', if not NULL, MUST be a * native adapter, and CANNOT be a generic one. This is * true because generic adapters are created on demand, and * destroyed when not used anymore. Therefore, if the adapter * currently attached to an interface 'ifp' is generic, it * must be that * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))). * Consequently, if NA(ifp) is generic, we will enter one of * the branches above. This ensures that we never override * a generic adapter with another generic adapter. */ error = generic_netmap_attach(ifp); if (error) return error; *na = NA(ifp); assign_mem: if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) && (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) { (*na)->nm_mem_prev = (*na)->nm_mem; (*na)->nm_mem = netmap_mem_get(nmd); } return 0; } /* * MUST BE CALLED UNDER NMG_LOCK() * * Get a refcounted reference to a netmap adapter attached * to the interface specified by req. * This is always called in the execution of an ioctl(). * * Return ENXIO if the interface specified by the request does * not exist, ENOTSUP if netmap is not supported by the interface, * EBUSY if the interface is already attached to a bridge, * EINVAL if parameters are invalid, ENOMEM if needed resources * could not be allocated. * If successful, hold a reference to the netmap adapter. * * If the interface specified by req is a system one, also keep * a reference to it and return a valid *ifp. */ int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct ifnet **ifp, struct netmap_mem_d *nmd, int create) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; int error = 0; struct netmap_adapter *ret = NULL; int nmd_ref = 0; *na = NULL; /* default return value */ *ifp = NULL; if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { return EINVAL; } if (req->nr_mode == NR_REG_PIPE_MASTER || req->nr_mode == NR_REG_PIPE_SLAVE) { /* Do not accept deprecated pipe modes. */ nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax"); return EINVAL; } NMG_LOCK_ASSERT(); /* if the request contain a memid, try to find the * corresponding memory region */ if (nmd == NULL && req->nr_mem_id) { nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) return EINVAL; /* keep the rereference */ nmd_ref = 1; } /* We cascade through all possible types of netmap adapter. 
* All netmap_get_*_na() functions return an error and an na, * with the following combinations: * * error na * 0 NULL type doesn't match * !0 NULL type matches, but na creation/lookup failed * 0 !NULL type matches and na created/found * !0 !NULL impossible */ error = netmap_get_null_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a monitor port */ error = netmap_get_monitor_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a pipe port */ error = netmap_get_pipe_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a bridge port */ error = netmap_get_vale_na(hdr, na, nmd, create); if (error) goto out; if (*na != NULL) /* valid match in netmap_get_bdg_na() */ goto out; /* * This must be a hardware na, lookup the name in the system. * Note that by hardware we actually mean "it shows up in ifconfig". * This may still be a tap, a veth/epair, or even a * persistent VALE port. */ *ifp = ifunit_ref(hdr->nr_name); if (*ifp == NULL) { error = ENXIO; goto out; } error = netmap_get_hw_na(*ifp, nmd, &ret); if (error) goto out; *na = ret; netmap_adapter_get(ret); out: if (error) { if (ret) netmap_adapter_put(ret); if (*ifp) { if_rele(*ifp); *ifp = NULL; } } if (nmd_ref) netmap_mem_put(nmd); return error; } /* undo netmap_get_na() */ void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp) { if (ifp) if_rele(ifp); if (na) netmap_adapter_put(na); } #define NM_FAIL_ON(t) do { \ if (unlikely(t)) { \ - RD(5, "%s: fail '" #t "' " \ + nm_prlim(5, "%s: fail '" #t "' " \ "h %d c %d t %d " \ "rh %d rc %d rt %d " \ "hc %d ht %d", \ kring->name, \ head, cur, ring->tail, \ kring->rhead, kring->rcur, kring->rtail, \ kring->nr_hwcur, kring->nr_hwtail); \ return kring->nkr_num_slots; \ } \ } while (0) /* * validate parameters on entry for *_txsync() * Returns ring->cur if ok, or something >= kring->nkr_num_slots * in case of error. * * rhead, rcur and rtail=hwtail are stored from previous round. * hwcur is the next packet to send to the ring. * * We want * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail * * hwcur, rhead, rtail and hwtail are reliable */ u_int nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { u_int head = ring->head; /* read only once */ u_int cur = ring->cur; /* read only once */ u_int n = kring->nkr_num_slots; - ND(5, "%s kcur %d ktail %d head %d cur %d tail %d", + nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); #if 1 /* kernel sanity checks; but we can trust the kring. */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n || kring->rtail >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* * user sanity checks. We only use head, * A, B, ... are possible positions for head: * * 0 A rhead B rtail C n-1 * 0 D rtail E rhead F n-1 * * B, F, D are valid. A, C, E are wrong */ if (kring->rtail >= kring->rhead) { /* want rhead <= head <= rtail */ NM_FAIL_ON(head < kring->rhead || head > kring->rtail); /* and also head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* here rtail < rhead */ /* we need head outside rtail .. 
rhead */ NM_FAIL_ON(head > kring->rtail && head < kring->rhead); /* two cases now: head <= rtail or head >= rhead */ if (head <= kring->rtail) { /* want head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* head >= rhead */ /* cur must be outside rtail..head */ NM_FAIL_ON(cur > kring->rtail && cur < head); } } if (ring->tail != kring->rtail) { - RD(5, "%s tail overwritten was %d need %d", kring->name, + nm_prlim(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } kring->rhead = head; kring->rcur = cur; return head; } /* * validate parameters on entry for *_rxsync() * Returns ring->head if ok, kring->nkr_num_slots on error. * * For a valid configuration, * hwcur <= head <= cur <= tail <= hwtail * * We only consider head and cur. * hwcur and hwtail are reliable. * */ u_int nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { uint32_t const n = kring->nkr_num_slots; uint32_t head, cur; - ND(5,"%s kc %d kt %d h %d c %d t %d", + nm_prdis(5,"%s kc %d kt %d h %d c %d t %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); /* * Before storing the new values, we should check they do not * move backwards. However: * - head is not an issue because the previous value is hwcur; * - cur could in principle go back, however it does not matter * because we are processing a brand new rxsync() */ cur = kring->rcur = ring->cur; /* read only once */ head = kring->rhead = ring->head; /* read only once */ #if 1 /* kernel sanity checks */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* user sanity checks */ if (kring->nr_hwtail >= kring->nr_hwcur) { /* want hwcur <= rhead <= hwtail */ NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail); /* and also rhead <= rcur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* we need rhead outside hwtail..hwcur */ NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail); /* two cases now: head <= hwtail or head >= hwcur */ if (head <= kring->nr_hwtail) { /* want head <= cur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* cur must be outside hwtail..head */ NM_FAIL_ON(cur < head && cur > kring->nr_hwtail); } } if (ring->tail != kring->rtail) { - RD(5, "%s tail overwritten was %d need %d", + nm_prlim(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } return head; } /* * Error routine called when txsync/rxsync detects an error. * Can't do much more than resetting head = cur = hwcur, tail = hwtail * Return 1 on reinit. * * This routine is only called by the upper half of the kernel. * It only reads hwcur (which is changed only by the upper half, too) * and hwtail (which may be changed by the lower half, but only on * a tx ring and only to increase it, so any error will be recovered * on the next call). For the above, we don't strictly need to call * it under lock. 
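 *
 * A minimal usage sketch, mirroring the NIOC*SYNC path in netmap_ioctl()
 * further down (nothing is assumed beyond what that path already does):
 *
 *	if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
 *		netmap_ring_reinit(kring);
 *	} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
 *		nm_sync_finalize(kring);
 *	}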
*/ int netmap_ring_reinit(struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; u_int i, lim = kring->nkr_num_slots - 1; int errors = 0; // XXX KASSERT nm_kr_tryget - RD(10, "called for %s", kring->name); + nm_prlim(10, "called for %s", kring->name); // XXX probably wrong to trust userspace kring->rhead = ring->head; kring->rcur = ring->cur; kring->rtail = ring->tail; if (ring->cur > lim) errors++; if (ring->head > lim) errors++; if (ring->tail > lim) errors++; for (i = 0; i <= lim; i++) { u_int idx = ring->slot[i].buf_idx; u_int len = ring->slot[i].len; if (idx < 2 || idx >= kring->na->na_lut.objtotal) { - RD(5, "bad index at slot %d idx %d len %d ", i, idx, len); + nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len); ring->slot[i].buf_idx = 0; ring->slot[i].len = 0; } else if (len > NETMAP_BUF_SIZE(kring->na)) { ring->slot[i].len = 0; - RD(5, "bad len at slot %d idx %d len %d", i, idx, len); + nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len); } } if (errors) { - RD(10, "total %d errors", errors); - RD(10, "%s reinit, cur %d -> %d tail %d -> %d", + nm_prlim(10, "total %d errors", errors); + nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d", kring->name, ring->cur, kring->nr_hwcur, ring->tail, kring->nr_hwtail); ring->head = kring->rhead = kring->nr_hwcur; ring->cur = kring->rcur = kring->nr_hwcur; ring->tail = kring->rtail = kring->nr_hwtail; } return (errors ? 1 : 0); } /* interpret the ringid and flags fields of an nmreq, by translating them * into a pair of intervals of ring indices: * * [priv->np_txqfirst, priv->np_txqlast) and * [priv->np_rxqfirst, priv->np_rxqlast) * */ int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY }; enum txrx t; u_int j; for_rx_tx(t) { if (nr_flags & excluded_direction[t]) { priv->np_qfirst[t] = priv->np_qlast[t] = 0; continue; } switch (nr_mode) { case NR_REG_ALL_NIC: case NR_REG_NULL: priv->np_qfirst[t] = 0; priv->np_qlast[t] = nma_get_nrings(na, t); - ND("ALL/PIPE: %s %d %d", nm_txrx2str(t), + nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_SW: case NR_REG_NIC_SW: if (!(na->na_flags & NAF_HOST_RINGS)) { nm_prerr("host rings not supported"); return EINVAL; } priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? nma_get_nrings(na, t) : 0); priv->np_qlast[t] = netmap_all_rings(na, t); - ND("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW", + nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_ONE_NIC: if (nr_ringid >= na->num_tx_rings && nr_ringid >= na->num_rx_rings) { nm_prerr("invalid ring id %d", nr_ringid); return EINVAL; } /* if not enough rings, use the first one */ j = nr_ringid; if (j >= nma_get_nrings(na, t)) j = 0; priv->np_qfirst[t] = j; priv->np_qlast[t] = j + 1; - ND("ONE_NIC: %s %d %d", nm_txrx2str(t), + nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; default: nm_prerr("invalid regif type %d", nr_mode); return EINVAL; } } priv->np_flags = nr_flags; /* Allow transparent forwarding mode in the host --> nic * direction only if all the TX hw rings have been opened. 
*/ if (priv->np_qfirst[NR_TX] == 0 && priv->np_qlast[NR_TX] >= na->num_tx_rings) { priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN; } if (netmap_verbose) { nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX], nr_ringid); } return 0; } /* * Set the ring ID. For devices with a single queue, a request * for all rings is the same as a single ring. */ static int netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; int error; enum txrx t; error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) { return error; } priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1; /* optimization: count the users registered for more than * one ring, which are the ones sleeping on the global queue. * The default netmap_notify() callback will then * avoid signaling the global queue if nobody is using it */ for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]++; } return 0; } static void netmap_unset_ringid(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; enum txrx t; for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]--; priv->np_qfirst[t] = priv->np_qlast[t] = 0; } priv->np_flags = 0; priv->np_txpoll = 0; priv->np_kloop_state = 0; } /* Set the nr_pending_mode for the requested rings. * If requested, also try to get exclusive access to the rings, provided * the rings we want to bind are not exclusively owned by a previous bind. */ static int netmap_krings_get(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; if (netmap_debug & NM_DEBUG_ON) nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX]); /* first round: check that all the requested rings * are neither alread exclusively owned, nor we * want exclusive ownership when they are already in use */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if ((kring->nr_kflags & NKR_EXCLUSIVE) || (kring->users && excl)) { - ND("ring %s busy", kring->name); + nm_prdis("ring %s busy", kring->name); return EBUSY; } } } /* second round: increment usage count (possibly marking them * as exclusive) and set the nr_pending_mode */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; kring->users++; if (excl) kring->nr_kflags |= NKR_EXCLUSIVE; kring->nr_pending_mode = NKR_NETMAP_ON; } } return 0; } /* Undo netmap_krings_get(). This is done by clearing the exclusive mode * if was asked on regif, and unset the nr_pending_mode if we are the * last users of the involved rings. 
*/ static void netmap_krings_put(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; - ND("%s: releasing tx [%d, %d) rx [%d, %d)", + nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[MR_RX]); for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (excl) kring->nr_kflags &= ~NKR_EXCLUSIVE; kring->users--; if (kring->users == 0) kring->nr_pending_mode = NKR_NETMAP_OFF; } } } static int nm_priv_rx_enabled(struct netmap_priv_d *priv) { return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]); } /* Validate the CSB entries for both directions (atok and ktoa). * To be called under NMG_LOCK(). */ static int netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo) { struct nm_csb_atok *csb_atok_base = (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok; struct nm_csb_ktoa *csb_ktoa_base = (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa; enum txrx t; int num_rings[NR_TXRX], tot_rings; size_t entry_size[2]; void *csb_start[2]; int i; if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { nm_prerr("Cannot update CSB while kloop is running"); return EBUSY; } tot_rings = 0; for_rx_tx(t) { num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t]; tot_rings += num_rings[t]; } if (tot_rings <= 0) return 0; if (!(priv->np_flags & NR_EXCLUSIVE)) { nm_prerr("CSB mode requires NR_EXCLUSIVE"); return EINVAL; } entry_size[0] = sizeof(*csb_atok_base); entry_size[1] = sizeof(*csb_ktoa_base); csb_start[0] = (void *)csb_atok_base; csb_start[1] = (void *)csb_ktoa_base; for (i = 0; i < 2; i++) { /* On Linux we could use access_ok() to simplify * the validation. However, the advantage of * this approach is that it works also on * FreeBSD. */ size_t csb_size = tot_rings * entry_size[i]; void *tmp; int err; if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) { nm_prerr("Unaligned CSB address"); return EINVAL; } tmp = nm_os_malloc(csb_size); if (!tmp) return ENOMEM; if (i == 0) { /* Application --> kernel direction. */ err = copyin(csb_start[i], tmp, csb_size); } else { /* Kernel --> application direction. */ memset(tmp, 0, csb_size); err = copyout(tmp, csb_start[i], csb_size); } nm_os_free(tmp); if (err) { nm_prerr("Invalid CSB address"); return err; } } priv->np_csb_atok_base = csb_atok_base; priv->np_csb_ktoa_base = csb_ktoa_base; /* Initialize the CSB. */ for_rx_tx(t) { for (i = 0; i < num_rings[t]; i++) { struct netmap_kring *kring = NMR(priv->np_na, t)[i + priv->np_qfirst[t]]; struct nm_csb_atok *csb_atok = csb_atok_base + i; struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i; if (t == NR_RX) { csb_atok += num_rings[NR_TX]; csb_ktoa += num_rings[NR_TX]; } CSB_WRITE(csb_atok, head, kring->rhead); CSB_WRITE(csb_atok, cur, kring->rcur); CSB_WRITE(csb_atok, appl_need_kick, 1); CSB_WRITE(csb_atok, sync_flags, 1); CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur); CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail); CSB_WRITE(csb_ktoa, kern_need_kick, 1); nm_prinf("csb_init for kring %s: head %u, cur %u, " "hwcur %u, hwtail %u", kring->name, kring->rhead, kring->rcur, kring->nr_hwcur, kring->nr_hwtail); } } return 0; } /* Ensure that the netmap adapter can support the given MTU. * @return EINVAL if the na cannot be set to mtu, 0 otherwise. 
*/ int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) { unsigned nbs = NETMAP_BUF_SIZE(na); if (mtu <= na->rx_buf_maxsize) { /* The MTU fits a single NIC slot. We only * Need to check that netmap buffers are * large enough to hold an MTU. NS_MOREFRAG * cannot be used in this case. */ if (nbs < mtu) { nm_prerr("error: netmap buf size (%u) " "< device MTU (%u)", nbs, mtu); return EINVAL; } } else { /* More NIC slots may be needed to receive * or transmit a single packet. Check that * the adapter supports NS_MOREFRAG and that * netmap buffers are large enough to hold * the maximum per-slot size. */ if (!(na->na_flags & NAF_MOREFRAG)) { nm_prerr("error: large MTU (%d) needed " "but %s does not support " "NS_MOREFRAG", mtu, na->ifp->if_xname); return EINVAL; } else if (nbs < na->rx_buf_maxsize) { nm_prerr("error: using NS_MOREFRAG on " "%s requires netmap buf size " ">= %u", na->ifp->if_xname, na->rx_buf_maxsize); return EINVAL; } else { nm_prinf("info: netmap application on " "%s needs to support " "NS_MOREFRAG " "(MTU=%u,netmap_buf_size=%u)", na->ifp->if_xname, mtu, nbs); } } return 0; } /* * possibly move the interface to netmap-mode. * If success it returns a pointer to netmap_if, otherwise NULL. * This must be called with NMG_LOCK held. * * The following na callbacks are called in the process: * * na->nm_config() [by netmap_update_config] * (get current number and size of rings) * * We have a generic one for linux (netmap_linux_config). * The bwrap has to override this, since it has to forward * the request to the wrapped adapter (netmap_bwrap_config). * * * na->nm_krings_create() * (create and init the krings array) * * One of the following: * * * netmap_hw_krings_create, (hw ports) * creates the standard layout for the krings * and adds the mbq (used for the host rings). * * * netmap_vp_krings_create (VALE ports) * add leases and scratchpads * * * netmap_pipe_krings_create (pipes) * create the krings and rings of both ends and * cross-link them * * * netmap_monitor_krings_create (monitors) * avoid allocating the mbq * * * netmap_bwrap_krings_create (bwraps) * create both the brap krings array, * the krings array of the wrapped adapter, and * (if needed) the fake array for the host adapter * * na->nm_register(, 1) * (put the adapter in netmap mode) * * This may be one of the following: * * * netmap_hw_reg (hw ports) * checks that the ifp is still there, then calls * the hardware specific callback; * * * netmap_vp_reg (VALE ports) * If the port is connected to a bridge, * set the NAF_NETMAP_ON flag under the * bridge write lock. * * * netmap_pipe_reg (pipes) * inform the other pipe end that it is no * longer responsible for the lifetime of this * pipe end * * * netmap_monitor_reg (monitors) * intercept the sync callbacks of the monitored * rings * * * netmap_bwrap_reg (bwraps) * cross-link the bwrap and hwna rings, * forward the request to the hwna, override * the hwna notify callback (to get the frames * coming from outside go through the bridge). 
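 *
 * For a hardware driver, these callbacks are normally wired up once at
 * attach time via netmap_attach(). A rough sketch follows; the foo_*
 * hooks and the 'adapter' softc fields are hypothetical placeholders,
 * not part of this file:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);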
* * */ int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_if *nifp = NULL; int error; NMG_LOCK_ASSERT(); priv->np_na = na; /* store the reference */ error = netmap_mem_finalize(na->nm_mem, na); if (error) goto err; if (na->active_fds == 0) { /* cache the allocator info in the na */ error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); if (error) goto err_drop_mem; - ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, + nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, na->na_lut.objsize); /* ring configuration may have changed, fetch from the card */ netmap_update_config(na); } /* compute the range of tx and rx rings to monitor */ error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) goto err_put_lut; if (na->active_fds == 0) { /* * If this is the first registration of the adapter, * perform sanity checks and create the in-kernel view * of the netmap rings (the netmap krings). */ if (na->ifp && nm_priv_rx_enabled(priv)) { /* This netmap adapter is attached to an ifnet. */ unsigned mtu = nm_os_ifnet_mtu(na->ifp); - ND("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d", + nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d", na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na)); if (na->rx_buf_maxsize == 0) { nm_prerr("%s: error: rx_buf_maxsize == 0", na->name); error = EIO; goto err_drop_mem; } error = netmap_buf_size_validate(na, mtu); if (error) goto err_drop_mem; } /* * Depending on the adapter, this may also create * the netmap rings themselves */ error = na->nm_krings_create(na); if (error) goto err_put_lut; } /* now the krings must exist and we can check whether some * previous bind has exclusive ownership on them, and set * nr_pending_mode */ error = netmap_krings_get(priv); if (error) goto err_del_krings; /* create all needed missing netmap rings */ error = netmap_mem_rings_create(na); if (error) goto err_rel_excl; /* in all cases, create a new netmap if */ nifp = netmap_mem_if_new(na, priv); if (nifp == NULL) { error = ENOMEM; goto err_rel_excl; } if (nm_kring_pending(priv)) { /* Some kring is switching mode, tell the adapter to * react on this. */ error = na->nm_register(na, 1); if (error) goto err_del_if; } /* Commit the reference. */ na->active_fds++; /* * advertise that the interface is ready by setting np_nifp. * The barrier is needed because readers (poll, *SYNC and mmap) * check for priv->np_nifp != NULL without locking */ mb(); /* make sure previous writes are visible to all CPUs */ priv->np_nifp = nifp; return 0; err_del_if: netmap_mem_if_delete(na, nifp); err_rel_excl: netmap_krings_put(priv); netmap_mem_rings_delete(na); err_del_krings: if (na->active_fds == 0) na->nm_krings_delete(na); err_put_lut: if (na->active_fds == 0) memset(&na->na_lut, 0, sizeof(na->na_lut)); err_drop_mem: netmap_mem_drop(na); err: priv->np_na = NULL; return error; } /* * update kring and ring at the end of rxsync/txsync. */ static inline void nm_sync_finalize(struct netmap_kring *kring) { /* * Update ring tail to what the kernel knows * After txsync: head/rhead/hwcur might be behind cur/rcur * if no carrier. 
*/ kring->ring->tail = kring->rtail = kring->nr_hwtail; - ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d", + nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail); } /* set ring timestamp */ static inline void ring_timestamp_set(struct netmap_ring *ring) { if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) { microtime(&ring->ts); } } static int nmreq_copyin(struct nmreq_header *, int); static int nmreq_copyout(struct nmreq_header *, int); static int nmreq_checkoptions(struct nmreq_header *); /* * ioctl(2) support for the "netmap" device. * * Following a list of accepted commands: * - NIOCCTRL device control API * - NIOCTXSYNC sync TX rings * - NIOCRXSYNC sync RX rings * - SIOCGIFADDR just for convenience * - NIOCGINFO deprecated (legacy API) * - NIOCREGIF deprecated (legacy API) * * Return 0 on success, errno otherwise. */ int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *td, int nr_body_is_user) { struct mbq q; /* packets from RX hw queues to host stack */ struct netmap_adapter *na = NULL; struct netmap_mem_d *nmd = NULL; struct ifnet *ifp = NULL; int error = 0; u_int i, qfirst, qlast; struct netmap_kring **krings; int sync_flags; enum txrx t; switch (cmd) { case NIOCCTRL: { struct nmreq_header *hdr = (struct nmreq_header *)data; if (hdr->nr_version < NETMAP_MIN_API || hdr->nr_version > NETMAP_MAX_API) { nm_prerr("API mismatch: got %d need %d", hdr->nr_version, NETMAP_API); return EINVAL; } /* Make a kernel-space copy of the user-space nr_body. * For convenince, the nr_body pointer and the pointers * in the options list will be replaced with their * kernel-space counterparts. The original pointers are * saved internally and later restored by nmreq_copyout */ error = nmreq_copyin(hdr, nr_body_is_user); if (error) { return error; } /* Sanitize hdr->nr_name. */ hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0'; switch (hdr->nr_reqtype) { case NETMAP_REQ_REGISTER: { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct netmap_if *nifp; /* Protect access to priv from concurrent requests. */ NMG_LOCK(); do { struct nmreq_option *opt; u_int memflags; if (priv->np_nifp != NULL) { /* thread already registered */ error = EBUSY; break; } #ifdef WITH_EXTMEM opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options, NETMAP_REQ_OPT_EXTMEM); if (opt != NULL) { struct nmreq_opt_extmem *e = (struct nmreq_opt_extmem *)opt; error = nmreq_checkduplicate(opt); if (error) { opt->nro_status = error; break; } nmd = netmap_mem_ext_create(e->nro_usrptr, &e->nro_info, &error); opt->nro_status = error; if (nmd == NULL) break; } #endif /* WITH_EXTMEM */ if (nmd == NULL && req->nr_mem_id) { /* find the allocator and get a reference */ nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) { if (netmap_verbose) { nm_prerr("%s: failed to find mem_id %u", hdr->nr_name, req->nr_mem_id); } error = EINVAL; break; } } /* find the interface and a reference */ error = netmap_get_na(hdr, &na, &ifp, nmd, 1 /* create */); /* keep reference */ if (error) break; if (NETMAP_OWNED_BY_KERN(na)) { error = EBUSY; break; } if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { nm_prerr("virt_hdr_len=%d, but application does " "not accept it", na->virt_hdr_len); error = EIO; break; } error = netmap_do_regif(priv, na, req->nr_mode, req->nr_ringid, req->nr_flags); if (error) { /* reg. 
failed, release priv and ref */ break; } opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options, NETMAP_REQ_OPT_CSB); if (opt != NULL) { struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)opt; error = nmreq_checkduplicate(opt); if (!error) { error = netmap_csb_validate(priv, csbo); } opt->nro_status = error; if (error) { netmap_do_unregif(priv); break; } } nifp = priv->np_nifp; /* return the offset of the netmap_if object */ req->nr_rx_rings = na->num_rx_rings; req->nr_tx_rings = na->num_tx_rings; req->nr_rx_slots = na->num_rx_desc; req->nr_tx_slots = na->num_tx_desc; error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, &req->nr_mem_id); if (error) { netmap_do_unregif(priv); break; } if (memflags & NETMAP_MEM_PRIVATE) { *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; } for_rx_tx(t) { priv->np_si[t] = nm_si_user(priv, t) ? &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; } if (req->nr_extra_bufs) { if (netmap_verbose) nm_prinf("requested %d extra buffers", req->nr_extra_bufs); req->nr_extra_bufs = netmap_extra_alloc(na, &nifp->ni_bufs_head, req->nr_extra_bufs); if (netmap_verbose) nm_prinf("got %d extra buffers", req->nr_extra_bufs); } req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); error = nmreq_checkoptions(hdr); if (error) { netmap_do_unregif(priv); break; } /* store ifp reference so that priv destructor may release it */ priv->np_ifp = ifp; } while (0); if (error) { netmap_unget_na(na, ifp); } /* release the reference from netmap_mem_find() or * netmap_mem_ext_create() */ if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); break; } case NETMAP_REQ_PORT_INFO_GET: { struct nmreq_port_info_get *req = (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body; NMG_LOCK(); do { u_int memflags; if (hdr->nr_name[0] != '\0') { /* Build a nmreq_register out of the nmreq_port_info_get, * so that we can call netmap_get_na(). */ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; regreq.nr_tx_slots = req->nr_tx_slots; regreq.nr_rx_slots = req->nr_rx_slots; regreq.nr_tx_rings = req->nr_tx_rings; regreq.nr_rx_rings = req->nr_rx_rings; regreq.nr_mem_id = req->nr_mem_id; /* get a refcount */ hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */ hdr->nr_body = (uintptr_t)req; /* reset nr_body */ if (error) { na = NULL; ifp = NULL; break; } nmd = na->nm_mem; /* get memory allocator */ } else { nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1); if (nmd == NULL) { if (netmap_verbose) nm_prerr("%s: failed to find mem_id %u", hdr->nr_name, req->nr_mem_id ? 
req->nr_mem_id : 1); error = EINVAL; break; } } error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags, &req->nr_mem_id); if (error) break; if (na == NULL) /* only memory info */ break; netmap_update_config(na); req->nr_rx_rings = na->num_rx_rings; req->nr_tx_rings = na->num_tx_rings; req->nr_rx_slots = na->num_rx_desc; req->nr_tx_slots = na->num_tx_desc; } while (0); netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } #ifdef WITH_VALE case NETMAP_REQ_VALE_ATTACH: { error = netmap_vale_attach(hdr, NULL /* userspace request */); break; } case NETMAP_REQ_VALE_DETACH: { error = netmap_vale_detach(hdr, NULL /* userspace request */); break; } case NETMAP_REQ_VALE_LIST: { error = netmap_vale_list(hdr); break; } case NETMAP_REQ_PORT_HDR_SET: { struct nmreq_port_hdr *req = (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; /* Build a nmreq_register out of the nmreq_port_hdr, * so that we can call netmap_get_bdg_na(). */ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; /* For now we only support virtio-net headers, and only for * VALE ports, but this may change in future. Valid lengths * for the virtio-net header are 0 (no header), 10 and 12. */ if (req->nr_hdr_len != 0 && req->nr_hdr_len != sizeof(struct nm_vnet_hdr) && req->nr_hdr_len != 12) { if (netmap_verbose) nm_prerr("invalid hdr_len %u", req->nr_hdr_len); error = EINVAL; break; } NMG_LOCK(); hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_vale_na(hdr, &na, NULL, 0); hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET; hdr->nr_body = (uintptr_t)req; if (na && !error) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; na->virt_hdr_len = req->nr_hdr_len; if (na->virt_hdr_len) { vpna->mfs = NETMAP_BUF_SIZE(na); } if (netmap_verbose) nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); netmap_adapter_put(na); } else if (!na) { error = ENXIO; } NMG_UNLOCK(); break; } case NETMAP_REQ_PORT_HDR_GET: { /* Get vnet-header length for this netmap port */ struct nmreq_port_hdr *req = (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; /* Build a nmreq_register out of the nmreq_port_hdr, * so that we can call netmap_get_bdg_na(). */ struct nmreq_register regreq; struct ifnet *ifp; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; NMG_LOCK(); hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 0); hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET; hdr->nr_body = (uintptr_t)req; if (na && !error) { req->nr_hdr_len = na->virt_hdr_len; } netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } case NETMAP_REQ_VALE_NEWIF: { error = nm_vi_create(hdr); break; } case NETMAP_REQ_VALE_DELIF: { error = nm_vi_destroy(hdr->nr_name); break; } case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: { error = nm_bdg_polling(hdr); break; } #endif /* WITH_VALE */ case NETMAP_REQ_POOLS_INFO_GET: { /* Get information from the memory allocator used for * hdr->nr_name. */ struct nmreq_pools_info *req = (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body; NMG_LOCK(); do { /* Build a nmreq_register out of the nmreq_pools_info, * so that we can call netmap_get_na(). 
*/ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mem_id = req->nr_mem_id; regreq.nr_mode = NR_REG_ALL_NIC; hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */ hdr->nr_body = (uintptr_t)req; /* reset nr_body */ if (error) { na = NULL; ifp = NULL; break; } nmd = na->nm_mem; /* grab the memory allocator */ if (nmd == NULL) { error = EINVAL; break; } /* Finalize the memory allocator, get the pools * information and release the allocator. */ error = netmap_mem_finalize(nmd, na); if (error) { break; } error = netmap_mem_pools_info_get(req, nmd); netmap_mem_drop(na); } while (0); netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } case NETMAP_REQ_CSB_ENABLE: { struct nmreq_option *opt; opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options, NETMAP_REQ_OPT_CSB); if (opt == NULL) { error = EINVAL; } else { struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)opt; error = nmreq_checkduplicate(opt); if (!error) { NMG_LOCK(); error = netmap_csb_validate(priv, csbo); NMG_UNLOCK(); } opt->nro_status = error; } break; } case NETMAP_REQ_SYNC_KLOOP_START: { error = netmap_sync_kloop(priv, hdr); break; } case NETMAP_REQ_SYNC_KLOOP_STOP: { error = netmap_sync_kloop_stop(priv); break; } default: { error = EINVAL; break; } } /* Write back request body to userspace and reset the * user-space pointer. */ error = nmreq_copyout(hdr, error); break; } case NIOCTXSYNC: case NIOCRXSYNC: { if (unlikely(priv->np_nifp == NULL)) { error = ENXIO; break; } mb(); /* make sure following reads are not from cache */ if (unlikely(priv->np_csb_atok_base)) { nm_prerr("Invalid sync in CSB mode"); error = EBUSY; break; } na = priv->np_na; /* we have a reference */ mbq_init(&q); t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX); krings = NMR(na, t); qfirst = priv->np_qfirst[t]; qlast = priv->np_qlast[t]; sync_flags = priv->np_sync_flags; for (i = qfirst; i < qlast; i++) { struct netmap_kring *kring = krings[i]; struct netmap_ring *ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &error))) { error = (error ? 
EIO : 0); continue; } if (cmd == NIOCTXSYNC) { if (netmap_debug & NM_DEBUG_TXSYNC) nm_prinf("pre txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) { nm_sync_finalize(kring); } if (netmap_debug & NM_DEBUG_TXSYNC) nm_prinf("post txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); } else { if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } if (nm_may_forward_up(kring)) { /* transparent forwarding, see netmap_poll() */ netmap_grab_packets(kring, &q, netmap_fwd); } if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) { nm_sync_finalize(kring); } ring_timestamp_set(ring); } nm_kr_put(kring); } if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } break; } default: { return netmap_ioctl_legacy(priv, cmd, data, td); break; } } return (error); } size_t nmreq_size_by_type(uint16_t nr_reqtype) { switch (nr_reqtype) { case NETMAP_REQ_REGISTER: return sizeof(struct nmreq_register); case NETMAP_REQ_PORT_INFO_GET: return sizeof(struct nmreq_port_info_get); case NETMAP_REQ_VALE_ATTACH: return sizeof(struct nmreq_vale_attach); case NETMAP_REQ_VALE_DETACH: return sizeof(struct nmreq_vale_detach); case NETMAP_REQ_VALE_LIST: return sizeof(struct nmreq_vale_list); case NETMAP_REQ_PORT_HDR_SET: case NETMAP_REQ_PORT_HDR_GET: return sizeof(struct nmreq_port_hdr); case NETMAP_REQ_VALE_NEWIF: return sizeof(struct nmreq_vale_newif); case NETMAP_REQ_VALE_DELIF: case NETMAP_REQ_SYNC_KLOOP_STOP: case NETMAP_REQ_CSB_ENABLE: return 0; case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: return sizeof(struct nmreq_vale_polling); case NETMAP_REQ_POOLS_INFO_GET: return sizeof(struct nmreq_pools_info); case NETMAP_REQ_SYNC_KLOOP_START: return sizeof(struct nmreq_sync_kloop_start); } return 0; } static size_t nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size) { size_t rv = sizeof(struct nmreq_option); #ifdef NETMAP_REQ_OPT_DEBUG if (nro_reqtype & NETMAP_REQ_OPT_DEBUG) return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG); #endif /* NETMAP_REQ_OPT_DEBUG */ switch (nro_reqtype) { #ifdef WITH_EXTMEM case NETMAP_REQ_OPT_EXTMEM: rv = sizeof(struct nmreq_opt_extmem); break; #endif /* WITH_EXTMEM */ case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS: if (nro_size >= rv) rv = nro_size; break; case NETMAP_REQ_OPT_CSB: rv = sizeof(struct nmreq_opt_csb); break; case NETMAP_REQ_OPT_SYNC_KLOOP_MODE: rv = sizeof(struct nmreq_opt_sync_kloop_mode); break; } /* subtract the common header */ return rv - sizeof(struct nmreq_option); } int nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user) { size_t rqsz, optsz, bufsz; int error; char *ker = NULL, *p; struct nmreq_option **next, *src; struct nmreq_option buf; uint64_t *ptrs; if (hdr->nr_reserved) { if (netmap_verbose) nm_prerr("nr_reserved must be zero"); return EINVAL; } if (!nr_body_is_user) return 0; hdr->nr_reserved = nr_body_is_user; /* compute the total size of the buffer */ rqsz = nmreq_size_by_type(hdr->nr_reqtype); if (rqsz > NETMAP_REQ_MAXSIZE) { error = EMSGSIZE; goto out_err; } if ((rqsz && hdr->nr_body == (uintptr_t)NULL) || (!rqsz && hdr->nr_body != (uintptr_t)NULL)) { /* Request body expected, but not found; or * request body found but unexpected. 
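 *
 * For reference, a well-formed header/body pair as built by userspace
 * looks roughly like this (sketch; "em0" and fd are just examples):
 *
 *	struct nmreq_register req;
 *	struct nmreq_header hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	hdr.nr_body = (uintptr_t)&req;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	req.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);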
*/ if (netmap_verbose) nm_prerr("nr_body expected but not found, or vice versa"); error = EINVAL; goto out_err; } bufsz = 2 * sizeof(void *) + rqsz; optsz = 0; for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src; src = (struct nmreq_option *)(uintptr_t)buf.nro_next) { error = copyin(src, &buf, sizeof(*src)); if (error) goto out_err; optsz += sizeof(*src); optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size); if (rqsz + optsz > NETMAP_REQ_MAXSIZE) { error = EMSGSIZE; goto out_err; } bufsz += optsz + sizeof(void *); } ker = nm_os_malloc(bufsz); if (ker == NULL) { error = ENOMEM; goto out_err; } p = ker; /* make a copy of the user pointers */ ptrs = (uint64_t*)p; *ptrs++ = hdr->nr_body; *ptrs++ = hdr->nr_options; p = (char *)ptrs; /* copy the body */ error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz); if (error) goto out_restore; /* overwrite the user pointer with the in-kernel one */ hdr->nr_body = (uintptr_t)p; p += rqsz; /* copy the options */ next = (struct nmreq_option **)&hdr->nr_options; src = *next; while (src) { struct nmreq_option *opt; /* copy the option header */ ptrs = (uint64_t *)p; opt = (struct nmreq_option *)(ptrs + 1); error = copyin(src, opt, sizeof(*src)); if (error) goto out_restore; /* make a copy of the user next pointer */ *ptrs = opt->nro_next; /* overwrite the user pointer with the in-kernel one */ *next = opt; /* initialize the option as not supported. * Recognized options will update this field. */ opt->nro_status = EOPNOTSUPP; p = (char *)(opt + 1); /* copy the option body */ optsz = nmreq_opt_size_by_type(opt->nro_reqtype, opt->nro_size); if (optsz) { /* the option body follows the option header */ error = copyin(src + 1, p, optsz); if (error) goto out_restore; p += optsz; } /* move to next option */ next = (struct nmreq_option **)&opt->nro_next; src = *next; } return 0; out_restore: ptrs = (uint64_t *)ker; hdr->nr_body = *ptrs++; hdr->nr_options = *ptrs++; hdr->nr_reserved = 0; nm_os_free(ker); out_err: return error; } static int nmreq_copyout(struct nmreq_header *hdr, int rerror) { struct nmreq_option *src, *dst; void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart; uint64_t *ptrs; size_t bodysz; int error; if (!hdr->nr_reserved) return rerror; /* restore the user pointers in the header */ ptrs = (uint64_t *)ker - 2; bufstart = ptrs; hdr->nr_body = *ptrs++; src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; hdr->nr_options = *ptrs; if (!rerror) { /* copy the body */ bodysz = nmreq_size_by_type(hdr->nr_reqtype); error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz); if (error) { rerror = error; goto out; } } /* copy the options */ dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options; while (src) { size_t optsz; uint64_t next; /* restore the user pointer */ next = src->nro_next; ptrs = (uint64_t *)src - 1; src->nro_next = *ptrs; /* always copy the option header */ error = copyout(src, dst, sizeof(*src)); if (error) { rerror = error; goto out; } /* copy the option body only if there was no error */ if (!rerror && !src->nro_status) { optsz = nmreq_opt_size_by_type(src->nro_reqtype, src->nro_size); if (optsz) { error = copyout(src + 1, dst + 1, optsz); if (error) { rerror = error; goto out; } } } src = (struct nmreq_option *)(uintptr_t)next; dst = (struct nmreq_option *)(uintptr_t)*ptrs; } out: hdr->nr_reserved = 0; nm_os_free(bufstart); return rerror; } struct nmreq_option * nmreq_findoption(struct nmreq_option *opt, uint16_t reqtype) { for ( ; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next) if 
(opt->nro_reqtype == reqtype) return opt; return NULL; } int nmreq_checkduplicate(struct nmreq_option *opt) { uint16_t type = opt->nro_reqtype; int dup = 0; while ((opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)opt->nro_next, type))) { dup++; opt->nro_status = EINVAL; } return (dup ? EINVAL : 0); } static int nmreq_checkoptions(struct nmreq_header *hdr) { struct nmreq_option *opt; /* return error if there is still any option * marked as not supported */ for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next) if (opt->nro_status == EOPNOTSUPP) return EOPNOTSUPP; return 0; } /* * select(2) and poll(2) handlers for the "netmap" device. * * Can be called for one or more queues. * Return true the event mask corresponding to ready events. * If there are no ready events (and 'sr' is not NULL), do a * selrecord on either individual selinfo or on the global one. * Device-dependent parts (locking and sync of tx/rx rings) * are done through callbacks. * * On linux, arguments are really pwait, the poll table, and 'td' is struct file * * The first one is remapped to pwait as selrecord() uses the name as an * hidden argument. */ int netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr) { struct netmap_adapter *na; struct netmap_kring *kring; struct netmap_ring *ring; u_int i, want[NR_TXRX], revents = 0; NM_SELINFO_T *si[NR_TXRX]; #define want_tx want[NR_TX] #define want_rx want[NR_RX] struct mbq q; /* packets from RX hw queues to host stack */ /* * In order to avoid nested locks, we need to "double check" * txsync and rxsync if we decide to do a selrecord(). * retry_tx (and retry_rx, later) prevent looping forever. */ int retry_tx = 1, retry_rx = 1; /* Transparent mode: send_down is 1 if we have found some * packets to forward (host RX ring --> NIC) during the rx * scan and we have not sent them down to the NIC yet. * Transparent mode requires to bind all rings to a single * file descriptor. */ int send_down = 0; int sync_flags = priv->np_sync_flags; mbq_init(&q); if (unlikely(priv->np_nifp == NULL)) { return POLLERR; } mb(); /* make sure following reads are not from cache */ na = priv->np_na; if (unlikely(!nm_netmap_on(na))) return POLLERR; if (unlikely(priv->np_csb_atok_base)) { nm_prerr("Invalid poll in CSB mode"); return POLLERR; } if (netmap_debug & NM_DEBUG_ON) nm_prinf("device %s events 0x%x", na->name, events); want_tx = events & (POLLOUT | POLLWRNORM); want_rx = events & (POLLIN | POLLRDNORM); /* * If the card has more than one queue AND the file descriptor is * bound to all of them, we sleep on the "global" selinfo, otherwise * we sleep on individual selinfo (FreeBSD only allows two selinfo's * per file descriptor). * The interrupt routine in the driver wake one or the other * (or both) depending on which clients are active. * * rxsync() is only called if we run out of buffers on a POLLIN. * txsync() is called if we run out of buffers on POLLOUT, or * there are pending packets to send. The latter can be disabled * passing NETMAP_NO_TX_POLL in the NIOCREG call. */ si[NR_RX] = priv->np_si[NR_RX]; si[NR_TX] = priv->np_si[NR_TX]; #ifdef __FreeBSD__ /* * We start with a lock free round which is cheap if we have * slots available. If this fails, then lock and call the sync * routines. We can't do this on Linux, as the contract says * that we must call nm_os_selrecord() unconditionally. 
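 *
 * For context, a userspace consumer typically drives this handler with
 * something like the following (sketch based on the netmap_user.h
 * helpers; rxring setup omitted):
 *
 *	struct pollfd pfd = { .fd = nm_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		while (!nm_ring_empty(rxring)) {
 *			uint32_t i = rxring->cur;
 *			char *buf = NETMAP_BUF(rxring, rxring->slot[i].buf_idx);
 *			/* consume rxring->slot[i].len bytes at buf */
 *			rxring->head = rxring->cur = nm_ring_next(rxring, i);
 *		}
 *	}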
*/ if (want_tx) { const enum txrx t = NR_TX; for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (kring->ring->cur != kring->ring->tail) { /* Some unseen TX space is available, so what * we don't need to run txsync. */ revents |= want[t]; want[t] = 0; break; } } } if (want_rx) { const enum txrx t = NR_RX; int rxsync_needed = 0; for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (kring->ring->cur == kring->ring->tail || kring->rhead != kring->ring->head) { /* There are no unseen packets on this ring, * or there are some buffers to be returned * to the netmap port. We therefore go ahead * and run rxsync. */ rxsync_needed = 1; break; } } if (!rxsync_needed) { revents |= want_rx; want_rx = 0; } } #endif #ifdef linux /* The selrecord must be unconditional on linux. */ nm_os_selrecord(sr, si[NR_RX]); nm_os_selrecord(sr, si[NR_TX]); #endif /* linux */ /* * If we want to push packets out (priv->np_txpoll) or * want_tx is still set, we must issue txsync calls * (on all rings, to avoid that the tx rings stall). * Fortunately, normal tx mode has np_txpoll set. */ if (priv->np_txpoll || want_tx) { /* * The first round checks if anyone is ready, if not * do a selrecord and another round to handle races. * want_tx goes to 0 if any space is found, and is * used to skip rings with no pending transmissions. */ flush_tx: for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) { int found = 0; kring = na->tx_rings[i]; ring = kring->ring; /* * Don't try to txsync this TX ring if we already found some * space in some of the TX rings (want_tx == 0) and there are no * TX slots in this ring that need to be flushed to the NIC * (head == hwcur). */ if (!send_down && !want_tx && ring->head == kring->nr_hwcur) continue; if (nm_kr_tryget(kring, 1, &revents)) continue; if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } else { if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); } /* * If we found new slots, notify potential * listeners on the same ring. * Since we just did a txsync, look at the copies * of cur,tail in the kring. */ found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { /* notify other listeners */ revents |= want_tx; want_tx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } /* if there were any packet to forward we must have handled them by now */ send_down = 0; if (want_tx && retry_tx && sr) { #ifndef linux nm_os_selrecord(sr, si[NR_TX]); #endif /* !linux */ retry_tx = 0; goto flush_tx; } } /* * If want_rx is still set scan receive rings. * Do it on all rings because otherwise we starve. */ if (want_rx) { /* two rounds here for race avoidance */ do_retry_rx: for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) { int found = 0; kring = na->rx_rings[i]; ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &revents))) continue; if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } /* now we can use kring->rcur, rtail */ /* * transparent mode support: collect packets from * hw rxring(s) that have been released by the user */ if (nm_may_forward_up(kring)) { netmap_grab_packets(kring, &q, netmap_fwd); } /* Clear the NR_FORWARD flag anyway, it may be set by * the nm_sync() below only on for the host RX ring (see * netmap_rxsync_from_host()). 
*/ kring->nr_kflags &= ~NR_FORWARD; if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); send_down |= (kring->nr_kflags & NR_FORWARD); ring_timestamp_set(ring); found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { revents |= want_rx; retry_rx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } #ifndef linux if (retry_rx && sr) { nm_os_selrecord(sr, si[NR_RX]); } #endif /* !linux */ if (send_down || retry_rx) { retry_rx = 0; if (send_down) goto flush_tx; /* and retry_rx */ else goto do_retry_rx; } } /* * Transparent mode: released bufs (i.e. between kring->nr_hwcur and * ring->head) marked with NS_FORWARD on hw rx rings are passed up * to the host stack. */ if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } return (revents); #undef want_tx #undef want_rx } int nma_intr_enable(struct netmap_adapter *na, int onoff) { bool changed = false; enum txrx t; int i; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; int on = !(kring->nr_kflags & NKR_NOINTR); if (!!onoff != !!on) { changed = true; } if (onoff) { kring->nr_kflags &= ~NKR_NOINTR; } else { kring->nr_kflags |= NKR_NOINTR; } } } if (!changed) { return 0; /* nothing to do */ } if (!na->nm_intr) { nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable", na->name); return -1; } na->nm_intr(na, onoff); return 0; } /*-------------------- driver support routines -------------------*/ /* default notify callback */ static int netmap_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->notify_na; enum txrx t = kring->tx; nm_os_selwakeup(&kring->si); /* optimization: avoid a wake up on the global * queue if nobody has registered for more * than one ring */ if (na->si_users[t] > 0) nm_os_selwakeup(&na->si[t]); return NM_IRQ_COMPLETED; } /* called by all routines that create netmap_adapters. * provide some defaults and get a reference to the * memory allocator */ int netmap_attach_common(struct netmap_adapter *na) { if (!na->rx_buf_maxsize) { /* Set a conservative default (larger is safer). */ na->rx_buf_maxsize = PAGE_SIZE; } #ifdef __FreeBSD__ if (na->na_flags & NAF_HOST_RINGS && na->ifp) { na->if_input = na->ifp->if_input; /* for netmap_send_up */ } na->pdev = na; /* make sure netmap_mem_map() is called */ #endif /* __FreeBSD__ */ if (na->na_flags & NAF_HOST_RINGS) { if (na->num_host_rx_rings == 0) na->num_host_rx_rings = 1; if (na->num_host_tx_rings == 0) na->num_host_tx_rings = 1; } if (na->nm_krings_create == NULL) { /* we assume that we have been called by a driver, * since other port types all provide their own * nm_krings_create */ na->nm_krings_create = netmap_hw_krings_create; na->nm_krings_delete = netmap_hw_krings_delete; } if (na->nm_notify == NULL) na->nm_notify = netmap_notify; na->active_fds = 0; if (na->nm_mem == NULL) { /* use the global allocator */ na->nm_mem = netmap_mem_get(&nm_mem); } #ifdef WITH_VALE if (na->nm_bdg_attach == NULL) /* no special nm_bdg_attach callback. On VALE * attach, we need to interpose a bwrap */ na->nm_bdg_attach = netmap_default_bdg_attach; #endif return 0; } /* Wrapper for the register callback provided netmap-enabled * hardware drivers. * nm_iszombie(na) means that the driver module has been * unloaded, so we cannot call into it. * nm_os_ifnet_lock() must guarantee mutual exclusion with * module unloading. 
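 *
 * The wrapping itself is installed by netmap_attach_ext() below:
 *
 *	hwna->nm_hw_register = hwna->up.nm_register;
 *	hwna->up.nm_register = netmap_hw_reg;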
*/ static int netmap_hw_reg(struct netmap_adapter *na, int onoff) { struct netmap_hw_adapter *hwna = (struct netmap_hw_adapter*)na; int error = 0; nm_os_ifnet_lock(); if (nm_iszombie(na)) { if (onoff) { error = ENXIO; } else if (na != NULL) { na->na_flags &= ~NAF_NETMAP_ON; } goto out; } error = hwna->nm_hw_register(na, onoff); out: nm_os_ifnet_unlock(); return error; } static void netmap_hw_dtor(struct netmap_adapter *na) { if (na->ifp == NULL) return; NM_DETACH_NA(na->ifp); } /* * Allocate a netmap_adapter object, and initialize it from the * 'arg' passed by the driver on attach. * We allocate a block of memory of 'size' bytes, which has room * for struct netmap_adapter plus additional room private to * the caller. * Return 0 on success, ENOMEM otherwise. */ int netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg) { struct netmap_hw_adapter *hwna = NULL; struct ifnet *ifp = NULL; if (size < sizeof(struct netmap_hw_adapter)) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("Invalid netmap adapter size %d", (int)size); return EINVAL; } if (arg == NULL || arg->ifp == NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("either arg or arg->ifp is NULL"); return EINVAL; } if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("%s: invalid rings tx %d rx %d", arg->name, arg->num_tx_rings, arg->num_rx_rings); return EINVAL; } ifp = arg->ifp; if (NM_NA_CLASH(ifp)) { /* If NA(ifp) is not null but there is no valid netmap * adapter it means that someone else is using the same * pointer (e.g. ax25_ptr on linux). This happens for * instance when also PF_RING is in use. */ nm_prerr("Error: netmap adapter hook is busy"); return EBUSY; } hwna = nm_os_malloc(size); if (hwna == NULL) goto fail; hwna->up = *arg; hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE; strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name)); if (override_reg) { hwna->nm_hw_register = hwna->up.nm_register; hwna->up.nm_register = netmap_hw_reg; } if (netmap_attach_common(&hwna->up)) { nm_os_free(hwna); goto fail; } netmap_adapter_get(&hwna->up); NM_ATTACH_NA(ifp, &hwna->up); nm_os_onattach(ifp); if (arg->nm_dtor == NULL) { hwna->up.nm_dtor = netmap_hw_dtor; } if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n", hwna->up.num_tx_rings, hwna->up.num_tx_desc, hwna->up.num_rx_rings, hwna->up.num_rx_desc); return 0; fail: nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna); return (hwna ? 
EINVAL : ENOMEM); } int netmap_attach(struct netmap_adapter *arg) { return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter), 1 /* override nm_reg */); } void NM_DBG(netmap_adapter_get)(struct netmap_adapter *na) { if (!na) { return; } refcount_acquire(&na->na_refcount); } /* returns 1 iff the netmap_adapter is destroyed */ int NM_DBG(netmap_adapter_put)(struct netmap_adapter *na) { if (!na) return 1; if (!refcount_release(&na->na_refcount)) return 0; if (na->nm_dtor) na->nm_dtor(na); if (na->tx_rings) { /* XXX should not happen */ if (netmap_debug & NM_DEBUG_ON) nm_prerr("freeing leftover tx_rings"); na->nm_krings_delete(na); } netmap_pipe_dealloc(na); if (na->nm_mem) netmap_mem_put(na->nm_mem); bzero(na, sizeof(*na)); nm_os_free(na); return 1; } /* nm_krings_create callback for all hardware native adapters */ int netmap_hw_krings_create(struct netmap_adapter *na) { int ret = netmap_krings_create(na, 0); if (ret == 0) { /* initialize the mbq for the sw rx ring */ u_int lim = netmap_real_rings(na, NR_RX), i; for (i = na->num_rx_rings; i < lim; i++) { mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue); } - ND("initialized sw rx queue %d", na->num_rx_rings); + nm_prdis("initialized sw rx queue %d", na->num_rx_rings); } return ret; } /* * Called on module unload by the netmap-enabled drivers */ void netmap_detach(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); if (!na) return; NMG_LOCK(); netmap_set_all_rings(na, NM_KR_LOCKED); /* * if the netmap adapter is not native, somebody * changed it, so we can not release it here. * The NAF_ZOMBIE flag will notify the new owner that * the driver is gone. */ if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { na->na_flags |= NAF_ZOMBIE; } /* give active users a chance to notice that NAF_ZOMBIE has been * turned on, so that they can stop and return an error to userspace. * Note that this becomes a NOP if there are no active users and, * therefore, the put() above has deleted the na, since now NA(ifp) is * NULL. */ netmap_enable_all_rings(ifp); NMG_UNLOCK(); } /* * Intercept packets from the network stack and pass them * to netmap as incoming packets on the 'software' ring. * * We only store packets in a bounded mbq and then copy them * in the relevant rxsync routine. * * We rely on the OS to make sure that the ifp and na do not go * away (typically the caller checks for IFF_DRV_RUNNING or the like). * In nm_register() or whenever there is a reinitialization, * we make sure to make the mode change visible here. 
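 *
 * Worked example of the occupancy check below: with nkr_num_slots = 1024,
 * nr_hwcur = 1000 and nr_hwtail = 10, busy = 10 - 1000 + 1024 = 34 slots,
 * so the mbuf is enqueued only while 34 + mbq_len(q) < 1023, and dropped
 * otherwise.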
*/ int netmap_transmit(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring, *tx_kring; u_int len = MBUF_LEN(m); u_int error = ENOBUFS; unsigned int txr; struct mbq *q; int busy; u_int i; i = MBUF_TXQ(m); if (i >= na->num_host_rx_rings) { i = i % na->num_host_rx_rings; } kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i]; // XXX [Linux] we do not need this lock // if we follow the down/configure/up protocol -gl // mtx_lock(&na->core_lock); if (!nm_netmap_on(na)) { nm_prerr("%s not in netmap mode anymore", na->name); error = ENXIO; goto done; } txr = MBUF_TXQ(m); if (txr >= na->num_tx_rings) { txr %= na->num_tx_rings; } tx_kring = NMR(na, NR_TX)[txr]; if (tx_kring->nr_mode == NKR_NETMAP_OFF) { return MBUF_TRANSMIT(na, ifp, m); } q = &kring->rx_queue; // XXX reconsider long packets if we handle fragments if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */ nm_prerr("%s from_host, drop packet size %d > %d", na->name, len, NETMAP_BUF_SIZE(na)); goto done; } if (!netmap_generic_hwcsum) { if (nm_os_mbuf_has_csum_offld(m)) { - RD(1, "%s drop mbuf that needs checksum offload", na->name); + nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name); goto done; } } if (nm_os_mbuf_has_seg_offld(m)) { - RD(1, "%s drop mbuf that needs generic segmentation offload", na->name); + nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name); goto done; } #ifdef __FreeBSD__ ETHER_BPF_MTAP(ifp, m); #endif /* __FreeBSD__ */ /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic() * and maybe other instances of netmap_transmit (the latter * not possible on Linux). * We enqueue the mbuf only if we are sure there is going to be * enough room in the host RX ring, otherwise we drop it. */ mbq_lock(q); busy = kring->nr_hwtail - kring->nr_hwcur; if (busy < 0) busy += kring->nkr_num_slots; if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) { - RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name, + nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name, kring->nr_hwcur, kring->nr_hwtail, mbq_len(q)); } else { mbq_enqueue(q, m); - ND(2, "%s %d bufs in queue", na->name, mbq_len(q)); + nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q)); /* notify outside the lock */ m = NULL; error = 0; } mbq_unlock(q); done: if (m) m_freem(m); /* unconditionally wake up listeners */ kring->nm_notify(kring, 0); /* this is normally netmap_notify(), but for nics * connected to a bridge it is netmap_bwrap_intr_notify(), * that possibly forwards the frames through the switch */ return (error); } /* * netmap_reset() is called by the driver routines when reinitializing * a ring. The driver is in charge of locking to protect the kring. * If native netmap mode is not set just return NULL. * If native netmap mode is set, in particular, we have to set nr_mode to * NKR_NETMAP_ON. */ struct netmap_slot * netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur) { struct netmap_kring *kring; int new_hwofs, lim; if (!nm_native_on(na)) { - ND("interface not in native netmap mode"); + nm_prdis("interface not in native netmap mode"); return NULL; /* nothing to reinitialize */ } /* XXX note- in the new scheme, we are not guaranteed to be * under lock (e.g. when called on a device reset). * In this case, we should set a flag and do not trust too * much the values. In practice: TODO * - set a RESET flag somewhere in the kring * - do the processing in a conservative way * - let the *sync() fixup at the end. 
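 *
 * Typical driver usage at ring (re)initialization time (sketch; the
 * descriptor programming is driver specific and nkr_hwofs translation
 * is ignored for brevity):
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *
 *	if (slot != NULL) {	/* only when in native netmap mode */
 *		for (i = 0; i < na->num_tx_desc; i++) {
 *			void *addr = NMB(na, slot + i);
 *			/* point hw descriptor i at addr */
 *		}
 *	}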
*/ if (tx == NR_TX) { if (n >= na->num_tx_rings) return NULL; kring = na->tx_rings[n]; if (kring->nr_pending_mode == NKR_NETMAP_OFF) { kring->nr_mode = NKR_NETMAP_OFF; return NULL; } // XXX check whether we should use hwcur or rcur new_hwofs = kring->nr_hwcur - new_cur; } else { if (n >= na->num_rx_rings) return NULL; kring = na->rx_rings[n]; if (kring->nr_pending_mode == NKR_NETMAP_OFF) { kring->nr_mode = NKR_NETMAP_OFF; return NULL; } new_hwofs = kring->nr_hwtail - new_cur; } lim = kring->nkr_num_slots - 1; if (new_hwofs > lim) new_hwofs -= lim + 1; /* Always set the new offset value and realign the ring. */ if (netmap_debug & NM_DEBUG_ON) nm_prinf("%s %s%d hwofs %d -> %d, hwtail %d -> %d", na->name, tx == NR_TX ? "TX" : "RX", n, kring->nkr_hwofs, new_hwofs, kring->nr_hwtail, tx == NR_TX ? lim : kring->nr_hwtail); kring->nkr_hwofs = new_hwofs; if (tx == NR_TX) { kring->nr_hwtail = kring->nr_hwcur + lim; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; } /* * Wakeup on the individual and global selwait * We do the wakeup here, but the ring is not yet reconfigured. * However, we are under lock so there are no races. */ kring->nr_mode = NKR_NETMAP_ON; kring->nm_notify(kring, 0); return kring->ring->slot; } /* * Dispatch rx/tx interrupts to the netmap rings. * * "work_done" is non-null on the RX path, NULL for the TX path. * We rely on the OS to make sure that there is only one active * instance per queue, and that there is appropriate locking. * * The 'notify' routine depends on what the ring is attached to. * - for a netmap file descriptor, do a selwakeup on the individual * waitqueue, plus one on the global one if needed * (see netmap_notify) * - for a nic connected to a switch, call the proper forwarding routine * (see netmap_bwrap_intr_notify) */ int netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done) { struct netmap_kring *kring; enum txrx t = (work_done ? NR_RX : NR_TX); q &= NETMAP_RING_MASK; if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) { nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q); } if (q >= nma_get_nrings(na, t)) return NM_IRQ_PASS; // not a physical queue kring = NMR(na, t)[q]; if (kring->nr_mode == NKR_NETMAP_OFF) { return NM_IRQ_PASS; } if (t == NR_RX) { kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? *work_done = 1; /* do not fire napi again */ } return kring->nm_notify(kring, 0); } /* * Default functions to handle rx/tx interrupts from a physical device. * "work_done" is non-null on the RX path, NULL for the TX path. * * If the card is not in netmap mode, simply return NM_IRQ_PASS, * so that the caller proceeds with regular processing. * Otherwise call netmap_common_irq(). * * If the card is connected to a netmap file descriptor, * do a selwakeup on the individual queue, plus one on the global one * if needed (multiqueue card _and_ there are multiqueue listeners), * and return NR_IRQ_COMPLETED. * * Finally, if called on rx from an interface connected to a switch, * calls the proper forwarding routine. */ int netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done) { struct netmap_adapter *na = NA(ifp); /* * XXX emulated netmap mode sets NAF_SKIP_INTR so * we still use the regular driver even though the previous * check fails. It is unclear whether we should use * nm_native_on() here. 
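 *
 * For context, a NIC driver typically invokes this hook from its RX
 * interrupt handler along these lines (a minimal sketch, not taken
 * from any specific driver):
 *
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done) != NM_IRQ_PASS)
 *		return;		// handled by netmap
 *	// otherwise fall through to the regular mbuf-based RX path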
*/ if (!nm_netmap_on(na)) return NM_IRQ_PASS; if (na->na_flags & NAF_SKIP_INTR) { - ND("use regular interrupt"); + nm_prdis("use regular interrupt"); return NM_IRQ_PASS; } return netmap_common_irq(na, q, work_done); } /* set/clear native flags and if_transmit/netdev_ops */ void nm_set_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We do the setup for intercepting packets only if we are the * first user of this adapapter. */ if (na->active_fds > 0) { return; } na->na_flags |= NAF_NETMAP_ON; nm_os_onenter(ifp); nm_update_hostrings_mode(na); } void nm_clear_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We undo the setup for intercepting packets only if we are the * last user of this adapter. */ if (na->active_fds > 0) { return; } nm_update_hostrings_mode(na); nm_os_onexit(ifp); na->na_flags &= ~NAF_NETMAP_ON; +} + +void +netmap_krings_mode_commit(struct netmap_adapter *na, int onoff) +{ + enum txrx t; + + for_rx_tx(t) { + int i; + + for (i = 0; i < netmap_real_rings(na, t); i++) { + struct netmap_kring *kring = NMR(na, t)[i]; + + if (onoff && nm_kring_pending_on(kring)) + kring->nr_mode = NKR_NETMAP_ON; + else if (!onoff && nm_kring_pending_off(kring)) + kring->nr_mode = NKR_NETMAP_OFF; + } + } } /* * Module loader and unloader * * netmap_init() creates the /dev/netmap device and initializes * all global variables. Returns 0 on success, errno on failure * (but there is no chance) * * netmap_fini() destroys everything. */ static struct cdev *netmap_dev; /* /dev/netmap character device. */ extern struct cdevsw netmap_cdevsw; void netmap_fini(void) { if (netmap_dev) destroy_dev(netmap_dev); /* we assume that there are no longer netmap users */ nm_os_ifnet_fini(); netmap_uninit_bridges(); netmap_mem_fini(); NMG_LOCK_DESTROY(); nm_prinf("netmap: unloaded module."); } int netmap_init(void) { int error; NMG_LOCK_INIT(); error = netmap_mem_init(); if (error != 0) goto fail; /* * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls * when the module is compiled in. * XXX could use make_dev_credv() to get error number */ netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "netmap"); if (!netmap_dev) goto fail; error = netmap_init_bridges(); if (error) goto fail; #ifdef __FreeBSD__ nm_os_vi_init_index(); #endif error = nm_os_ifnet_init(); if (error) goto fail; nm_prinf("netmap: loaded module"); return (0); fail: netmap_fini(); return (EINVAL); /* may be incorrect */ } Index: stable/12/sys/dev/netmap/netmap_bdg.c =================================================================== --- stable/12/sys/dev/netmap/netmap_bdg.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_bdg.c (revision 344046) @@ -1,1665 +1,1649 @@ /* * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module implements the VALE switch for netmap --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* * OS-specific code that is used only within this file. * Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #include const char* netmap_bdg_name(struct netmap_vp_adapter *vp) { struct nm_bridge *b = vp->na_bdg; if (b == NULL) return NULL; return b->bdg_basename; } #ifndef CONFIG_NET_NS /* * XXX in principle nm_bridges could be created dynamically * Right now we have a static array and deletions are protected * by an exclusive lock. */ struct nm_bridge *nm_bridges; #endif /* !CONFIG_NET_NS */ static int nm_is_id_char(const char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_'); } /* Validate the name of a bdg port and return the * position of the ":" character. 
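 *
 * Example (illustrative only): for the port name "vale1:em0" the
 * function returns 5, the index of the ':' separator; a name that
 * contains characters other than [a-zA-Z0-9_] before the ':', or
 * whose interface part after the ':' does not fit in IFNAMSIZ,
 * yields -1 instead.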
*/ static int nm_bdg_name_validate(const char *name, size_t prefixlen) { int colon_pos = -1; int i; if (!name || strlen(name) < prefixlen) { return -1; } for (i = 0; i < NM_BDG_IFNAMSIZ && name[i]; i++) { if (name[i] == ':') { colon_pos = i; break; } else if (!nm_is_id_char(name[i])) { return -1; } } if (strlen(name) - colon_pos > IFNAMSIZ) { /* interface name too long */ return -1; } return colon_pos; } /* * locate a bridge among the existing ones. * MUST BE CALLED WITH NMG_LOCK() * * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME. * We assume that this is called with a name of at least NM_NAME chars. */ struct nm_bridge * nm_find_bridge(const char *name, int create, struct netmap_bdg_ops *ops) { int i, namelen; struct nm_bridge *b = NULL, *bridges; u_int num_bridges; NMG_LOCK_ASSERT(); netmap_bns_getbridges(&bridges, &num_bridges); namelen = nm_bdg_name_validate(name, (ops != NULL ? strlen(ops->name) : 0)); if (namelen < 0) { nm_prerr("invalid bridge name %s", name ? name : NULL); return NULL; } /* lookup the name, remember empty slot if there is one */ for (i = 0; i < num_bridges; i++) { struct nm_bridge *x = bridges + i; if ((x->bdg_flags & NM_BDG_ACTIVE) + x->bdg_active_ports == 0) { if (create && b == NULL) b = x; /* record empty slot */ } else if (x->bdg_namelen != namelen) { continue; } else if (strncmp(name, x->bdg_basename, namelen) == 0) { - ND("found '%.*s' at %d", namelen, name, i); + nm_prdis("found '%.*s' at %d", namelen, name, i); b = x; break; } } if (i == num_bridges && b) { /* name not found, can create entry */ /* initialize the bridge */ - ND("create new bridge %s with ports %d", b->bdg_basename, + nm_prdis("create new bridge %s with ports %d", b->bdg_basename, b->bdg_active_ports); b->ht = nm_os_malloc(sizeof(struct nm_hash_ent) * NM_BDG_HASH); if (b->ht == NULL) { nm_prerr("failed to allocate hash table"); return NULL; } strncpy(b->bdg_basename, name, namelen); b->bdg_namelen = namelen; b->bdg_active_ports = 0; for (i = 0; i < NM_BDG_MAXPORTS; i++) b->bdg_port_index[i] = i; /* set the default function */ b->bdg_ops = b->bdg_saved_ops = *ops; b->private_data = b->ht; b->bdg_flags = 0; NM_BNS_GET(b); } return b; } int netmap_bdg_free(struct nm_bridge *b) { if ((b->bdg_flags & NM_BDG_ACTIVE) + b->bdg_active_ports != 0) { return EBUSY; } - ND("marking bridge %s as free", b->bdg_basename); + nm_prdis("marking bridge %s as free", b->bdg_basename); nm_os_free(b->ht); memset(&b->bdg_ops, 0, sizeof(b->bdg_ops)); memset(&b->bdg_saved_ops, 0, sizeof(b->bdg_saved_ops)); b->bdg_flags = 0; NM_BNS_PUT(b); return 0; } /* Called by external kernel modules (e.g., Openvswitch). * to modify the private data previously given to regops(). * 'name' may be just bridge's name (including ':' if it * is not just NM_BDG_NAME). * Called without NMG_LOCK. 
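 *
 * A minimal usage sketch (the callback and argument names here are
 * hypothetical; only the calling convention is taken from the code
 * below):
 *
 *	static void *
 *	my_update_cb(void *cur, void *cb_data, int *error)
 *	{
 *		// inspect or replace the bridge private data
 *		*error = 0;
 *		return cur;	// becomes the new private_data
 *	}
 *
 *	err = netmap_bdg_update_private_data("vale0:", my_update_cb,
 *			cb_arg, auth_token);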
*/ int netmap_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback, void *callback_data, void *auth_token) { void *private_data = NULL; struct nm_bridge *b; int error = 0; NMG_LOCK(); b = nm_find_bridge(name, 0 /* don't create */, NULL); if (!b) { error = EINVAL; goto unlock_update_priv; } if (!nm_bdg_valid_auth_token(b, auth_token)) { error = EACCES; goto unlock_update_priv; } BDG_WLOCK(b); private_data = callback(b->private_data, callback_data, &error); b->private_data = private_data; BDG_WUNLOCK(b); unlock_update_priv: NMG_UNLOCK(); return error; } /* remove from bridge b the ports in slots hw and sw * (sw can be -1 if not needed) */ void netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw) { int s_hw = hw, s_sw = sw; int i, lim =b->bdg_active_ports; uint32_t *tmp = b->tmp_bdg_port_index; /* New algorithm: make a copy of bdg_port_index; lookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port in the array of bdg_port_index, replacing them with entries from the bottom of the array; decrement bdg_active_ports; acquire BDG_WLOCK() and copy back the array. */ if (netmap_debug & NM_DEBUG_BDG) nm_prinf("detach %d and %d (lim %d)", hw, sw, lim); /* make a copy of the list of active ports, update it, * and then copy back within BDG_WLOCK(). */ memcpy(b->tmp_bdg_port_index, b->bdg_port_index, sizeof(b->tmp_bdg_port_index)); for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) { if (hw >= 0 && tmp[i] == hw) { - ND("detach hw %d at %d", hw, i); + nm_prdis("detach hw %d at %d", hw, i); lim--; /* point to last active port */ tmp[i] = tmp[lim]; /* swap with i */ tmp[lim] = hw; /* now this is inactive */ hw = -1; } else if (sw >= 0 && tmp[i] == sw) { - ND("detach sw %d at %d", sw, i); + nm_prdis("detach sw %d at %d", sw, i); lim--; tmp[i] = tmp[lim]; tmp[lim] = sw; sw = -1; } else { i++; } } if (hw >= 0 || sw >= 0) { nm_prerr("delete failed hw %d sw %d, should panic...", hw, sw); } BDG_WLOCK(b); if (b->bdg_ops.dtor) b->bdg_ops.dtor(b->bdg_ports[s_hw]); b->bdg_ports[s_hw] = NULL; if (s_sw >= 0) { b->bdg_ports[s_sw] = NULL; } memcpy(b->bdg_port_index, b->tmp_bdg_port_index, sizeof(b->tmp_bdg_port_index)); b->bdg_active_ports = lim; BDG_WUNLOCK(b); - ND("now %d active ports", lim); + nm_prdis("now %d active ports", lim); netmap_bdg_free(b); } /* nm_bdg_ctl callback for VALE ports */ int netmap_vp_bdg_ctl(struct nmreq_header *hdr, struct netmap_adapter *na) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; struct nm_bridge *b = vpna->na_bdg; if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { return 0; /* nothing to do */ } if (b) { netmap_set_all_rings(na, 0 /* disable */); netmap_bdg_detach_common(b, vpna->bdg_port, -1); vpna->na_bdg = NULL; netmap_set_all_rings(na, 1 /* enable */); } /* I have took reference just for attach */ netmap_adapter_put(na); return 0; } int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na, struct nm_bridge *b) { return NM_NEED_BWRAP; } /* Try to get a reference to a netmap adapter attached to a VALE switch. * If the adapter is found (or is created), this function returns 0, a * non NULL pointer is returned into *na, and the caller holds a * reference to the adapter. * If an adapter is not found, then no reference is grabbed and the * function returns an error code, or 0 if there is just a VALE prefix * mismatch. Therefore the caller holds a reference when * (*na != NULL && return == 0). 
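 *
 * In other words, a caller is expected to behave roughly as follows
 * (sketch only):
 *
 *	error = netmap_get_bdg_na(hdr, &na, nmd, create, ops);
 *	if (error)
 *		return error;	// genuine failure
 *	if (na == NULL)
 *		...		// not a VALE name, try other backends
 *	else
 *		...		// use na, then netmap_adapter_put(na)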
*/ int netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create, struct netmap_bdg_ops *ops) { char *nr_name = hdr->nr_name; const char *ifname; struct ifnet *ifp = NULL; int error = 0; struct netmap_vp_adapter *vpna, *hostna = NULL; struct nm_bridge *b; uint32_t i, j; uint32_t cand = NM_BDG_NOPORT, cand2 = NM_BDG_NOPORT; int needed; *na = NULL; /* default return value */ /* first try to see if this is a bridge port. */ NMG_LOCK_ASSERT(); if (strncmp(nr_name, ops->name, strlen(ops->name) - 1)) { return 0; /* no error, but no VALE prefix */ } b = nm_find_bridge(nr_name, create, ops); if (b == NULL) { - ND("no bridges available for '%s'", nr_name); + nm_prdis("no bridges available for '%s'", nr_name); return (create ? ENOMEM : ENXIO); } if (strlen(nr_name) < b->bdg_namelen) /* impossible */ panic("x"); /* Now we are sure that name starts with the bridge's name, * lookup the port in the bridge. We need to scan the entire * list. It is not important to hold a WLOCK on the bridge * during the search because NMG_LOCK already guarantees * that there are no other possible writers. */ /* lookup in the local list of ports */ for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; vpna = b->bdg_ports[i]; - ND("checking %s", vpna->up.name); + nm_prdis("checking %s", vpna->up.name); if (!strcmp(vpna->up.name, nr_name)) { netmap_adapter_get(&vpna->up); - ND("found existing if %s refs %d", nr_name) + nm_prdis("found existing if %s refs %d", nr_name) *na = &vpna->up; return 0; } } /* not found, should we create it? */ if (!create) return ENXIO; /* yes we should, see if we have space to attach entries */ needed = 2; /* in some cases we only need 1 */ if (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) { nm_prerr("bridge full %d, cannot create new port", b->bdg_active_ports); return ENOMEM; } /* record the next two ports available, but do not allocate yet */ cand = b->bdg_port_index[b->bdg_active_ports]; cand2 = b->bdg_port_index[b->bdg_active_ports + 1]; - ND("+++ bridge %s port %s used %d avail %d %d", + nm_prdis("+++ bridge %s port %s used %d avail %d %d", b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2); /* * try see if there is a matching NIC with this name * (after the bridge's name) */ ifname = nr_name + b->bdg_namelen + 1; ifp = ifunit_ref(ifname); if (!ifp) { /* Create an ephemeral virtual port. * This block contains all the ephemeral-specific logic. */ if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { error = EINVAL; goto out; } /* bdg_netmap_attach creates a struct netmap_adapter */ error = b->bdg_ops.vp_create(hdr, NULL, nmd, &vpna); if (error) { if (netmap_debug & NM_DEBUG_BDG) nm_prerr("error %d", error); goto out; } /* shortcut - we can skip get_hw_na(), * ownership check and nm_bdg_attach() */ } else { struct netmap_adapter *hw; /* the vale:nic syntax is only valid for some commands */ switch (hdr->nr_reqtype) { case NETMAP_REQ_VALE_ATTACH: case NETMAP_REQ_VALE_DETACH: case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: break; /* ok */ default: error = EINVAL; goto out; } error = netmap_get_hw_na(ifp, nmd, &hw); if (error || hw == NULL) goto out; /* host adapter might not be created */ error = hw->nm_bdg_attach(nr_name, hw, b); if (error == NM_NEED_BWRAP) { error = b->bdg_ops.bwrap_attach(nr_name, hw); } if (error) goto out; vpna = hw->na_vp; hostna = hw->na_hostvp; if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { /* Check if we need to skip the host rings. 
*/ struct nmreq_vale_attach *areq = (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body; if (areq->reg.nr_mode != NR_REG_NIC_SW) { hostna = NULL; } } } BDG_WLOCK(b); vpna->bdg_port = cand; - ND("NIC %p to bridge port %d", vpna, cand); + nm_prdis("NIC %p to bridge port %d", vpna, cand); /* bind the port to the bridge (virtual ports are not active) */ b->bdg_ports[cand] = vpna; vpna->na_bdg = b; b->bdg_active_ports++; if (hostna != NULL) { /* also bind the host stack to the bridge */ b->bdg_ports[cand2] = hostna; hostna->bdg_port = cand2; hostna->na_bdg = b; b->bdg_active_ports++; - ND("host %p to bridge port %d", hostna, cand2); + nm_prdis("host %p to bridge port %d", hostna, cand2); } - ND("if %s refs %d", ifname, vpna->up.na_refcount); + nm_prdis("if %s refs %d", ifname, vpna->up.na_refcount); BDG_WUNLOCK(b); *na = &vpna->up; netmap_adapter_get(*na); out: if (ifp) if_rele(ifp); return error; } int nm_is_bwrap(struct netmap_adapter *na) { return na->nm_register == netmap_bwrap_reg; } struct nm_bdg_polling_state; struct nm_bdg_kthread { struct nm_kctx *nmk; u_int qfirst; u_int qlast; struct nm_bdg_polling_state *bps; }; struct nm_bdg_polling_state { bool configured; bool stopped; struct netmap_bwrap_adapter *bna; uint32_t mode; u_int qfirst; u_int qlast; u_int cpu_from; u_int ncpus; struct nm_bdg_kthread *kthreads; }; static void netmap_bwrap_polling(void *data) { struct nm_bdg_kthread *nbk = data; struct netmap_bwrap_adapter *bna; u_int qfirst, qlast, i; struct netmap_kring **kring0, *kring; if (!nbk) return; qfirst = nbk->qfirst; qlast = nbk->qlast; bna = nbk->bps->bna; kring0 = NMR(bna->hwna, NR_RX); for (i = qfirst; i < qlast; i++) { kring = kring0[i]; kring->nm_notify(kring, 0); } } static int nm_bdg_create_kthreads(struct nm_bdg_polling_state *bps) { struct nm_kctx_cfg kcfg; int i, j; bps->kthreads = nm_os_malloc(sizeof(struct nm_bdg_kthread) * bps->ncpus); if (bps->kthreads == NULL) return ENOMEM; bzero(&kcfg, sizeof(kcfg)); kcfg.worker_fn = netmap_bwrap_polling; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; int all = (bps->ncpus == 1 && bps->mode == NETMAP_POLLING_MODE_SINGLE_CPU); int affinity = bps->cpu_from + i; t->bps = bps; t->qfirst = all ? bps->qfirst /* must be 0 */: affinity; t->qlast = all ? 
bps->qlast : t->qfirst + 1; if (netmap_verbose) nm_prinf("kthread %d a:%u qf:%u ql:%u", i, affinity, t->qfirst, t->qlast); kcfg.type = i; kcfg.worker_private = t; t->nmk = nm_os_kctx_create(&kcfg, NULL); if (t->nmk == NULL) { goto cleanup; } nm_os_kctx_worker_setaff(t->nmk, affinity); } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_destroy(t->nmk); } nm_os_free(bps->kthreads); return EFAULT; } /* A variant of ptnetmap_start_kthreads() */ static int nm_bdg_polling_start_kthreads(struct nm_bdg_polling_state *bps) { int error, i, j; if (!bps) { nm_prerr("polling is not configured"); return EFAULT; } bps->stopped = false; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; error = nm_os_kctx_worker_start(t->nmk); if (error) { nm_prerr("error in nm_kthread_start(): %d", error); goto cleanup; } } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_worker_stop(t->nmk); } bps->stopped = true; return error; } static void nm_bdg_polling_stop_delete_kthreads(struct nm_bdg_polling_state *bps) { int i; if (!bps) return; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_worker_stop(t->nmk); nm_os_kctx_destroy(t->nmk); } bps->stopped = true; } static int get_polling_cfg(struct nmreq_vale_polling *req, struct netmap_adapter *na, struct nm_bdg_polling_state *bps) { unsigned int avail_cpus, core_from; unsigned int qfirst, qlast; uint32_t i = req->nr_first_cpu_id; uint32_t req_cpus = req->nr_num_polling_cpus; avail_cpus = nm_os_ncpus(); if (req_cpus == 0) { nm_prerr("req_cpus must be > 0"); return EINVAL; } else if (req_cpus >= avail_cpus) { nm_prerr("Cannot use all the CPUs in the system"); return EINVAL; } if (req->nr_mode == NETMAP_POLLING_MODE_MULTI_CPU) { /* Use a separate core for each ring. If nr_num_polling_cpus>1 * more consecutive rings are polled. * For example, if nr_first_cpu_id=2 and nr_num_polling_cpus=2, * ring 2 and 3 are polled by core 2 and 3, respectively. */ if (i + req_cpus > nma_get_nrings(na, NR_RX)) { nm_prerr("Rings %u-%u not in range (have %d rings)", i, i + req_cpus, nma_get_nrings(na, NR_RX)); return EINVAL; } qfirst = i; qlast = qfirst + req_cpus; core_from = qfirst; } else if (req->nr_mode == NETMAP_POLLING_MODE_SINGLE_CPU) { /* Poll all the rings using a core specified by nr_first_cpu_id. * the number of cores must be 1. */ if (req_cpus != 1) { nm_prerr("ncpus must be 1 for NETMAP_POLLING_MODE_SINGLE_CPU " "(was %d)", req_cpus); return EINVAL; } qfirst = 0; qlast = nma_get_nrings(na, NR_RX); core_from = i; } else { nm_prerr("Invalid polling mode"); return EINVAL; } bps->mode = req->nr_mode; bps->qfirst = qfirst; bps->qlast = qlast; bps->cpu_from = core_from; bps->ncpus = req_cpus; nm_prinf("%s qfirst %u qlast %u cpu_from %u ncpus %u", req->nr_mode == NETMAP_POLLING_MODE_MULTI_CPU ? 
"MULTI" : "SINGLE", qfirst, qlast, core_from, req_cpus); return 0; } static int nm_bdg_ctl_polling_start(struct nmreq_vale_polling *req, struct netmap_adapter *na) { struct nm_bdg_polling_state *bps; struct netmap_bwrap_adapter *bna; int error; bna = (struct netmap_bwrap_adapter *)na; if (bna->na_polling_state) { nm_prerr("ERROR adapter already in polling mode"); return EFAULT; } bps = nm_os_malloc(sizeof(*bps)); if (!bps) return ENOMEM; bps->configured = false; bps->stopped = true; if (get_polling_cfg(req, na, bps)) { nm_os_free(bps); return EINVAL; } if (nm_bdg_create_kthreads(bps)) { nm_os_free(bps); return EFAULT; } bps->configured = true; bna->na_polling_state = bps; bps->bna = bna; /* disable interrupts if possible */ nma_intr_enable(bna->hwna, 0); /* start kthread now */ error = nm_bdg_polling_start_kthreads(bps); if (error) { nm_prerr("ERROR nm_bdg_polling_start_kthread()"); nm_os_free(bps->kthreads); nm_os_free(bps); bna->na_polling_state = NULL; nma_intr_enable(bna->hwna, 1); } return error; } static int nm_bdg_ctl_polling_stop(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct nm_bdg_polling_state *bps; if (!bna->na_polling_state) { nm_prerr("ERROR adapter is not in polling mode"); return EFAULT; } bps = bna->na_polling_state; nm_bdg_polling_stop_delete_kthreads(bna->na_polling_state); bps->configured = false; nm_os_free(bps); bna->na_polling_state = NULL; /* reenable interrupts */ nma_intr_enable(bna->hwna, 1); return 0; } int nm_bdg_polling(struct nmreq_header *hdr) { struct nmreq_vale_polling *req = (struct nmreq_vale_polling *)(uintptr_t)hdr->nr_body; struct netmap_adapter *na = NULL; int error = 0; NMG_LOCK(); error = netmap_get_vale_na(hdr, &na, NULL, /*create=*/0); if (na && !error) { if (!nm_is_bwrap(na)) { error = EOPNOTSUPP; } else if (hdr->nr_reqtype == NETMAP_BDG_POLLING_ON) { error = nm_bdg_ctl_polling_start(req, na); if (!error) netmap_adapter_get(na); } else { error = nm_bdg_ctl_polling_stop(na); if (!error) netmap_adapter_put(na); } netmap_adapter_put(na); } else if (!na && !error) { /* Not VALE port. */ error = EINVAL; } NMG_UNLOCK(); return error; } /* Called by external kernel modules (e.g., Openvswitch). * to set configure/lookup/dtor functions of a VALE instance. * Register callbacks to the given bridge. 'name' may be just * bridge's name (including ':' if it is not just NM_BDG_NAME). * * Called without NMG_LOCK. 
*/ int netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token) { struct nm_bridge *b; int error = 0; NMG_LOCK(); b = nm_find_bridge(name, 0 /* don't create */, NULL); if (!b) { error = ENXIO; goto unlock_regops; } if (!nm_bdg_valid_auth_token(b, auth_token)) { error = EACCES; goto unlock_regops; } BDG_WLOCK(b); if (!bdg_ops) { /* resetting the bridge */ bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH); b->bdg_ops = b->bdg_saved_ops; b->private_data = b->ht; } else { /* modifying the bridge */ b->private_data = private_data; #define nm_bdg_override(m) if (bdg_ops->m) b->bdg_ops.m = bdg_ops->m nm_bdg_override(lookup); nm_bdg_override(config); nm_bdg_override(dtor); nm_bdg_override(vp_create); nm_bdg_override(bwrap_attach); #undef nm_bdg_override } BDG_WUNLOCK(b); unlock_regops: NMG_UNLOCK(); return error; } int netmap_bdg_config(struct nm_ifreq *nr) { struct nm_bridge *b; int error = EINVAL; NMG_LOCK(); b = nm_find_bridge(nr->nifr_name, 0, NULL); if (!b) { NMG_UNLOCK(); return error; } NMG_UNLOCK(); /* Don't call config() with NMG_LOCK() held */ BDG_RLOCK(b); if (b->bdg_ops.config != NULL) error = b->bdg_ops.config(nr); BDG_RUNLOCK(b); return error; } /* nm_register callback for VALE ports */ int netmap_vp_reg(struct netmap_adapter *na, int onoff) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na; - enum txrx t; - int i; /* persistent ports may be put in netmap mode * before being attached to a bridge */ if (vpna->na_bdg) BDG_WLOCK(vpna->na_bdg); if (onoff) { - for_rx_tx(t) { - for (i = 0; i < netmap_real_rings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_on(kring)) - kring->nr_mode = NKR_NETMAP_ON; - } - } + netmap_krings_mode_commit(na, onoff); if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; /* XXX on FreeBSD, persistent VALE ports should also * toggle IFCAP_NETMAP in na->ifp (2014-03-16) */ } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; - for_rx_tx(t) { - for (i = 0; i < netmap_real_rings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_off(kring)) - kring->nr_mode = NKR_NETMAP_OFF; - } - } + netmap_krings_mode_commit(na, onoff); } if (vpna->na_bdg) BDG_WUNLOCK(vpna->na_bdg); return 0; } /* rxsync code used by VALE ports nm_rxsync callback and also * internally by the brwap */ static int netmap_vp_rxsync_locked(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; int n; if (head > lim) { nm_prerr("ouch dangerous reset!!!"); n = netmap_ring_reinit(kring); goto done; } /* First part, import newly received packets. */ /* actually nothing to do here, they are already in the kring */ /* Second part, skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* consistency check, but nothing really important here */ for (n = 0; likely(nm_i != head); n++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = NMB(na, slot); if (addr == NETMAP_BUF_BASE(kring->na)) { /* bad buf */ nm_prerr("bad buffer index %d, ignore ?", slot->buf_idx); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); } kring->nr_hwcur = head; } n = 0; done: return n; } /* * nm_rxsync callback for VALE ports * user process reading from a VALE switch. 
* Already protected against concurrent calls from userspace, * but we must acquire the queue's lock to protect against * writers on the same queue. */ int netmap_vp_rxsync(struct netmap_kring *kring, int flags) { int n; mtx_lock(&kring->q_lock); n = netmap_vp_rxsync_locked(kring, flags); mtx_unlock(&kring->q_lock); return n; } int netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna, struct netmap_bdg_ops *ops) { return ops->bwrap_attach(nr_name, hwna); } /* Bridge wrapper code (bwrap). * This is used to connect a non-VALE-port netmap_adapter (hwna) to a * VALE switch. * The main task is to swap the meaning of tx and rx rings to match the * expectations of the VALE switch code (see nm_bdg_flush). * * The bwrap works by interposing a netmap_bwrap_adapter between the * rest of the system and the hwna. The netmap_bwrap_adapter looks like * a netmap_vp_adapter to the rest the system, but, internally, it * translates all callbacks to what the hwna expects. * * Note that we have to intercept callbacks coming from two sides: * * - callbacks coming from the netmap module are intercepted by * passing around the netmap_bwrap_adapter instead of the hwna * * - callbacks coming from outside of the netmap module only know * about the hwna. This, however, only happens in interrupt * handlers, where only the hwna->nm_notify callback is called. * What the bwrap does is to overwrite the hwna->nm_notify callback * with its own netmap_bwrap_intr_notify. * XXX This assumes that the hwna->nm_notify callback was the * standard netmap_notify(), as it is the case for nic adapters. * Any additional action performed by hwna->nm_notify will not be * performed by netmap_bwrap_intr_notify. * * Additionally, the bwrap can optionally attach the host rings pair * of the wrapped adapter to a different port of the switch. */ static void netmap_bwrap_dtor(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na; struct netmap_adapter *hwna = bna->hwna; struct nm_bridge *b = bna->up.na_bdg, *bh = bna->host.na_bdg; if (bna->host.up.nm_mem) netmap_mem_put(bna->host.up.nm_mem); if (b) { netmap_bdg_detach_common(b, bna->up.bdg_port, (bh ? bna->host.bdg_port : -1)); } - ND("na %p", na); + nm_prdis("na %p", na); na->ifp = NULL; bna->host.up.ifp = NULL; hwna->na_vp = bna->saved_na_vp; hwna->na_hostvp = NULL; hwna->na_private = NULL; hwna->na_flags &= ~NAF_BUSY; netmap_adapter_put(hwna); } /* * Intr callback for NICs connected to a bridge. * Simply ignore tx interrupts (maybe we could try to recover space ?) * and pass received packets from nic to the bridge. * * XXX TODO check locking: this is called from the interrupt * handler so we should make sure that the interface is not * disconnected while passing down an interrupt. * * Note, no user process can access this NIC or the host stack. * The only part of the ring that is significant are the slots, * and head/cur/tail are set from the kring as needed * (part as a receive ring, part as a transmit ring). * * callback that overwrites the hwna notify callback. * Packets come from the outside or from the host stack and are put on an * hwna rx ring. * The bridge wrapper then sends the packets through the bridge. 
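 *
 * The interception itself is a simple callback swap, performed by
 * netmap_bwrap_reg() further below:
 *
 *	hwna->rx_rings[i]->save_notify = hwna->rx_rings[i]->nm_notify;
 *	hwna->rx_rings[i]->nm_notify = netmap_bwrap_intr_notify;
 *
 * and undone by restoring save_notify when the bwrap is unregistered.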
*/ static int netmap_bwrap_intr_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_kring *bkring; struct netmap_vp_adapter *vpna = &bna->up; u_int ring_nr = kring->ring_id; int ret = NM_IRQ_COMPLETED; int error; if (netmap_debug & NM_DEBUG_RXINTR) nm_prinf("%s %s 0x%x", na->name, kring->name, flags); bkring = vpna->up.tx_rings[ring_nr]; /* make sure the ring is not disabled */ if (nm_kr_tryget(kring, 0 /* can't sleep */, NULL)) { return EIO; } if (netmap_debug & NM_DEBUG_RXINTR) nm_prinf("%s head %d cur %d tail %d", na->name, kring->rhead, kring->rcur, kring->rtail); /* simulate a user wakeup on the rx ring * fetch packets that have arrived. */ error = kring->nm_sync(kring, 0); if (error) goto put_out; if (kring->nr_hwcur == kring->nr_hwtail) { if (netmap_verbose) nm_prlim(1, "interrupt with no packets on %s", kring->name); goto put_out; } /* new packets are kring->rcur to kring->nr_hwtail, and the bkring * had hwcur == bkring->rhead. So advance bkring->rhead to kring->nr_hwtail * to push all packets out. */ bkring->rhead = bkring->rcur = kring->nr_hwtail; bkring->nm_sync(bkring, flags); /* mark all buffers as released on this ring */ kring->rhead = kring->rcur = kring->rtail = kring->nr_hwtail; /* another call to actually release the buffers */ error = kring->nm_sync(kring, 0); /* The second rxsync may have further advanced hwtail. If this happens, * return NM_IRQ_RESCHED, otherwise just return NM_IRQ_COMPLETED. */ if (kring->rcur != kring->nr_hwtail) { ret = NM_IRQ_RESCHED; } put_out: nm_kr_put(kring); return error ? error : ret; } /* nm_register callback for bwrap */ int netmap_bwrap_reg(struct netmap_adapter *na, int onoff) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; struct netmap_vp_adapter *hostna = &bna->host; int error, i; enum txrx t; - ND("%s %s", na->name, onoff ? "on" : "off"); + nm_prdis("%s %s", na->name, onoff ? "on" : "off"); if (onoff) { /* netmap_do_regif has been called on the bwrap na. 
* We need to pass the information about the * memory allocator down to the hwna before * putting it in netmap mode */ hwna->na_lut = na->na_lut; if (hostna->na_bdg) { /* if the host rings have been attached to switch, * we need to copy the memory allocator information * in the hostna also */ hostna->up.na_lut = na->na_lut; } } /* pass down the pending ring state information */ for_rx_tx(t) { for (i = 0; i < netmap_all_rings(na, t); i++) { NMR(hwna, nm_txrx_swap(t))[i]->nr_pending_mode = NMR(na, t)[i]->nr_pending_mode; } } /* forward the request to the hwna */ error = hwna->nm_register(hwna, onoff); if (error) return error; /* copy up the current ring state information */ for_rx_tx(t) { for (i = 0; i < netmap_all_rings(na, t); i++) { struct netmap_kring *kring = NMR(hwna, nm_txrx_swap(t))[i]; NMR(na, t)[i]->nr_mode = kring->nr_mode; } } /* impersonate a netmap_vp_adapter */ netmap_vp_reg(na, onoff); if (hostna->na_bdg) netmap_vp_reg(&hostna->up, onoff); if (onoff) { u_int i; /* intercept the hwna nm_nofify callback on the hw rings */ for (i = 0; i < hwna->num_rx_rings; i++) { hwna->rx_rings[i]->save_notify = hwna->rx_rings[i]->nm_notify; hwna->rx_rings[i]->nm_notify = netmap_bwrap_intr_notify; } i = hwna->num_rx_rings; /* for safety */ /* save the host ring notify unconditionally */ for (; i < netmap_real_rings(hwna, NR_RX); i++) { hwna->rx_rings[i]->save_notify = hwna->rx_rings[i]->nm_notify; if (hostna->na_bdg) { /* also intercept the host ring notify */ hwna->rx_rings[i]->nm_notify = netmap_bwrap_intr_notify; na->tx_rings[i]->nm_sync = na->nm_txsync; } } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; } else { u_int i; if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; /* reset all notify callbacks (including host ring) */ for (i = 0; i < netmap_all_rings(hwna, NR_RX); i++) { hwna->rx_rings[i]->nm_notify = hwna->rx_rings[i]->save_notify; hwna->rx_rings[i]->save_notify = NULL; } hwna->na_lut.lut = NULL; hwna->na_lut.plut = NULL; hwna->na_lut.objtotal = 0; hwna->na_lut.objsize = 0; /* pass ownership of the netmap rings to the hwna */ for_rx_tx(t) { for (i = 0; i < netmap_all_rings(na, t); i++) { NMR(na, t)[i]->ring = NULL; } } /* reset the number of host rings to default */ for_rx_tx(t) { nma_set_host_nrings(hwna, t, 1); } } return 0; } /* nm_config callback for bwrap */ static int netmap_bwrap_config(struct netmap_adapter *na, struct nm_config_info *info) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; int error; /* Forward the request to the hwna. It may happen that nobody * registered hwna yet, so netmap_mem_get_lut() may have not * been called yet. 
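 *
 * As an example of the swap performed below (numbers illustrative
 * only): if the hwna exposes 4 TX and 2 RX rings, the bwrap reports
 * 2 TX and 4 RX rings, since tx and rx change meaning with respect to
 * the wrapped adapter (see the bwrap description above).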
*/ error = netmap_mem_get_lut(hwna->nm_mem, &hwna->na_lut); if (error) return error; netmap_update_config(hwna); /* swap the results and propagate */ info->num_tx_rings = hwna->num_rx_rings; info->num_tx_descs = hwna->num_rx_desc; info->num_rx_rings = hwna->num_tx_rings; info->num_rx_descs = hwna->num_tx_desc; info->rx_buf_maxsize = hwna->rx_buf_maxsize; return 0; } /* nm_krings_create callback for bwrap */ int netmap_bwrap_krings_create_common(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; struct netmap_adapter *hostna = &bna->host.up; int i, error = 0; enum txrx t; /* also create the hwna krings */ error = hwna->nm_krings_create(hwna); if (error) { return error; } /* increment the usage counter for all the hwna krings */ for_rx_tx(t) { for (i = 0; i < netmap_all_rings(hwna, t); i++) { NMR(hwna, t)[i]->users++; } } /* now create the actual rings */ error = netmap_mem_rings_create(hwna); if (error) { goto err_dec_users; } /* cross-link the netmap rings * The original number of rings comes from hwna, * rx rings on one side equals tx rings on the other. */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ for (i = 0; i < netmap_all_rings(hwna, r); i++) { NMR(na, t)[i]->nkr_num_slots = NMR(hwna, r)[i]->nkr_num_slots; NMR(na, t)[i]->ring = NMR(hwna, r)[i]->ring; } } if (na->na_flags & NAF_HOST_RINGS) { /* the hostna rings are the host rings of the bwrap. * The corresponding krings must point back to the * hostna */ hostna->tx_rings = &na->tx_rings[na->num_tx_rings]; hostna->rx_rings = &na->rx_rings[na->num_rx_rings]; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(hostna, t); i++) { NMR(hostna, t)[i]->na = hostna; } } } return 0; err_dec_users: for_rx_tx(t) { for (i = 0; i < netmap_all_rings(hwna, t); i++) { NMR(hwna, t)[i]->users--; } } hwna->nm_krings_delete(hwna); return error; } void netmap_bwrap_krings_delete_common(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; enum txrx t; int i; - ND("%s", na->name); + nm_prdis("%s", na->name); /* decrement the usage counter for all the hwna krings */ for_rx_tx(t) { for (i = 0; i < netmap_all_rings(hwna, t); i++) { NMR(hwna, t)[i]->users--; } } /* delete any netmap rings that are no longer needed */ netmap_mem_rings_delete(hwna); hwna->nm_krings_delete(hwna); } /* notify method for the bridge-->hwna direction */ int netmap_bwrap_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_adapter *hwna = bna->hwna; u_int ring_n = kring->ring_id; u_int lim = kring->nkr_num_slots - 1; struct netmap_kring *hw_kring; int error; - ND("%s: na %s hwna %s", + nm_prdis("%s: na %s hwna %s", (kring ? kring->name : "NULL!"), (na ? na->name : "NULL!"), (hwna ? 
hwna->name : "NULL!")); hw_kring = hwna->tx_rings[ring_n]; if (nm_kr_tryget(hw_kring, 0, NULL)) { return ENXIO; } /* first step: simulate a user wakeup on the rx ring */ netmap_vp_rxsync(kring, flags); - ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", + nm_prdis("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, kring->rhead, kring->rcur, kring->rtail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail); /* second step: the new packets are sent on the tx ring * (which is actually the same ring) */ hw_kring->rhead = hw_kring->rcur = kring->nr_hwtail; error = hw_kring->nm_sync(hw_kring, flags); if (error) goto put_out; /* third step: now we are back on the rx ring */ /* claim ownership on all hw owned bufs */ kring->rhead = kring->rcur = nm_next(hw_kring->nr_hwtail, lim); /* skip past reserved slot */ /* fourth step: the user goes to sleep again, causing another rxsync */ netmap_vp_rxsync(kring, flags); - ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", + nm_prdis("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, kring->rhead, kring->rcur, kring->rtail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail); put_out: nm_kr_put(hw_kring); return error ? error : NM_IRQ_COMPLETED; } /* nm_bdg_ctl callback for the bwrap. * Called on bridge-attach and detach, as an effect of vale-ctl -[ahd]. * On attach, it needs to provide a fake netmap_priv_d structure and * perform a netmap_do_regif() on the bwrap. This will put both the * bwrap and the hwna in netmap mode, with the netmap rings shared * and cross linked. Moreover, it will start intercepting interrupts * directed to hwna. */ static int netmap_bwrap_bdg_ctl(struct nmreq_header *hdr, struct netmap_adapter *na) { struct netmap_priv_d *npriv; struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na; int error = 0; if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { struct nmreq_vale_attach *req = (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body; if (req->reg.nr_ringid != 0 || (req->reg.nr_mode != NR_REG_ALL_NIC && req->reg.nr_mode != NR_REG_NIC_SW)) { /* We only support attaching all the NIC rings * and/or the host stack.
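 *
 * That is, only requests with nr_ringid == 0 and nr_mode equal to
 * NR_REG_ALL_NIC (all the NIC rings) or NR_REG_NIC_SW (NIC rings plus
 * the host stack) are accepted here; these are the requests normally
 * generated by vale-ctl -a and vale-ctl -h respectively (see the
 * -[ahd] note above).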
*/ return EINVAL; } if (NETMAP_OWNED_BY_ANY(na)) { return EBUSY; } if (bna->na_kpriv) { /* nothing to do */ return 0; } npriv = netmap_priv_new(); if (npriv == NULL) return ENOMEM; npriv->np_ifp = na->ifp; /* let the priv destructor release the ref */ error = netmap_do_regif(npriv, na, req->reg.nr_mode, req->reg.nr_ringid, req->reg.nr_flags); if (error) { netmap_priv_delete(npriv); return error; } bna->na_kpriv = npriv; na->na_flags |= NAF_BUSY; } else { if (na->active_fds == 0) /* not registered */ return EINVAL; netmap_priv_delete(bna->na_kpriv); bna->na_kpriv = NULL; na->na_flags &= ~NAF_BUSY; } return error; } /* attach a bridge wrapper to the 'real' device */ int netmap_bwrap_attach_common(struct netmap_adapter *na, struct netmap_adapter *hwna) { struct netmap_bwrap_adapter *bna; struct netmap_adapter *hostna = NULL; int error = 0; enum txrx t; /* make sure the NIC is not already in use */ if (NETMAP_OWNED_BY_ANY(hwna)) { nm_prerr("NIC %s busy, cannot attach to bridge", hwna->name); return EBUSY; } bna = (struct netmap_bwrap_adapter *)na; /* make bwrap ifp point to the real ifp */ na->ifp = hwna->ifp; if_ref(na->ifp); na->na_private = bna; /* fill the ring data for the bwrap adapter with rx/tx meanings * swapped. The real cross-linking will be done during register, * when all the krings will have been created. */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ nma_set_nrings(na, t, nma_get_nrings(hwna, r)); nma_set_ndesc(na, t, nma_get_ndesc(hwna, r)); } na->nm_dtor = netmap_bwrap_dtor; na->nm_config = netmap_bwrap_config; na->nm_bdg_ctl = netmap_bwrap_bdg_ctl; na->pdev = hwna->pdev; na->nm_mem = netmap_mem_get(hwna->nm_mem); na->virt_hdr_len = hwna->virt_hdr_len; na->rx_buf_maxsize = hwna->rx_buf_maxsize; bna->hwna = hwna; netmap_adapter_get(hwna); hwna->na_private = bna; /* weak reference */ bna->saved_na_vp = hwna->na_vp; hwna->na_vp = &bna->up; bna->up.up.na_vp = &(bna->up); if (hwna->na_flags & NAF_HOST_RINGS) { if (hwna->na_flags & NAF_SW_ONLY) na->na_flags |= NAF_SW_ONLY; na->na_flags |= NAF_HOST_RINGS; hostna = &bna->host.up; /* limit the number of host rings to that of hw */ nm_bound_var(&hostna->num_tx_rings, 1, 1, nma_get_nrings(hwna, NR_TX), NULL); nm_bound_var(&hostna->num_rx_rings, 1, 1, nma_get_nrings(hwna, NR_RX), NULL); snprintf(hostna->name, sizeof(hostna->name), "%s^", na->name); hostna->ifp = hwna->ifp; for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); u_int nr = nma_get_nrings(hostna, t); nma_set_nrings(hostna, t, nr); nma_set_host_nrings(na, t, nr); if (nma_get_host_nrings(hwna, t) < nr) { nma_set_host_nrings(hwna, t, nr); } nma_set_ndesc(hostna, t, nma_get_ndesc(hwna, r)); } // hostna->nm_txsync = netmap_bwrap_host_txsync; // hostna->nm_rxsync = netmap_bwrap_host_rxsync; hostna->nm_mem = netmap_mem_get(na->nm_mem); hostna->na_private = bna; hostna->na_vp = &bna->up; na->na_hostvp = hwna->na_hostvp = hostna->na_hostvp = &bna->host; hostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */ hostna->rx_buf_maxsize = hwna->rx_buf_maxsize; } if (hwna->na_flags & NAF_MOREFRAG) na->na_flags |= NAF_MOREFRAG; - ND("%s<->%s txr %d txd %d rxr %d rxd %d", + nm_prdis("%s<->%s txr %d txd %d rxr %d rxd %d", na->name, ifp->if_xname, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc); error = netmap_attach_common(na); if (error) { goto err_put; } hwna->na_flags |= NAF_BUSY; return 0; err_put: hwna->na_vp = hwna->na_hostvp = NULL; netmap_adapter_put(hwna); return error; } struct nm_bridge * netmap_init_bridges2(u_int n) { int i; struct nm_bridge 
*b; b = nm_os_malloc(sizeof(struct nm_bridge) * n); if (b == NULL) return NULL; for (i = 0; i < n; i++) BDG_RWINIT(&b[i]); return b; } void netmap_uninit_bridges2(struct nm_bridge *b, u_int n) { int i; if (b == NULL) return; for (i = 0; i < n; i++) BDG_RWDESTROY(&b[i]); nm_os_free(b); } int netmap_init_bridges(void) { #ifdef CONFIG_NET_NS return netmap_bns_register(); #else nm_bridges = netmap_init_bridges2(NM_BRIDGES); if (nm_bridges == NULL) return ENOMEM; return 0; #endif } void netmap_uninit_bridges(void) { #ifdef CONFIG_NET_NS netmap_bns_unregister(); #else netmap_uninit_bridges2(nm_bridges, NM_BRIDGES); #endif } Index: stable/12/sys/dev/netmap/netmap_freebsd.c =================================================================== --- stable/12/sys/dev/netmap/netmap_freebsd.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_freebsd.c (revision 344046) @@ -1,1571 +1,1569 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include /* POLLIN, POLLOUT */ #include /* types used in module initialization */ #include /* DEV_MODULE_ORDERED */ #include #include /* kern_ioctl() */ #include #include /* vtophys */ #include /* vtophys */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include /* kthread_add() */ #include /* PROC_LOCK() */ #include /* RFNOWAIT */ #include /* sched_bind() */ #include /* mp_maxid */ #include #include #include /* IFT_ETHER */ #include /* ether_ifdetach */ #include /* LLADDR */ #include /* bus_dmamap_* */ #include /* in6_cksum_pseudo() */ #include /* in_pseudo(), in_cksum_hdr() */ #include #include #include #include /* ======================== FREEBSD-SPECIFIC ROUTINES ================== */ void nm_os_selinfo_init(NM_SELINFO_T *si) { struct mtx *m = &si->m; mtx_init(m, "nm_kn_lock", NULL, MTX_DEF); knlist_init_mtx(&si->si.si_note, m); } void nm_os_selinfo_uninit(NM_SELINFO_T *si) { /* XXX kqueue(9) needed; these will mirror knlist_init. 
*/ knlist_delete(&si->si.si_note, curthread, /*islocked=*/0); knlist_destroy(&si->si.si_note); /* now we don't need the mutex anymore */ mtx_destroy(&si->m); } void * nm_os_malloc(size_t size) { return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); } void * nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused) { return realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO); } void nm_os_free(void *addr) { free(addr, M_DEVBUF); } void nm_os_ifnet_lock(void) { IFNET_RLOCK(); } void nm_os_ifnet_unlock(void) { IFNET_RUNLOCK(); } static int netmap_use_count = 0; void nm_os_get_module(void) { netmap_use_count++; } void nm_os_put_module(void) { netmap_use_count--; } static void netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp) { netmap_undo_zombie(ifp); } static void netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp) { netmap_make_zombie(ifp); } static eventhandler_tag nm_ifnet_ah_tag; static eventhandler_tag nm_ifnet_dh_tag; int nm_os_ifnet_init(void) { nm_ifnet_ah_tag = EVENTHANDLER_REGISTER(ifnet_arrival_event, netmap_ifnet_arrival_handler, NULL, EVENTHANDLER_PRI_ANY); nm_ifnet_dh_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, netmap_ifnet_departure_handler, NULL, EVENTHANDLER_PRI_ANY); return 0; } void nm_os_ifnet_fini(void) { EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nm_ifnet_ah_tag); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nm_ifnet_dh_tag); } unsigned nm_os_ifnet_mtu(struct ifnet *ifp) { #if __FreeBSD_version < 1100030 return ifp->if_data.ifi_mtu; #else /* __FreeBSD_version >= 1100030 */ return ifp->if_mtu; #endif } rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum) { /* TODO XXX please use the FreeBSD implementation for this. */ uint16_t *words = (uint16_t *)data; int nw = len / 2; int i; for (i = 0; i < nw; i++) cur_sum += be16toh(words[i]); if (len & 1) cur_sum += (data[len-1] << 8); return cur_sum; } /* Fold a raw checksum: 'cur_sum' is in host byte order, while the * return value is in network byte order. */ uint16_t nm_os_csum_fold(rawsum_t cur_sum) { /* TODO XXX please use the FreeBSD implementation for this. */ while (cur_sum >> 16) cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16); return htobe16((~cur_sum) & 0xFFFF); } uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph) { #if 0 return in_cksum_hdr((void *)iph); #else return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0)); #endif } void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data, size_t datalen, uint16_t *check) { #ifdef INET uint16_t pseudolen = datalen + iph->protocol; /* Compute and insert the pseudo-header cheksum. */ *check = in_pseudo(iph->saddr, iph->daddr, htobe16(pseudolen)); /* Compute the checksum on TCP/UDP header + payload * (includes the pseudo-header). 
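 *
 * As a folding example (value illustrative only): if the raw 32-bit
 * sum is 0x2345f, nm_os_csum_fold() reduces it to 0x345f + 0x2 =
 * 0x3461, complements it to 0xcb9e and returns that value converted
 * to network byte order.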
*/ *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; nm_prerr("inet4 segmentation not supported"); } #endif } void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, size_t datalen, uint16_t *check) { #ifdef INET6 *check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0); *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; nm_prerr("inet6 segmentation not supported"); } #endif } /* on FreeBSD we send up one packet at a time */ void * nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev) { NA(ifp)->if_input(ifp, m); return NULL; } int nm_os_mbuf_has_csum_offld(struct mbuf *m) { return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6); } int nm_os_mbuf_has_seg_offld(struct mbuf *m) { return m->m_pkthdr.csum_flags & CSUM_TSO; } static void freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m) { int stolen; if (unlikely(!NM_NA_VALID(ifp))) { nm_prlim(1, "Warning: RX packet intercepted, but no" " emulated adapter"); return; } stolen = generic_rx_handler(ifp, m); if (!stolen) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)NA(ifp); gna->save_if_input(ifp, m); } } /* * Intercept the rx routine in the standard device driver. * Second argument is non-zero to intercept, 0 to restore */ int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = na->ifp; int ret = 0; nm_os_ifnet_lock(); if (intercept) { if (gna->save_if_input) { nm_prerr("RX on %s already intercepted", na->name); ret = EBUSY; /* already set */ goto out; } gna->save_if_input = ifp->if_input; ifp->if_input = freebsd_generic_rx_handler; } else { if (!gna->save_if_input) { nm_prerr("Failed to undo RX intercept on %s", na->name); ret = EINVAL; /* not saved */ goto out; } ifp->if_input = gna->save_if_input; gna->save_if_input = NULL; } out: nm_os_ifnet_unlock(); return ret; } /* * Intercept the packet steering routine in the tx path, * so that we can decide which queue is used for an mbuf. * Second argument is non-zero to intercept, 0 to restore. * On freebsd we just intercept if_transmit. */ int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = netmap_generic_getifp(gna); nm_os_ifnet_lock(); if (intercept) { na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; } else { ifp->if_transmit = na->if_transmit; } nm_os_ifnet_unlock(); return 0; } /* * Transmit routine used by generic_netmap_txsync(). Returns 0 on success * and non-zero on error (which may be packet drops or other errors). * addr and len identify the netmap buffer, m is the (preallocated) * mbuf to use for transmissions. * * We should add a reference to the mbuf so the m_freem() at the end * of the transmission does not consume resources. * * On FreeBSD, and on multiqueue cards, we can force the queue using * if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) * i = m->m_pkthdr.flowid % adapter->num_queues; * else * i = curcpu % adapter->num_queues; * */ int nm_os_generic_xmit_frame(struct nm_os_gen_arg *a) { int ret; u_int len = a->len; struct ifnet *ifp = a->ifp; struct mbuf *m = a->m; #if __FreeBSD_version < 1100000 /* * Old FreeBSD versions. The mbuf has a cluster attached, * we need to copy from the cluster to the netmap buffer. 
*/ if (MBUF_REFCNT(m) != 1) { nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m); panic("in generic_xmit_frame"); } if (m->m_ext.ext_size < len) { nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len); len = m->m_ext.ext_size; } bcopy(a->addr, m->m_data, len); #else /* __FreeBSD_version >= 1100000 */ /* New FreeBSD versions. Link the external storage to * the netmap buffer, so that no copy is necessary. */ m->m_ext.ext_buf = m->m_data = a->addr; m->m_ext.ext_size = len; #endif /* __FreeBSD_version >= 1100000 */ m->m_len = m->m_pkthdr.len = len; /* mbuf refcnt is not contended, no need to use atomic * (a memory barrier is enough). */ SET_MBUF_REFCNT(m, 2); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); m->m_pkthdr.flowid = a->ring_nr; m->m_pkthdr.rcvif = ifp; /* used for tx notification */ ret = NA(ifp)->if_transmit(ifp, m); return ret ? -1 : 0; } #if __FreeBSD_version >= 1100005 struct netmap_adapter * netmap_getna(if_t ifp) { return (NA((struct ifnet *)ifp)); } #endif /* __FreeBSD_version >= 1100005 */ /* * The following two functions are empty until we have a generic * way to extract the info from the ifp */ int nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx) { return 0; } void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq) { unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1; *txq = num_rings; *rxq = num_rings; } void nm_os_generic_set_features(struct netmap_generic_adapter *gna) { gna->rxsg = 1; /* Supported through m_copydata. */ gna->txqdisc = 0; /* Not supported. */ } void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na) { mit->mit_pending = 0; mit->mit_ring_idx = idx; mit->mit_na = na; } void nm_os_mitigation_start(struct nm_generic_mit *mit) { } void nm_os_mitigation_restart(struct nm_generic_mit *mit) { } int nm_os_mitigation_active(struct nm_generic_mit *mit) { return 0; } void nm_os_mitigation_cleanup(struct nm_generic_mit *mit) { } static int nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr) { return EINVAL; } static void nm_vi_start(struct ifnet *ifp) { panic("nm_vi_start() must not be called"); } /* * Index manager of persistent virtual interfaces. * It is used to decide the lowest byte of the MAC address. * We use the same algorithm with management of bridge port index. */ #define NM_VI_MAX 255 static struct { uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */ uint8_t active; struct mtx lock; } nm_vi_indices; void nm_os_vi_init_index(void) { int i; for (i = 0; i < NM_VI_MAX; i++) nm_vi_indices.index[i] = i; nm_vi_indices.active = 0; mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF); } /* return -1 if no index available */ static int nm_vi_get_index(void) { int ret; mtx_lock(&nm_vi_indices.lock); ret = nm_vi_indices.active == NM_VI_MAX ? -1 : nm_vi_indices.index[nm_vi_indices.active++]; mtx_unlock(&nm_vi_indices.lock); return ret; } static void nm_vi_free_index(uint8_t val) { int i, lim; mtx_lock(&nm_vi_indices.lock); lim = nm_vi_indices.active; for (i = 0; i < lim; i++) { if (nm_vi_indices.index[i] == val) { /* swap index[lim-1] and j */ int tmp = nm_vi_indices.index[lim-1]; nm_vi_indices.index[lim-1] = val; nm_vi_indices.index[i] = tmp; nm_vi_indices.active--; break; } } if (lim == nm_vi_indices.active) nm_prerr("Index %u not found", val); mtx_unlock(&nm_vi_indices.lock); } #undef NM_VI_MAX /* * Implementation of a netmap-capable virtual interface that * registered to the system. 
* It is based on if_tap.c and ip_fw_log.c in FreeBSD 9. * * Note: Linux sets refcount to 0 on allocation of net_device, * then increments it on registration to the system. * FreeBSD sets refcount to 1 on if_alloc(), and does not * increment this refcount on if_attach(). */ int nm_os_vi_persist(const char *name, struct ifnet **ret) { struct ifnet *ifp; u_short macaddr_hi; uint32_t macaddr_mid; u_char eaddr[6]; int unit = nm_vi_get_index(); /* just to decide MAC address */ if (unit < 0) return EBUSY; /* * We use the same MAC address generation method with tap * except for the highest octet is 00:be instead of 00:bd */ macaddr_hi = htons(0x00be); /* XXX tap + 1 */ macaddr_mid = (uint32_t) ticks; bcopy(&macaddr_hi, eaddr, sizeof(short)); bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t)); eaddr[5] = (uint8_t)unit; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { nm_prerr("if_alloc failed"); return ENOMEM; } if_initname(ifp, name, IF_DUNIT_NONE); ifp->if_mtu = 65536; ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = (void *)nm_vi_dummy; ifp->if_ioctl = nm_vi_dummy; ifp->if_start = nm_vi_start; ifp->if_mtu = ETHERMTU; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable |= IFCAP_LINKSTATE; ether_ifattach(ifp, eaddr); *ret = ifp; return 0; } /* unregister from the system and drop the final refcount */ void nm_os_vi_detach(struct ifnet *ifp) { nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]); ether_ifdetach(ifp); if_free(ifp); } #ifdef WITH_EXTMEM #include #include struct nm_os_extmem { vm_object_t obj; vm_offset_t kva; vm_offset_t size; uintptr_t scan; }; void nm_os_extmem_delete(struct nm_os_extmem *e) { nm_prinf("freeing %zx bytes", (size_t)e->size); vm_map_remove(kernel_map, e->kva, e->kva + e->size); nm_os_free(e); } char * nm_os_extmem_nextpage(struct nm_os_extmem *e) { char *rv = NULL; if (e->scan < e->kva + e->size) { rv = (char *)e->scan; e->scan += PAGE_SIZE; } return rv; } int nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2) { return (e1->obj == e2->obj); } int nm_os_extmem_nr_pages(struct nm_os_extmem *e) { return e->size >> PAGE_SHIFT; } struct nm_os_extmem * nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror) { vm_map_t map; vm_map_entry_t entry; vm_object_t obj; vm_prot_t prot; vm_pindex_t index; boolean_t wired; struct nm_os_extmem *e = NULL; int rv, error = 0; e = nm_os_malloc(sizeof(*e)); if (e == NULL) { error = ENOMEM; goto out; } map = &curthread->td_proc->p_vmspace->vm_map; rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry, &obj, &index, &prot, &wired); if (rv != KERN_SUCCESS) { nm_prerr("address %lx not found", p); goto out_free; } /* check that we are given the whole vm_object ? */ vm_map_lookup_done(map, entry); // XXX can we really use obj after releasing the map lock? 
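	/*
	 * From here on we only work on the vm_object: take a reference,
	 * reserve a kernel virtual address range for it, and wire it so
	 * that the backing pages stay resident; the error labels below
	 * unwind these steps in reverse order.
	 */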
e->obj = obj; vm_object_reference(obj); /* wire the memory and add the vm_object to the kernel map, * to make sure that it is not fred even if the processes that * are mmap()ing it all exit */ e->kva = vm_map_min(kernel_map); e->size = obj->size << PAGE_SHIFT; rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0, VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0); if (rv != KERN_SUCCESS) { nm_prerr("vm_map_find(%zx) failed", (size_t)e->size); goto out_rel; } rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); if (rv != KERN_SUCCESS) { nm_prerr("vm_map_wire failed"); goto out_rem; } e->scan = e->kva; return e; out_rem: vm_map_remove(kernel_map, e->kva, e->kva + e->size); e->obj = NULL; out_rel: vm_object_deallocate(e->obj); out_free: nm_os_free(e); out: if (perror) *perror = error; return NULL; } #endif /* WITH_EXTMEM */ /* ================== PTNETMAP GUEST SUPPORT ==================== */ #ifdef WITH_PTNETMAP #include #include #include /* bus_dmamap_* */ #include #include #include /* * ptnetmap memory device (memdev) for freebsd guest, * ssed to expose host netmap memory to the guest through a PCI BAR. */ /* * ptnetmap memdev private data structure */ struct ptnetmap_memdev { device_t dev; struct resource *pci_io; struct resource *pci_mem; struct netmap_mem_d *nm_mem; }; static int ptn_memdev_probe(device_t); static int ptn_memdev_attach(device_t); static int ptn_memdev_detach(device_t); static int ptn_memdev_shutdown(device_t); static device_method_t ptn_memdev_methods[] = { DEVMETHOD(device_probe, ptn_memdev_probe), DEVMETHOD(device_attach, ptn_memdev_attach), DEVMETHOD(device_detach, ptn_memdev_detach), DEVMETHOD(device_shutdown, ptn_memdev_shutdown), DEVMETHOD_END }; static driver_t ptn_memdev_driver = { PTNETMAP_MEMDEV_NAME, ptn_memdev_methods, sizeof(struct ptnetmap_memdev), }; /* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation * below. */ static devclass_t ptnetmap_devclass; DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, ptnetmap_devclass, NULL, NULL, SI_ORDER_MIDDLE + 1); /* * Map host netmap memory through PCI-BAR in the guest OS, * returning physical (nm_paddr) and virtual (nm_addr) addresses * of the netmap memory mapped in the guest. */ int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr, void **nm_addr, uint64_t *mem_size) { int rid; nm_prinf("ptn_memdev_driver iomap"); rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) | (*mem_size << 32); /* map memory allocator */ ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY, &rid, 0, ~0, *mem_size, RF_ACTIVE); if (ptn_dev->pci_mem == NULL) { *nm_paddr = 0; *nm_addr = NULL; return ENOMEM; } *nm_paddr = rman_get_start(ptn_dev->pci_mem); *nm_addr = rman_get_virtual(ptn_dev->pci_mem); nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===", PTNETMAP_MEM_PCI_BAR, (unsigned long)(*nm_paddr), (unsigned long)rman_get_size(ptn_dev->pci_mem), (unsigned long)*mem_size); return (0); } uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg) { return bus_read_4(ptn_dev->pci_io, reg); } /* Unmap host netmap memory. 
*/ void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev) { nm_prinf("ptn_memdev_driver iounmap"); if (ptn_dev->pci_mem) { bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } } /* Device identification routine, return BUS_PROBE_DEFAULT on success, * positive on failure */ static int ptn_memdev_probe(device_t dev) { char desc[256]; if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID) return (ENXIO); if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID) return (ENXIO); snprintf(desc, sizeof(desc), "%s PCI adapter", PTNETMAP_MEMDEV_NAME); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } /* Device initialization routine. */ static int ptn_memdev_attach(device_t dev) { struct ptnetmap_memdev *ptn_dev; int rid; uint16_t mem_id; ptn_dev = device_get_softc(dev); ptn_dev->dev = dev; pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (ptn_dev->pci_io == NULL) { device_printf(dev, "cannot map I/O space\n"); return (ENXIO); } mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID); /* create guest allocator */ ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id); if (ptn_dev->nm_mem == NULL) { ptn_memdev_detach(dev); return (ENOMEM); } netmap_mem_get(ptn_dev->nm_mem); nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id); return (0); } /* Device removal routine. */ static int ptn_memdev_detach(device_t dev) { struct ptnetmap_memdev *ptn_dev; ptn_dev = device_get_softc(dev); if (ptn_dev->nm_mem) { nm_prinf("ptnetmap memdev detached, host memid %u", netmap_mem_get_id(ptn_dev->nm_mem)); netmap_mem_put(ptn_dev->nm_mem); ptn_dev->nm_mem = NULL; } if (ptn_dev->pci_mem) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } if (ptn_dev->pci_io) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io); ptn_dev->pci_io = NULL; } return (0); } static int ptn_memdev_shutdown(device_t dev) { return bus_generic_shutdown(dev); } #endif /* WITH_PTNETMAP */ /* * In order to track whether pages are still mapped, we hook into * the standard cdev_pager and intercept the constructor and * destructor. 
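 *
 * The resulting life cycle is: netmap_mmap_single() calls
 * cdev_pager_allocate(), whose constructor netmap_dev_pager_ctor()
 * takes a dev_ref(); faults on the mapping are served by
 * netmap_dev_pager_fault(); when the last mapping goes away the pager
 * runs netmap_dev_pager_dtor(), which calls netmap_dtor() and dev_rel().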
*/ struct netmap_vm_handle_t { struct cdev *dev; struct netmap_priv_d *priv; }; static int netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { struct netmap_vm_handle_t *vmh = handle; if (netmap_verbose) nm_prinf("handle %p size %jd prot %d foff %jd", handle, (intmax_t)size, prot, (intmax_t)foff); if (color) *color = 0; dev_ref(vmh->dev); return 0; } static void netmap_dev_pager_dtor(void *handle) { struct netmap_vm_handle_t *vmh = handle; struct cdev *dev = vmh->dev; struct netmap_priv_d *priv = vmh->priv; if (netmap_verbose) nm_prinf("handle %p", handle); netmap_dtor(priv); free(vmh, M_DEVBUF); dev_rel(dev); } static int netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct netmap_vm_handle_t *vmh = object->handle; struct netmap_priv_d *priv = vmh->priv; struct netmap_adapter *na = priv->np_na; vm_paddr_t paddr; vm_page_t page; vm_memattr_t memattr; vm_pindex_t pidx; nm_prdis("object %p offset %jd prot %d mres %p", object, (intmax_t)offset, prot, mres); memattr = object->memattr; pidx = OFF_TO_IDX(offset); paddr = netmap_mem_ofstophys(na->nm_mem, offset); if (paddr == 0) return VM_PAGER_FAIL; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake page, update it with * the new physical address. */ page = *mres; vm_page_updatefake(page, paddr, memattr); } else { /* * Replace the passed in reqpage page with our own fake page and * free up the all of the original pages. */ #ifndef VM_OBJECT_WUNLOCK /* FreeBSD < 10.x */ #define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK #define VM_OBJECT_WLOCK VM_OBJECT_LOCK #endif /* VM_OBJECT_WUNLOCK */ VM_OBJECT_WUNLOCK(object); page = vm_page_getfake(paddr, memattr); VM_OBJECT_WLOCK(object); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; vm_page_insert(page, object, pidx); } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } static struct cdev_pager_ops netmap_cdev_pager_ops = { .cdev_pg_ctor = netmap_dev_pager_ctor, .cdev_pg_dtor = netmap_dev_pager_dtor, .cdev_pg_fault = netmap_dev_pager_fault, }; static int netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, vm_size_t objsize, vm_object_t *objp, int prot) { int error; struct netmap_vm_handle_t *vmh; struct netmap_priv_d *priv; vm_object_t obj; if (netmap_verbose) nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev, (intmax_t )*foff, (intmax_t )objsize, objp, prot); vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (vmh == NULL) return ENOMEM; vmh->dev = cdev; NMG_LOCK(); error = devfs_get_cdevpriv((void**)&priv); if (error) goto err_unlock; if (priv->np_nifp == NULL) { error = EINVAL; goto err_unlock; } vmh->priv = priv; priv->np_refs++; NMG_UNLOCK(); obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &netmap_cdev_pager_ops, objsize, prot, *foff, NULL); if (obj == NULL) { nm_prerr("cdev_pager_allocate failed"); error = EINVAL; goto err_deref; } *objp = obj; return 0; err_deref: NMG_LOCK(); priv->np_refs--; err_unlock: NMG_UNLOCK(); // err: free(vmh, M_DEVBUF); return error; } /* * On FreeBSD the close routine is only called on the last close on * the device (/dev/netmap) so we cannot do anything useful. * To track close() on individual file descriptors we pass netmap_dtor() to * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor * when the last fd pointing to the device is closed. 
* * Note that FreeBSD does not even munmap() on close() so we also have * to track mmap() ourselves, and postpone the call to * netmap_dtor() is called when the process has no open fds and no active * memory maps on /dev/netmap, as in linux. */ static int netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { if (netmap_verbose) nm_prinf("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td); return 0; } static int netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct netmap_priv_d *priv; int error; (void)dev; (void)oflags; (void)devtype; (void)td; NMG_LOCK(); priv = netmap_priv_new(); if (priv == NULL) { error = ENOMEM; goto out; } error = devfs_set_cdevpriv(priv, netmap_dtor); if (error) { netmap_priv_delete(priv); } out: NMG_UNLOCK(); return error; } /******************** kthread wrapper ****************/ #include u_int nm_os_ncpus(void) { return mp_maxid + 1; } struct nm_kctx_ctx { /* Userspace thread (kthread creator). */ struct thread *user_td; /* worker function and parameter */ nm_kctx_worker_fn_t worker_fn; void *worker_private; struct nm_kctx *nmk; /* integer to manage multiple worker contexts (e.g., RX or TX on ptnetmap) */ long type; }; struct nm_kctx { struct thread *worker; struct mtx worker_lock; struct nm_kctx_ctx worker_ctx; int run; /* used to stop kthread */ int attach_user; /* kthread attached to user_process */ int affinity; }; static void nm_kctx_worker(void *data) { struct nm_kctx *nmk = data; struct nm_kctx_ctx *ctx = &nmk->worker_ctx; if (nmk->affinity >= 0) { thread_lock(curthread); sched_bind(curthread, nmk->affinity); thread_unlock(curthread); } while (nmk->run) { /* * check if the parent process dies * (when kthread is attached to user process) */ if (ctx->user_td) { PROC_LOCK(curproc); thread_suspend_check(0); PROC_UNLOCK(curproc); } else { kthread_suspend_check(); } /* Continuously execute worker process. */ ctx->worker_fn(ctx->worker_private); /* worker body */ } kthread_exit(); } void nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity) { nmk->affinity = affinity; } struct nm_kctx * nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque) { struct nm_kctx *nmk = NULL; nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO); if (!nmk) return NULL; mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF); nmk->worker_ctx.worker_fn = cfg->worker_fn; nmk->worker_ctx.worker_private = cfg->worker_private; nmk->worker_ctx.type = cfg->type; nmk->affinity = -1; /* attach kthread to user process (ptnetmap) */ nmk->attach_user = cfg->attach_user; return nmk; } int nm_os_kctx_worker_start(struct nm_kctx *nmk) { struct proc *p = NULL; int error = 0; /* Temporarily disable this function as it is currently broken * and causes kernel crashes. The failure can be triggered by * the "vale_polling_enable_disable" test in ctrl-api-test.c. 
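	 * Until that is fixed, the early return below makes this function
	 * report EOPNOTSUPP, leaving the kthread_add() path that follows
	 * effectively unreachable.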
*/ return EOPNOTSUPP; if (nmk->worker) return EBUSY; /* check if we want to attach kthread to user process */ if (nmk->attach_user) { nmk->worker_ctx.user_td = curthread; p = curthread->td_proc; } /* enable kthread main loop */ nmk->run = 1; /* create kthread */ if((error = kthread_add(nm_kctx_worker, nmk, p, &nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld", nmk->worker_ctx.type))) { goto err; } nm_prinf("nm_kthread started td %p", nmk->worker); return 0; err: nm_prerr("nm_kthread start failed err %d", error); nmk->worker = NULL; return error; } void nm_os_kctx_worker_stop(struct nm_kctx *nmk) { if (!nmk->worker) return; /* tell to kthread to exit from main loop */ nmk->run = 0; /* wake up kthread if it sleeps */ kthread_resume(nmk->worker); nmk->worker = NULL; } void nm_os_kctx_destroy(struct nm_kctx *nmk) { if (!nmk) return; if (nmk->worker) nm_os_kctx_worker_stop(nmk); free(nmk, M_DEVBUF); } /******************** kqueue support ****************/ /* * In addition to calling selwakeuppri(), nm_os_selwakeup() also * needs to call KNOTE to wake up kqueue listeners. * We use a non-zero 'hint' argument to inform the netmap_knrw() * function that it is being called from 'nm_os_selwakeup'; this * is necessary because when netmap_knrw() is called by the kevent * subsystem (i.e. kevent_scan()) we also need to call netmap_poll(). * The knote uses a private mutex associated to the 'si' (see struct * selinfo, struct nm_selinfo, and nm_os_selinfo_init). * * The netmap_kqfilter() function registers one or another f_event * depending on read or write mode. A pointer to the struct * 'netmap_priv_d' is stored into kn->kn_hook, so that it can later * be passed to netmap_poll(). We pass NULL as a third argument to * netmap_poll(), so that the latter only runs the txsync/rxsync * (if necessary), and skips the nm_os_selrecord() calls. */ void nm_os_selwakeup(struct nm_selinfo *si) { - if (netmap_verbose) - nm_prinf("on knote %p", &si->si.si_note); selwakeuppri(&si->si, PI_NET); /* We use a non-zero hint to distinguish this notification call * from the call done in kqueue_scan(), which uses hint=0. */ KNOTE(&si->si.si_note, /*hint=*/0x100, mtx_owned(&si->m) ? KNF_LISTLOCKED : 0); } void nm_os_selrecord(struct thread *td, struct nm_selinfo *si) { selrecord(td, &si->si); } static void netmap_knrdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_RX]->si; nm_prinf("remove selinfo %p", si); knlist_remove(&si->si_note, kn, /*islocked=*/0); } static void netmap_knwdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_TX]->si; nm_prinf("remove selinfo %p", si); knlist_remove(&si->si_note, kn, /*islocked=*/0); } /* * Callback triggered by netmap notifications (see netmap_notify()), * and by the application calling kevent(). In the former case we * just return 1 (events ready), since we are not able to do better. * In the latter case we use netmap_poll() to see which events are * ready. */ static int netmap_knrw(struct knote *kn, long hint, int events) { struct netmap_priv_d *priv; int revents; if (hint != 0) { /* Called from netmap_notify(), typically from a * thread different from the one issuing kevent(). * Assume we are ready. */ return 1; } /* Called from kevent(). */ priv = kn->kn_hook; revents = netmap_poll(priv, events, /*thread=*/NULL); return (events & revents) ? 
1 : 0; } static int netmap_knread(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLIN); } static int netmap_knwrite(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLOUT); } static struct filterops netmap_rfiltops = { .f_isfd = 1, .f_detach = netmap_knrdetach, .f_event = netmap_knread, }; static struct filterops netmap_wfiltops = { .f_isfd = 1, .f_detach = netmap_knwdetach, .f_event = netmap_knwrite, }; /* * This is called when a thread invokes kevent() to record * a change in the configuration of the kqueue(). * The 'priv' is the one associated to the open netmap device. */ static int netmap_kqfilter(struct cdev *dev, struct knote *kn) { struct netmap_priv_d *priv; int error; struct netmap_adapter *na; struct nm_selinfo *si; int ev = kn->kn_filter; if (ev != EVFILT_READ && ev != EVFILT_WRITE) { nm_prerr("bad filter request %d", ev); return 1; } error = devfs_get_cdevpriv((void**)&priv); if (error) { nm_prerr("device not yet setup"); return 1; } na = priv->np_na; if (na == NULL) { nm_prerr("no netmap adapter for this file descriptor"); return 1; } /* the si is indicated in the priv */ si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX]; kn->kn_fop = (ev == EVFILT_WRITE) ? &netmap_wfiltops : &netmap_rfiltops; kn->kn_hook = priv; knlist_add(&si->si.si_note, kn, /*islocked=*/0); return 0; } static int freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td) { struct netmap_priv_d *priv; if (devfs_get_cdevpriv((void **)&priv)) { return POLLERR; } return netmap_poll(priv, events, td); } static int freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int ffla __unused, struct thread *td) { int error; struct netmap_priv_d *priv; CURVNET_SET(TD_TO_VNET(td)); error = devfs_get_cdevpriv((void **)&priv); if (error) { /* XXX ENOENT should be impossible, since the priv * is now created in the open */ if (error == ENOENT) error = ENXIO; goto out; } error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1); out: CURVNET_RESTORE(); return error; } void nm_os_onattach(struct ifnet *ifp) { ifp->if_capabilities |= IFCAP_NETMAP; } void nm_os_onenter(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; ifp->if_capenable |= IFCAP_NETMAP; } void nm_os_onexit(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; } extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */ struct cdevsw netmap_cdevsw = { .d_version = D_VERSION, .d_name = "netmap", .d_open = netmap_open, .d_mmap_single = netmap_mmap_single, .d_ioctl = freebsd_netmap_ioctl, .d_poll = freebsd_netmap_poll, .d_kqfilter = netmap_kqfilter, .d_close = netmap_close, }; /*--- end of kqueue support ----*/ /* * Kernel entry point. * * Initialize/finalize the module and return. * * Return 0 on success, errno on failure. */ static int netmap_loader(__unused struct module *module, int event, __unused void *arg) { int error = 0; switch (event) { case MOD_LOAD: error = netmap_init(); break; case MOD_UNLOAD: /* * if some one is still using netmap, * then the module can not be unloaded. 
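		 * ("In use" means netmap_use_count is non-zero; the counter is
		 * maintained by nm_os_get_module()/nm_os_put_module(), which
		 * are presumably called by the netmap core whenever a client
		 * file descriptor is created or destroyed.)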
*/ if (netmap_use_count) { nm_prerr("netmap module can not be unloaded - netmap_use_count: %d", netmap_use_count); error = EBUSY; break; } netmap_fini(); break; default: error = EOPNOTSUPP; break; } return (error); } #ifdef DEV_MODULE_ORDERED /* * The netmap module contains three drivers: (i) the netmap character device * driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI * device driver. The attach() routines of both (ii) and (iii) need the * lock of the global allocator, and such lock is initialized in netmap_init(), * which is part of (i). * Therefore, we make sure that (i) is loaded before (ii) and (iii), using * the 'order' parameter of driver declaration macros. For (i), we specify * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED * macros for (ii) and (iii). */ DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE); #else /* !DEV_MODULE_ORDERED */ DEV_MODULE(netmap, netmap_loader, NULL); #endif /* DEV_MODULE_ORDERED */ MODULE_DEPEND(netmap, pci, 1, 1, 1); MODULE_VERSION(netmap, 1); /* reduce conditional code */ // linux API, use for the knlist in FreeBSD /* use a private mutex for the knlist */ Index: stable/12/sys/dev/netmap/netmap_generic.c =================================================================== --- stable/12/sys/dev/netmap/netmap_generic.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_generic.c (revision 344046) @@ -1,1155 +1,1132 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2016 Vincenzo Maffione * Copyright (C) 2013-2016 Luigi Rizzo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module implements netmap support on top of standard, * unmodified device drivers. * * A NIOCREGIF request is handled here if the device does not * have native support. TX and RX rings are emulated as follows: * * NIOCREGIF * We preallocate a block of TX mbufs (roughly as many as * tx descriptors; the number is not critical) to speed up * operation during transmissions. The refcount on most of * these buffers is artificially bumped up so we can recycle * them more easily. Also, the destructor is intercepted * so we use it as an interrupt notification to wake up * processes blocked on a poll(). 
* * For each receive ring we allocate one "struct mbq" * (an mbuf tailq plus a spinlock). We intercept packets * (through if_input) * on the receive path and put them in the mbq from which * netmap receive routines can grab them. * * TX: * in the generic_txsync() routine, netmap buffers are copied * (or linked, in a future) to the preallocated mbufs * and pushed to the transmit queue. Some of these mbufs * (those with NS_REPORT, or otherwise every half ring) * have the refcount=1, others have refcount=2. * When the destructor is invoked, we take that as * a notification that all mbufs up to that one in * the specific ring have been completed, and generate * the equivalent of a transmit interrupt. * * RX: * */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include #include /* PROT_EXEC */ #include #include /* sockaddrs */ #include #include #include #include #include /* bus_dmamap_* in netmap_kern.h */ #include #include #include #define MBUF_RXQ(m) ((m)->m_pkthdr.flowid) #define smp_mb() #elif defined _WIN32 #include "win_glue.h" #define MBUF_TXQ(m) 0//((m)->m_pkthdr.flowid) #define MBUF_RXQ(m) 0//((m)->m_pkthdr.flowid) #define smp_mb() //XXX: to be correctly defined #else /* linux */ #include "bsd_glue.h" #include /* struct ethtool_ops, get_ringparam */ #include static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { return alloc_skb(ifp->needed_headroom + len + ifp->needed_tailroom, GFP_ATOMIC); } #endif /* linux */ /* Common headers. */ #include #include #include #define for_each_kring_n(_i, _k, _karr, _n) \ for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)]) #define for_each_tx_kring(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings) #define for_each_tx_kring_h(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1) #define for_each_rx_kring(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings) #define for_each_rx_kring_h(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1) /* ======================== PERFORMANCE STATISTICS =========================== */ #ifdef RATE_GENERIC #define IFRATE(x) x struct rate_stats { unsigned long txpkt; unsigned long txsync; unsigned long txirq; unsigned long txrepl; unsigned long txdrop; unsigned long rxpkt; unsigned long rxirq; unsigned long rxsync; }; struct rate_context { unsigned refcount; struct timer_list timer; struct rate_stats new; struct rate_stats old; }; #define RATE_PRINTK(_NAME_) \ printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD); #define RATE_PERIOD 2 static void rate_callback(unsigned long arg) { struct rate_context * ctx = (struct rate_context *)arg; struct rate_stats cur = ctx->new; int r; RATE_PRINTK(txpkt); RATE_PRINTK(txsync); RATE_PRINTK(txirq); RATE_PRINTK(txrepl); RATE_PRINTK(txdrop); RATE_PRINTK(rxpkt); RATE_PRINTK(rxsync); RATE_PRINTK(rxirq); printk("\n"); ctx->old = cur; r = mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(RATE_PERIOD * 1000)); if (unlikely(r)) nm_prerr("mod_timer() failed"); } static struct rate_context rate_ctx; void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi) { if (txp) rate_ctx.new.txpkt++; if (txs) rate_ctx.new.txsync++; if (txi) rate_ctx.new.txirq++; if (rxp) rate_ctx.new.rxpkt++; if (rxs) rate_ctx.new.rxsync++; if (rxi) rate_ctx.new.rxirq++; } #else /* !RATE */ #define IFRATE(x) #endif /* !RATE */ /* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */ /* * 
Wrapper used by the generic adapter layer to notify * the poller threads. Differently from netmap_rx_irq(), we check * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq. */ void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done) { if (unlikely(!nm_netmap_on(na))) return; netmap_common_irq(na, q, work_done); #ifdef RATE_GENERIC if (work_done) rate_ctx.new.rxirq++; else rate_ctx.new.txirq++; #endif /* RATE_GENERIC */ } static int generic_netmap_unregister(struct netmap_adapter *na) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring = NULL; int i, r; if (na->active_fds == 0) { na->na_flags &= ~NAF_NETMAP_ON; /* Stop intercepting packets on the RX path. */ nm_os_catch_rx(gna, 0); /* Release packet steering control. */ nm_os_catch_tx(gna, 0); } - for_each_rx_kring_h(r, kring, na) { - if (nm_kring_pending_off(kring)) { - nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name); - kring->nr_mode = NKR_NETMAP_OFF; - } - } - for_each_tx_kring_h(r, kring, na) { - if (nm_kring_pending_off(kring)) { - kring->nr_mode = NKR_NETMAP_OFF; - nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name); - } - } + netmap_krings_mode_commit(na, /*onoff=*/0); for_each_rx_kring(r, kring, na) { /* Free the mbufs still pending in the RX queues, * that did not end up into the corresponding netmap * RX rings. */ mbq_safe_purge(&kring->rx_queue); nm_os_mitigation_cleanup(&gna->mit[r]); } /* Decrement reference counter for the mbufs in the * TX pools. These mbufs can be still pending in drivers, * (e.g. this happens with virtio-net driver, which * does lazy reclaiming of transmitted mbufs). */ for_each_tx_kring(r, kring, na) { /* We must remove the destructor on the TX event, * because the destructor invokes netmap code, and * the netmap module may disappear before the * TX event is consumed. */ mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event) { SET_MBUF_DESTRUCTOR(kring->tx_event, NULL); } kring->tx_event = NULL; mtx_unlock_spin(&kring->tx_event_lock); } if (na->active_fds == 0) { nm_os_free(gna->mit); for_each_rx_kring(r, kring, na) { mbq_safe_fini(&kring->rx_queue); } for_each_tx_kring(r, kring, na) { mtx_destroy(&kring->tx_event_lock); if (kring->tx_pool == NULL) { continue; } for (i=0; inum_tx_desc; i++) { if (kring->tx_pool[i]) { m_freem(kring->tx_pool[i]); } } nm_os_free(kring->tx_pool); kring->tx_pool = NULL; } #ifdef RATE_GENERIC if (--rate_ctx.refcount == 0) { nm_prinf("del_timer()"); del_timer(&rate_ctx.timer); } #endif nm_prinf("Emulated adapter for %s deactivated", na->name); } return 0; } /* Enable/disable netmap mode for a generic network interface. */ static int generic_netmap_register(struct netmap_adapter *na, int enable) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring = NULL; int error; int i, r; if (!na) { return EINVAL; } if (!enable) { /* This is actually an unregif. */ return generic_netmap_unregister(na); } if (na->active_fds == 0) { nm_prinf("Emulated adapter for %s activated", na->name); /* Do all memory allocations when (na->active_fds == 0), to * simplify error management. */ /* Allocate memory for mitigation support on all the rx queues. */ gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit)); if (!gna->mit) { nm_prerr("mitigation allocation failed"); error = ENOMEM; goto out; } for_each_rx_kring(r, kring, na) { /* Init mitigation support. 
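			 * (On FreeBSD the nm_os_mitigation_*() hooks are
			 * currently stubs, so this mostly records the ring
			 * index and adapter; other ports may use this state
			 * to drive a timer that coalesces RX notifications.)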
*/ nm_os_mitigation_init(&gna->mit[r], r, na); /* Initialize the rx queue, as generic_rx_handler() can * be called as soon as nm_os_catch_rx() returns. */ mbq_safe_init(&kring->rx_queue); } /* * Prepare mbuf pools (parallel to the tx rings), for packet * transmission. Don't preallocate the mbufs here, it's simpler * to leave this task to txsync. */ for_each_tx_kring(r, kring, na) { kring->tx_pool = NULL; } for_each_tx_kring(r, kring, na) { kring->tx_pool = nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *)); if (!kring->tx_pool) { nm_prerr("tx_pool allocation failed"); error = ENOMEM; goto free_tx_pools; } mtx_init(&kring->tx_event_lock, "tx_event_lock", NULL, MTX_SPIN); } } - for_each_rx_kring_h(r, kring, na) { - if (nm_kring_pending_on(kring)) { - nm_prinf("Emulated adapter: ring '%s' activated", kring->name); - kring->nr_mode = NKR_NETMAP_ON; - } - - } - for_each_tx_kring_h(r, kring, na) { - if (nm_kring_pending_on(kring)) { - nm_prinf("Emulated adapter: ring '%s' activated", kring->name); - kring->nr_mode = NKR_NETMAP_ON; - } - } + netmap_krings_mode_commit(na, /*onoff=*/1); for_each_tx_kring(r, kring, na) { /* Initialize tx_pool and tx_event. */ for (i=0; inum_tx_desc; i++) { kring->tx_pool[i] = NULL; } kring->tx_event = NULL; } if (na->active_fds == 0) { /* Prepare to intercept incoming traffic. */ error = nm_os_catch_rx(gna, 1); if (error) { nm_prerr("nm_os_catch_rx(1) failed (%d)", error); goto free_tx_pools; } /* Let netmap control the packet steering. */ error = nm_os_catch_tx(gna, 1); if (error) { nm_prerr("nm_os_catch_tx(1) failed (%d)", error); goto catch_rx; } na->na_flags |= NAF_NETMAP_ON; #ifdef RATE_GENERIC if (rate_ctx.refcount == 0) { nm_prinf("setup_timer()"); memset(&rate_ctx, 0, sizeof(rate_ctx)); setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx); if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) { nm_prerr("Error: mod_timer()"); } } rate_ctx.refcount++; #endif /* RATE */ } return 0; /* Here (na->active_fds == 0) holds. */ catch_rx: nm_os_catch_rx(gna, 0); free_tx_pools: for_each_tx_kring(r, kring, na) { mtx_destroy(&kring->tx_event_lock); if (kring->tx_pool == NULL) { continue; } nm_os_free(kring->tx_pool); kring->tx_pool = NULL; } for_each_rx_kring(r, kring, na) { mbq_safe_fini(&kring->rx_queue); } nm_os_free(gna->mit); out: return error; } /* * Callback invoked when the device driver frees an mbuf used * by netmap to transmit a packet. This usually happens when * the NIC notifies the driver that transmission is completed. */ static void generic_mbuf_destructor(struct mbuf *m) { struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m)); struct netmap_kring *kring; unsigned int r = MBUF_TXQ(m); unsigned int r_orig = r; if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) { nm_prerr("Error: no netmap adapter on device %p", GEN_TX_MBUF_IFP(m)); return; } /* * First, clear the event mbuf. * In principle, the event 'm' should match the one stored * on ring 'r'. However we check it explicitely to stay * safe against lower layers (qdisc, driver, etc.) changing * MBUF_TXQ(m) under our feet. If the match is not found * on 'r', we try to see if it belongs to some other ring. 
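	 * (For example, if the event was armed while the mbuf sat on ring 0
	 * but MBUF_TXQ(m) now reports ring 2, the loop starts at ring 2,
	 * wraps around until it finds the match on ring 0, and logs the
	 * migration.)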
*/ for (;;) { bool match = false; kring = na->tx_rings[r]; mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event == m) { kring->tx_event = NULL; match = true; } mtx_unlock_spin(&kring->tx_event_lock); if (match) { if (r != r_orig) { nm_prlim(1, "event %p migrated: ring %u --> %u", m, r_orig, r); } break; } if (++r == na->num_tx_rings) r = 0; if (r == r_orig) { nm_prlim(1, "Cannot match event %p", m); return; } } /* Second, wake up clients. They will reclaim the event through * txsync. */ netmap_generic_irq(na, r, NULL); #ifdef __FreeBSD__ #if __FreeBSD_version <= 1200050 void_mbuf_dtor(m, NULL, NULL); #else /* __FreeBSD_version >= 1200051 */ void_mbuf_dtor(m); #endif /* __FreeBSD_version >= 1200051 */ #endif } /* Record completed transmissions and update hwtail. * * The oldest tx buffer not yet completed is at nr_hwtail + 1, * nr_hwcur is the first unsent buffer. */ static u_int generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc) { u_int const lim = kring->nkr_num_slots - 1; u_int nm_i = nm_next(kring->nr_hwtail, lim); u_int hwcur = kring->nr_hwcur; u_int n = 0; struct mbuf **tx_pool = kring->tx_pool; nm_prdis("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail); while (nm_i != hwcur) { /* buffers not completed */ struct mbuf *m = tx_pool[nm_i]; if (txqdisc) { if (m == NULL) { /* Nothing to do, this is going * to be replenished. */ nm_prlim(3, "Is this happening?"); } else if (MBUF_QUEUED(m)) { break; /* Not dequeued yet. */ } else if (MBUF_REFCNT(m) != 1) { /* This mbuf has been dequeued but is still busy * (refcount is 2). * Leave it to the driver and replenish. */ m_freem(m); tx_pool[nm_i] = NULL; } } else { if (unlikely(m == NULL)) { int event_consumed; /* This slot was used to place an event. */ mtx_lock_spin(&kring->tx_event_lock); event_consumed = (kring->tx_event == NULL); mtx_unlock_spin(&kring->tx_event_lock); if (!event_consumed) { /* The event has not been consumed yet, * still busy in the driver. */ break; } /* The event has been consumed, we can go * ahead. */ } else if (MBUF_REFCNT(m) != 1) { /* This mbuf is still busy: its refcnt is 2. */ break; } } n++; nm_i = nm_next(nm_i, lim); } kring->nr_hwtail = nm_prev(nm_i, lim); nm_prdis("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail); return n; } /* Compute a slot index in the middle between inf and sup. */ static inline u_int ring_middle(u_int inf, u_int sup, u_int lim) { u_int n = lim + 1; u_int e; if (sup >= inf) { e = (sup + inf) / 2; } else { /* wrap around */ e = (sup + n + inf) / 2; if (e >= n) { e -= n; } } if (unlikely(e >= n)) { nm_prerr("This cannot happen"); e = 0; } return e; } static void generic_set_tx_event(struct netmap_kring *kring, u_int hwcur) { u_int lim = kring->nkr_num_slots - 1; struct mbuf *m; u_int e; u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */ if (ntc == hwcur) { return; /* all buffers are free */ } /* * We have pending packets in the driver between hwtail+1 * and hwcur, and we have to chose one of these slot to * generate a notification. * There is a race but this is only called within txsync which * does a double check. */ #if 0 /* Choose a slot in the middle, so that we don't risk ending * up in a situation where the client continuously wake up, * fills one or a few TX slots and go to sleep again. */ e = ring_middle(ntc, hwcur, lim); #else /* Choose the first pending slot, to be safe against driver * reordering mbuf transmissions. */ e = ntc; #endif m = kring->tx_pool[e]; if (m == NULL) { /* An event is already in place. 
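		 * (tx_pool[e] is cleared below whenever an event is armed on
		 * slot 'e' and is only replenished by txsync, so a NULL entry
		 * means an event was requested on this slot earlier and the
		 * slot has not been refilled yet.)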
*/ return; } mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event) { /* An event is already in place. */ mtx_unlock_spin(&kring->tx_event_lock); return; } SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor); kring->tx_event = m; mtx_unlock_spin(&kring->tx_event_lock); kring->tx_pool[e] = NULL; nm_prdis("Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 ); /* Decrement the refcount. This will free it if we lose the race * with the driver. */ m_freem(m); smp_mb(); } /* * generic_netmap_txsync() transforms netmap buffers into mbufs * and passes them to the standard device driver * (ndo_start_xmit() or ifp->if_transmit() ). * On linux this is not done directly, but using dev_queue_xmit(), * since it implements the TX flow control (and takes some locks). */ static int generic_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ // j u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; u_int ring_nr = kring->ring_id; IFRATE(rate_ctx.new.txsync++); rmb(); /* * First part: process new packets to send. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ struct nm_os_gen_arg a; u_int event = -1; if (gna->txqdisc && nm_kr_txempty(kring)) { /* In txqdisc mode, we ask for a delayed notification, * but only when cur == hwtail, which means that the * client is going to block. */ event = ring_middle(nm_i, head, lim); nm_prdis("Place txqdisc event (hwcur=%u,event=%u," "head=%u,hwtail=%u)", nm_i, event, head, kring->nr_hwtail); } a.ifp = ifp; a.ring_nr = ring_nr; a.head = a.tail = NULL; while (nm_i != head) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; void *addr = NMB(na, slot); /* device-specific */ struct mbuf *m; int tx_ret; NM_CHECK_ADDR_LEN(na, addr, len); /* Tale a mbuf from the tx pool (replenishing the pool * entry if necessary) and copy in the user packet. */ m = kring->tx_pool[nm_i]; if (unlikely(m == NULL)) { kring->tx_pool[nm_i] = m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na)); if (m == NULL) { nm_prlim(2, "Failed to replenish mbuf"); /* Here we could schedule a timer which * retries to replenish after a while, * and notifies the client when it * manages to replenish some slots. In * any case we break early to avoid * crashes. */ break; } IFRATE(rate_ctx.new.txrepl++); } a.m = m; a.addr = addr; a.len = len; a.qevent = (nm_i == event); /* When not in txqdisc mode, we should ask * notifications when NS_REPORT is set, or roughly * every half ring. To optimize this, we set a * notification event when the client runs out of * TX ring space, or when transmission fails. In * the latter case we also break early. */ tx_ret = nm_os_generic_xmit_frame(&a); if (unlikely(tx_ret)) { if (!gna->txqdisc) { /* * No room for this mbuf in the device driver. * Request a notification FOR A PREVIOUS MBUF, * then call generic_netmap_tx_clean(kring) to do the * double check and see if we can free more buffers. * If there is space continue, else break; * NOTE: the double check is necessary if the problem * occurs in the txsync call after selrecord(). * Also, we need some way to tell the caller that not * all buffers were queued onto the device (this was * not a problem with native netmap driver where space * is preallocated). 
The bridge has a similar problem * and we solve it there by dropping the excess packets. */ generic_set_tx_event(kring, nm_i); if (generic_netmap_tx_clean(kring, gna->txqdisc)) { /* space now available */ continue; } else { break; } } /* In txqdisc mode, the netmap-aware qdisc * queue has the same length as the number of * netmap slots (N). Since tail is advanced * only when packets are dequeued, qdisc * queue overrun cannot happen, so * nm_os_generic_xmit_frame() did not fail * because of that. * However, packets can be dropped because * carrier is off, or because our qdisc is * being deactivated, or possibly for other * reasons. In these cases, we just let the * packet to be dropped. */ IFRATE(rate_ctx.new.txdrop++); } slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); nm_i = nm_next(nm_i, lim); IFRATE(rate_ctx.new.txpkt++); } if (a.head != NULL) { a.addr = NULL; nm_os_generic_xmit_frame(&a); } /* Update hwcur to the next slot to transmit. Here nm_i * is not necessarily head, we could break early. */ kring->nr_hwcur = nm_i; } /* * Second, reclaim completed buffers */ if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) { /* No more available slots? Set a notification event * on a netmap slot that will be cleaned in the future. * No doublecheck is performed, since txsync() will be * called twice by netmap_poll(). */ generic_set_tx_event(kring, nm_i); } generic_netmap_tx_clean(kring, gna->txqdisc); return 0; } /* * This handler is registered (through nm_os_catch_rx()) * within the attached network interface * in the RX subsystem, so that every mbuf passed up by * the driver can be stolen to the network stack. * Stolen packets are put in a queue where the * generic_netmap_rxsync() callback can extract them. * Returns 1 if the packet was stolen, 0 otherwise. */ int generic_rx_handler(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring; u_int work_done; u_int r = MBUF_RXQ(m); /* receive ring number */ if (r >= na->num_rx_rings) { r = r % na->num_rx_rings; } kring = na->rx_rings[r]; if (kring->nr_mode == NKR_NETMAP_OFF) { /* We must not intercept this mbuf. */ return 0; } /* limit the size of the queue */ if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) { /* This may happen when GRO/LRO features are enabled for * the NIC driver when the generic adapter does not * support RX scatter-gather. */ nm_prlim(2, "Warning: driver pushed up big packet " "(size=%d)", (int)MBUF_LEN(m)); m_freem(m); } else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) { m_freem(m); } else { mbq_safe_enqueue(&kring->rx_queue, m); } if (netmap_generic_mit < 32768) { /* no rx mitigation, pass notification up */ netmap_generic_irq(na, r, &work_done); } else { /* same as send combining, filter notification if there is a * pending timer, otherwise pass it up and start a timer. */ if (likely(nm_os_mitigation_active(&gna->mit[r]))) { /* Record that there is some pending work. */ gna->mit[r].mit_pending = 1; } else { netmap_generic_irq(na, r, &work_done); nm_os_mitigation_start(&gna->mit[r]); } } /* We have intercepted the mbuf. */ return 1; } /* * generic_netmap_rxsync() extracts mbufs from the queue filled by * generic_netmap_rx_handler() and puts their content in the netmap * receive ring. 
* Access must be protected because the rx handler is asynchronous, */ static int generic_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_ring *ring = kring->ring; struct netmap_adapter *na = kring->na; u_int nm_i; /* index into the netmap ring */ //j, u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; /* Adapter-specific variables. */ u_int nm_buf_len = NETMAP_BUF_SIZE(na); struct mbq tmpq; struct mbuf *m; int avail; /* in bytes */ int mlen; int copy; if (head > lim) return netmap_ring_reinit(kring); IFRATE(rate_ctx.new.rxsync++); /* * First part: skip past packets that userspace has released. * This can possibly make room for the second part. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* Userspace has released some packets. */ for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); } kring->nr_hwcur = head; } /* * Second part: import newly received packets. */ if (!netmap_no_pendintr && !force_update) { return 0; } nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */ /* Compute the available space (in bytes) in this netmap ring. * The first slot that is not considered in is the one before * nr_hwcur. */ avail = nm_prev(kring->nr_hwcur, lim) - nm_i; if (avail < 0) avail += lim + 1; avail *= nm_buf_len; /* First pass: While holding the lock on the RX mbuf queue, * extract as many mbufs as they fit the available space, * and put them in a temporary queue. * To avoid performing a per-mbuf division (mlen / nm_buf_len) to * to update avail, we do the update in a while loop that we * also use to set the RX slots, but without performing the copy. */ mbq_init(&tmpq); mbq_lock(&kring->rx_queue); for (n = 0;; n++) { m = mbq_peek(&kring->rx_queue); if (!m) { /* No more packets from the driver. */ break; } mlen = MBUF_LEN(m); if (mlen > avail) { /* No more space in the ring. */ break; } mbq_dequeue(&kring->rx_queue); while (mlen) { copy = nm_buf_len; if (mlen < copy) { copy = mlen; } mlen -= copy; avail -= nm_buf_len; ring->slot[nm_i].len = copy; ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0); nm_i = nm_next(nm_i, lim); } mbq_enqueue(&tmpq, m); } mbq_unlock(&kring->rx_queue); /* Second pass: Drain the temporary queue, going over the used RX slots, * and perform the copy out of the RX queue lock. */ nm_i = kring->nr_hwtail; for (;;) { void *nmaddr; int ofs = 0; int morefrag; m = mbq_dequeue(&tmpq); if (!m) { break; } do { nmaddr = NMB(na, &ring->slot[nm_i]); /* We only check the address here on generic rx rings. */ if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */ m_freem(m); mbq_purge(&tmpq); mbq_fini(&tmpq); return netmap_ring_reinit(kring); } copy = ring->slot[nm_i].len; m_copydata(m, ofs, copy, nmaddr); ofs += copy; morefrag = ring->slot[nm_i].flags & NS_MOREFRAG; nm_i = nm_next(nm_i, lim); } while (morefrag); m_freem(m); } mbq_fini(&tmpq); if (n) { kring->nr_hwtail = nm_i; IFRATE(rate_ctx.new.rxpkt += n); } kring->nr_kflags &= ~NKR_PENDINTR; return 0; } static void generic_netmap_dtor(struct netmap_adapter *na) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na; struct ifnet *ifp = netmap_generic_getifp(gna); struct netmap_adapter *prev_na = gna->prev; if (prev_na != NULL) { netmap_adapter_put(prev_na); if (nm_iszombie(na)) { /* * The driver has been removed without releasing * the reference so we need to do it here. 
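			 * (The adapter is a zombie when the underlying ifnet
			 * departed while netmap descriptors were still open;
			 * see netmap_make_zombie(), hooked to the ifnet
			 * departure event in the FreeBSD OS glue.)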
*/ netmap_adapter_put(prev_na); } nm_prinf("Native netmap adapter %p restored", prev_na); } NM_RESTORE_NA(ifp, prev_na); /* * netmap_detach_common(), that it's called after this function, * overrides WNA(ifp) if na->ifp is not NULL. */ na->ifp = NULL; nm_prinf("Emulated netmap adapter for %s destroyed", na->name); } int na_is_generic(struct netmap_adapter *na) { return na->nm_register == generic_netmap_register; } /* * generic_netmap_attach() makes it possible to use netmap on * a device without native netmap support. * This is less performant than native support but potentially * faster than raw sockets or similar schemes. * * In this "emulated" mode, netmap rings do not necessarily * have the same size as those in the NIC. We use a default * value and possibly override it if the OS has ways to fetch the * actual configuration. */ int generic_netmap_attach(struct ifnet *ifp) { struct netmap_adapter *na; struct netmap_generic_adapter *gna; int retval; u_int num_tx_desc, num_rx_desc; #ifdef __FreeBSD__ if (ifp->if_type == IFT_LOOP) { nm_prerr("if_loop is not supported by %s", __func__); return EINVAL; } #endif if (NM_NA_CLASH(ifp)) { /* If NA(ifp) is not null but there is no valid netmap * adapter it means that someone else is using the same * pointer (e.g. ax25_ptr on linux). This happens for * instance when also PF_RING is in use. */ nm_prerr("Error: netmap adapter hook is busy"); return EBUSY; } num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */ nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */ if (num_tx_desc == 0 || num_rx_desc == 0) { nm_prerr("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc); return EINVAL; } gna = nm_os_malloc(sizeof(*gna)); if (gna == NULL) { nm_prerr("no memory on attach, give up"); return ENOMEM; } na = (struct netmap_adapter *)gna; strlcpy(na->name, ifp->if_xname, sizeof(na->name)); na->ifp = ifp; na->num_tx_desc = num_tx_desc; na->num_rx_desc = num_rx_desc; na->rx_buf_maxsize = 32768; na->nm_register = &generic_netmap_register; na->nm_txsync = &generic_netmap_txsync; na->nm_rxsync = &generic_netmap_rxsync; na->nm_dtor = &generic_netmap_dtor; /* when using generic, NAF_NETMAP_ON is set so we force * NAF_SKIP_INTR to use the regular interrupt handler */ na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS; nm_prdis("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)", ifp->num_tx_queues, ifp->real_num_tx_queues, ifp->tx_queue_len); nm_prdis("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)", ifp->num_rx_queues, ifp->real_num_rx_queues); nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings); retval = netmap_attach_common(na); if (retval) { nm_os_free(gna); return retval; } if (NM_NA_VALID(ifp)) { gna->prev = NA(ifp); /* save old na */ netmap_adapter_get(gna->prev); } NM_ATTACH_NA(ifp, na); nm_os_generic_set_features(gna); nm_prinf("Emulated adapter for %s created (prev was %p)", na->name, gna->prev); return retval; } Index: stable/12/sys/dev/netmap/netmap_kern.h =================================================================== --- stable/12/sys/dev/netmap/netmap_kern.h (revision 344045) +++ stable/12/sys/dev/netmap/netmap_kern.h (revision 344046) @@ -1,2399 +1,2402 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * The header contains the definitions of constants and function * prototypes used only in kernelspace. */ #ifndef _NET_NETMAP_KERN_H_ #define _NET_NETMAP_KERN_H_ #if defined(linux) #if defined(CONFIG_NETMAP_EXTMEM) #define WITH_EXTMEM #endif #if defined(CONFIG_NETMAP_VALE) #define WITH_VALE #endif #if defined(CONFIG_NETMAP_PIPE) #define WITH_PIPES #endif #if defined(CONFIG_NETMAP_MONITOR) #define WITH_MONITOR #endif #if defined(CONFIG_NETMAP_GENERIC) #define WITH_GENERIC #endif #if defined(CONFIG_NETMAP_PTNETMAP) #define WITH_PTNETMAP #endif #if defined(CONFIG_NETMAP_SINK) #define WITH_SINK #endif #if defined(CONFIG_NETMAP_NULL) #define WITH_NMNULL #endif #elif defined (_WIN32) #define WITH_VALE // comment out to disable VALE support #define WITH_PIPES #define WITH_MONITOR #define WITH_GENERIC #define WITH_NMNULL #else /* neither linux nor windows */ #define WITH_VALE // comment out to disable VALE support #define WITH_PIPES #define WITH_MONITOR #define WITH_GENERIC #define WITH_PTNETMAP /* ptnetmap guest support */ #define WITH_EXTMEM #define WITH_NMNULL #endif #if defined(__FreeBSD__) #include #define likely(x) __builtin_expect((long)!!(x), 1L) #define unlikely(x) __builtin_expect((long)!!(x), 0L) #define __user #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */ #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */ #define NM_MTX_INIT(m) sx_init(&(m), #m) #define NM_MTX_DESTROY(m) sx_destroy(&(m)) #define NM_MTX_LOCK(m) sx_xlock(&(m)) #define NM_MTX_SPINLOCK(m) while (!sx_try_xlock(&(m))) ; #define NM_MTX_UNLOCK(m) sx_xunlock(&(m)) #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED) #define NM_SELINFO_T struct nm_selinfo #define NM_SELRECORD_T struct thread #define MBUF_LEN(m) ((m)->m_pkthdr.len) #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid) #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m)) #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif) #define NM_ATOMIC_T volatile int /* required by atomic/bitops.h */ /* atomic operations */ #include #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1)) #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0) #if __FreeBSD_version >= 1100030 #define WNA(_ifp) (_ifp)->if_netmap #else /* older FreeBSD */ #define WNA(_ifp) (_ifp)->if_pspare[0] 
#endif /* older FreeBSD */ #if __FreeBSD_version >= 1100005 struct netmap_adapter *netmap_getna(if_t ifp); #endif #if __FreeBSD_version >= 1100027 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count) #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x #else #define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1) #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x #endif #define MBUF_QUEUED(m) 1 struct nm_selinfo { struct selinfo si; struct mtx m; }; struct hrtimer { /* Not used in FreeBSD. */ }; #define NM_BNS_GET(b) #define NM_BNS_PUT(b) #elif defined (linux) #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h #define NM_SELINFO_T wait_queue_head_t #define MBUF_LEN(m) ((m)->len) #define MBUF_TRANSMIT(na, ifp, m) \ ({ \ /* Avoid infinite recursion with generic. */ \ m->priority = NM_MAGIC_PRIORITY_TX; \ (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \ 0; \ }) /* See explanation in nm_os_generic_xmit_frame. */ #define GEN_TX_MBUF_IFP(m) ((struct ifnet *)skb_shinfo(m)->destructor_arg) #define NM_ATOMIC_T volatile long unsigned int #define NM_MTX_T struct mutex /* OS-specific sleepable lock */ #define NM_MTX_INIT(m) mutex_init(&(m)) #define NM_MTX_DESTROY(m) do { (void)(m); } while (0) #define NM_MTX_LOCK(m) mutex_lock(&(m)) #define NM_MTX_UNLOCK(m) mutex_unlock(&(m)) #define NM_MTX_ASSERT(m) mutex_is_locked(&(m)) #ifndef DEV_NETMAP #define DEV_NETMAP #endif /* DEV_NETMAP */ #elif defined (__APPLE__) #warning apple support is incomplete. #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define NM_LOCK_T IOLock * #define NM_SELINFO_T struct selinfo #define MBUF_LEN(m) ((m)->m_pkthdr.len) #elif defined (_WIN32) #include "../../../WINDOWS/win_glue.h" #define NM_SELRECORD_T IO_STACK_LOCATION #define NM_SELINFO_T win_SELINFO // see win_glue.h #define NM_LOCK_T win_spinlock_t // see win_glue.h #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */ #define NM_MTX_INIT(m) KeInitializeGuardedMutex(&m); #define NM_MTX_DESTROY(m) do { (void)(m); } while (0) #define NM_MTX_LOCK(m) KeAcquireGuardedMutex(&(m)) #define NM_MTX_UNLOCK(m) KeReleaseGuardedMutex(&(m)) #define NM_MTX_ASSERT(m) assert(&m.Count>0) //These linknames are for the NDIS driver #define NETMAP_NDIS_LINKNAME_STRING L"\\DosDevices\\NMAPNDIS" #define NETMAP_NDIS_NTDEVICE_STRING L"\\Device\\NMAPNDIS" //Definition of internal driver-to-driver ioctl codes #define NETMAP_KERNEL_XCHANGE_POINTERS _IO('i', 180) #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL _IO_direct('i', 195) typedef struct hrtimer{ KTIMER timer; BOOLEAN active; KDPC deferred_proc; }; /* MSVC does not have likely/unlikely support */ #ifdef _MSC_VER #define likely(x) (x) #define unlikely(x) (x) #else #define likely(x) __builtin_expect((long)!!(x), 1L) #define unlikely(x) __builtin_expect((long)!!(x), 0L) #endif //_MSC_VER #else #error unsupported platform #endif /* end - platform-specific code */ #ifndef _WIN32 /* support for emulated sysctl */ #define SYSBEGIN(x) #define SYSEND #endif /* _WIN32 */ #define NM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x)) #define NMG_LOCK_T NM_MTX_T #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock) #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock) #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock) #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock) #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock) #if defined(__FreeBSD__) #define nm_prerr_int printf #define nm_prinf_int printf #elif defined (_WIN32) #define nm_prerr_int 
DbgPrint #define nm_prinf_int DbgPrint #elif defined(linux) #define nm_prerr_int(fmt, arg...) printk(KERN_ERR fmt, ##arg) #define nm_prinf_int(fmt, arg...) printk(KERN_INFO fmt, ##arg) #endif #define nm_prinf(format, ...) \ do { \ struct timeval __xxts; \ microtime(&__xxts); \ nm_prinf_int("%03d.%06d [%4d] %-25s " format "\n",\ (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ __LINE__, __FUNCTION__, ##__VA_ARGS__); \ } while (0) #define nm_prerr(format, ...) \ do { \ struct timeval __xxts; \ microtime(&__xxts); \ nm_prerr_int("%03d.%06d [%4d] %-25s " format "\n",\ (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ __LINE__, __FUNCTION__, ##__VA_ARGS__); \ } while (0) -/* Disabled printf (used to be ND). */ +/* Disabled printf (used to be nm_prdis). */ #define nm_prdis(format, ...) /* Rate limited, lps indicates how many per second. */ #define nm_prlim(lps, format, ...) \ do { \ static int t0, __cnt; \ if (t0 != time_second) { \ t0 = time_second; \ __cnt = 0; \ } \ if (__cnt++ < lps) \ nm_prinf(format, ##__VA_ARGS__); \ } while (0) -/* Old macros. */ -#define ND nm_prdis -#define D nm_prerr -#define RD nm_prlim - struct netmap_adapter; struct nm_bdg_fwd; struct nm_bridge; struct netmap_priv_d; struct nm_bdg_args; /* os-specific NM_SELINFO_T initialzation/destruction functions */ void nm_os_selinfo_init(NM_SELINFO_T *); void nm_os_selinfo_uninit(NM_SELINFO_T *); const char *nm_dump_buf(char *p, int len, int lim, char *dst); void nm_os_selwakeup(NM_SELINFO_T *si); void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si); int nm_os_ifnet_init(void); void nm_os_ifnet_fini(void); void nm_os_ifnet_lock(void); void nm_os_ifnet_unlock(void); unsigned nm_os_ifnet_mtu(struct ifnet *ifp); void nm_os_get_module(void); void nm_os_put_module(void); void netmap_make_zombie(struct ifnet *); void netmap_undo_zombie(struct ifnet *); /* os independent alloc/realloc/free */ void *nm_os_malloc(size_t); void *nm_os_vmalloc(size_t); void *nm_os_realloc(void *, size_t new_size, size_t old_size); void nm_os_free(void *); void nm_os_vfree(void *); /* os specific attach/detach enter/exit-netmap-mode routines */ void nm_os_onattach(struct ifnet *); void nm_os_ondetach(struct ifnet *); void nm_os_onenter(struct ifnet *); void nm_os_onexit(struct ifnet *); /* passes a packet up to the host stack. * If the packet is sent (or dropped) immediately it returns NULL, * otherwise it links the packet to prev and returns m. * In this case, a final call with m=NULL and prev != NULL will send up * the entire chain to the host stack. */ void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev); int nm_os_mbuf_has_seg_offld(struct mbuf *m); int nm_os_mbuf_has_csum_offld(struct mbuf *m); #include "netmap_mbq.h" extern NMG_LOCK_T netmap_global_lock; enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX }; static __inline const char* nm_txrx2str(enum txrx t) { return (t== NR_RX ? "RX" : "TX"); } static __inline enum txrx nm_txrx_swap(enum txrx t) { return (t== NR_RX ? NR_TX : NR_RX); } #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++) #ifdef WITH_MONITOR struct netmap_zmon_list { struct netmap_kring *next; struct netmap_kring *prev; }; #endif /* WITH_MONITOR */ /* * private, kernel view of a ring. Keeps track of the status of * a ring across system calls. * * nr_hwcur index of the next buffer to refill. * It corresponds to ring->head * at the time the system call returns. * * nr_hwtail index of the first buffer owned by the kernel. * On RX, hwcur->hwtail are receive buffers * not yet released. 
hwcur is advanced following * ring->head, hwtail is advanced on incoming packets, * and a wakeup is generated when hwtail passes ring->cur * On TX, hwcur->rcur have been filled by the sender * but not sent yet to the NIC; rcur->hwtail are available * for new transmissions, and hwtail->hwcur-1 are pending * transmissions not yet acknowledged. * * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots. * This is so that, on a reset, buffers owned by userspace are not * modified by the kernel. In particular: * RX rings: the next empty buffer (hwtail + hwofs) coincides with * the next empty buffer as known by the hardware (next_to_check or so). * TX rings: hwcur + hwofs coincides with next_to_send * * The following fields are used to implement lock-free copy of packets * from input to output ports in VALE switch: * nkr_hwlease buffer after the last one being copied. * A writer in nm_bdg_flush reserves N buffers * from nr_hwlease, advances it, then does the * copy outside the lock. * In RX rings (used for VALE ports), * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1 * In TX rings (used for NIC or host stack ports) * nkr_hwcur <= nkr_hwlease < nkr_hwtail * nkr_leases array of nkr_num_slots where writers can report * completion of their block. NR_NOSLOT (~0) indicates * that the writer has not finished yet * nkr_lease_idx index of next free slot in nr_leases, to be assigned * * The kring is manipulated by txsync/rxsync and generic netmap function. * * Concurrent rxsync or txsync on the same ring are prevented through * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need * for NIC rings, and for TX rings attached to the host stack. * * RX rings attached to the host stack use an mbq (rx_queue) on both * rxsync_from_host() and netmap_transmit(). The mbq is protected * by its internal lock. * * RX rings attached to the VALE switch are accessed by both senders * and receiver. They are protected through the q_lock on the RX ring. */ struct netmap_kring { struct netmap_ring *ring; uint32_t nr_hwcur; /* should be nr_hwhead */ uint32_t nr_hwtail; /* * Copies of values in user rings, so we do not need to look * at the ring (which could be modified). These are set in the * *sync_prologue()/finalize() routines. */ uint32_t rhead; uint32_t rcur; uint32_t rtail; uint32_t nr_kflags; /* private driver flags */ #define NKR_PENDINTR 0x1 // Pending interrupt. #define NKR_EXCLUSIVE 0x2 /* exclusive binding */ #define NKR_FORWARD 0x4 /* (host ring only) there are packets to forward */ #define NKR_NEEDRING 0x8 /* ring needed even if users==0 * (used internally by pipes and * by ptnetmap host ports) */ #define NKR_NOINTR 0x10 /* don't use interrupts on this ring */ #define NKR_FAKERING 0x20 /* don't allocate/free buffers */ uint32_t nr_mode; uint32_t nr_pending_mode; #define NKR_NETMAP_OFF 0x0 #define NKR_NETMAP_ON 0x1 uint32_t nkr_num_slots; /* * On a NIC reset, the NIC ring indexes may be reset but the * indexes in the netmap rings remain the same. nkr_hwofs * keeps track of the offset between the two. */ int32_t nkr_hwofs; /* last_reclaim is opaque marker to help reduce the frequency * of operations such as reclaiming tx buffers. A possible use * is set it to ticks and do the reclaim only once per tick. */ uint64_t last_reclaim; NM_SELINFO_T si; /* poll/select wait queue */ NM_LOCK_T q_lock; /* protects kring and ring. 
*/ NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */ /* the adapter that owns this kring */ struct netmap_adapter *na; /* the adapter that wants to be notified when this kring has * new slots available. This is usually the same as the above, * but wrappers may let it point to themselves */ struct netmap_adapter *notify_na; /* The following fields are for VALE switch support */ struct nm_bdg_fwd *nkr_ft; uint32_t *nkr_leases; #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */ uint32_t nkr_hwlease; uint32_t nkr_lease_idx; /* while nkr_stopped is set, no new [tr]xsync operations can * be started on this kring. * This is used by netmap_disable_all_rings() * to find a synchronization point where critical data * structures pointed to by the kring can be added or removed */ volatile int nkr_stopped; /* Support for adapters without native netmap support. * On tx rings we preallocate an array of tx buffers * (same size as the netmap ring), on rx rings we * store incoming mbufs in a queue that is drained by * a rxsync. */ struct mbuf **tx_pool; struct mbuf *tx_event; /* TX event used as a notification */ NM_LOCK_T tx_event_lock; /* protects the tx_event mbuf */ struct mbq rx_queue; /* intercepted rx mbufs. */ uint32_t users; /* existing bindings for this ring */ uint32_t ring_id; /* kring identifier */ enum txrx tx; /* kind of ring (tx or rx) */ char name[64]; /* diagnostic */ /* [tx]sync callback for this kring. * The default nm_krings_create callback (netmap_krings_create) * sets the nm_sync callback of each hardware tx(rx) kring to * the corresponding nm_txsync(nm_rxsync) taken from the * netmap_adapter; moreover, it sets the sync callback * of the host tx(rx) ring to netmap_txsync_to_host * (netmap_rxsync_from_host). * * Overrides: the above configuration is not changed by * any of the nm_krings_create callbacks. */ int (*nm_sync)(struct netmap_kring *kring, int flags); int (*nm_notify)(struct netmap_kring *kring, int flags); #ifdef WITH_PIPES struct netmap_kring *pipe; /* if this is a pipe ring, * pointer to the other end */ uint32_t pipe_tail; /* hwtail updated by the other end */ #endif /* WITH_PIPES */ int (*save_notify)(struct netmap_kring *kring, int flags); #ifdef WITH_MONITOR /* array of krings that are monitoring this kring */ struct netmap_kring **monitors; uint32_t max_monitors; /* current size of the monitors array */ uint32_t n_monitors; /* next unused entry in the monitor array */ uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */ uint32_t mon_tail; /* last seen slot on rx */ /* circular list of zero-copy monitors */ struct netmap_zmon_list zmon_list[NR_TXRX]; /* * Monitors work by intercepting the sync and notify callbacks of the * monitored krings.
This is implemented by replacing the pointers * above and saving the previous ones in mon_* pointers below */ int (*mon_sync)(struct netmap_kring *kring, int flags); int (*mon_notify)(struct netmap_kring *kring, int flags); #endif } #ifdef _WIN32 __declspec(align(64)); #else __attribute__((__aligned__(64))); #endif /* return 1 iff the kring needs to be turned on */ static inline int nm_kring_pending_on(struct netmap_kring *kring) { return kring->nr_pending_mode == NKR_NETMAP_ON && kring->nr_mode == NKR_NETMAP_OFF; } /* return 1 iff the kring needs to be turned off */ static inline int nm_kring_pending_off(struct netmap_kring *kring) { return kring->nr_pending_mode == NKR_NETMAP_OFF && kring->nr_mode == NKR_NETMAP_ON; } /* return the next index, with wraparound */ static inline uint32_t nm_next(uint32_t i, uint32_t lim) { return unlikely (i == lim) ? 0 : i + 1; } /* return the previous index, with wraparound */ static inline uint32_t nm_prev(uint32_t i, uint32_t lim) { return unlikely (i == 0) ? lim : i - 1; } /* * * Here is the layout for the Rx and Tx rings. RxRING TxRING +-----------------+ +-----------------+ | | | | | free | | free | +-----------------+ +-----------------+ head->| owned by user |<-hwcur | not sent to nic |<-hwcur | | | yet | +-----------------+ | | cur->| available to | | | | user, not read | +-----------------+ | yet | cur->| (being | | | | prepared) | | | | | +-----------------+ + ------ + tail->| |<-hwtail | |<-hwlease | (being | ... | | ... | prepared) | ... | | ... +-----------------+ ... | | ... | |<-hwlease +-----------------+ | | tail->| |<-hwtail | | | | | | | | | | | | +-----------------+ +-----------------+ * The cur/tail (user view) and hwcur/hwtail (kernel view) * are used in the normal operation of the card. * * When a ring is the output of a switch port (Rx ring for * a VALE port, Tx ring for the host stack or NIC), slots * are reserved in blocks through 'hwlease' which points * to the next unused slot. * On an Rx ring, hwlease is always after hwtail, * and completions cause hwtail to advance. * On a Tx ring, hwlease is always between cur and hwtail, * and completions cause cur to advance. * * nm_kr_space() returns the maximum number of slots that * can be assigned. * nm_kr_lease() reserves the required number of buffers, * advances nkr_hwlease and also returns an entry in * a circular array where completions should be reported. */ struct lut_entry; #ifdef __FreeBSD__ #define plut_entry lut_entry #endif struct netmap_lut { struct lut_entry *lut; struct plut_entry *plut; uint32_t objtotal; /* max buffer index */ uint32_t objsize; /* buffer size */ }; struct netmap_vp_adapter; // forward struct nm_bridge; /* Struct to be filled by nm_config callbacks. */ struct nm_config_info { unsigned num_tx_rings; unsigned num_rx_rings; unsigned num_tx_descs; unsigned num_rx_descs; unsigned rx_buf_maxsize; }; /* * default type for the magic field. * May be overriden in glue code. */ #ifndef NM_OS_MAGIC #define NM_OS_MAGIC uint32_t #endif /* !NM_OS_MAGIC */ /* * The "struct netmap_adapter" extends the "struct adapter" * (or equivalent) device descriptor. * It contains all base fields needed to support netmap operation. * There are in fact different types of netmap adapters * (native, generic, VALE switch...) so a netmap_adapter is * just the first field in the derived type. */ struct netmap_adapter { /* * On linux we do not have a good way to tell if an interface * is netmap-capable. 
So we always use the following trick: * NA(ifp) points here, and the first entry (which hopefully * always exists and is at least 32 bits) contains a magic * value which we can use to detect that the interface is good. */ NM_OS_MAGIC magic; uint32_t na_flags; /* enabled, and other flags */ #define NAF_SKIP_INTR 1 /* use the regular interrupt handler. * useful during initialization */ #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */ #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when * forwarding packets coming from this * interface */ #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area * that cannot be changed */ #define NAF_NATIVE 16 /* the adapter is native. * Virtual ports (non persistent vale ports, * pipes, monitors...) should never use * this flag. */ #define NAF_NETMAP_ON 32 /* netmap is active (either native or * emulated). Where possible (e.g. FreeBSD) * IFCAP_NETMAP also mirrors this flag. */ #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */ #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */ /* free */ #define NAF_MOREFRAG 512 /* the adapter supports NS_MOREFRAG */ #define NAF_ZOMBIE (1U<<30) /* the nic driver has been unloaded */ #define NAF_BUSY (1U<<31) /* the adapter is used internally and * cannot be registered from userspace */ int active_fds; /* number of user-space descriptors using this interface, which is equal to the number of struct netmap_if objs in the mapped region. */ u_int num_rx_rings; /* number of adapter receive rings */ u_int num_tx_rings; /* number of adapter transmit rings */ u_int num_host_rx_rings; /* number of host receive rings */ u_int num_host_tx_rings; /* number of host transmit rings */ u_int num_tx_desc; /* number of descriptor in each queue */ u_int num_rx_desc; /* tx_rings and rx_rings are private but allocated as a * contiguous chunk of memory. Each array has N+K entries, * N for the hardware rings and K for the host rings. */ struct netmap_kring **tx_rings; /* array of TX rings. */ struct netmap_kring **rx_rings; /* array of RX rings. */ void *tailroom; /* space below the rings array */ /* (used for leases) */ NM_SELINFO_T si[NR_TXRX]; /* global wait queues */ /* count users of the global wait queues */ int si_users[NR_TXRX]; void *pdev; /* used to store pci device */ /* copy of if_qflush and if_transmit pointers, to intercept * packets from the network stack when netmap is active. */ int (*if_transmit)(struct ifnet *, struct mbuf *); /* copy of if_input for netmap_send_up() */ void (*if_input)(struct ifnet *, struct mbuf *); /* Back reference to the parent ifnet struct. Used for * hardware ports (emulated netmap included). */ struct ifnet *ifp; /* adapter is ifp->if_softc */ /*---- callbacks for this netmap adapter -----*/ /* * nm_dtor() is the cleanup routine called when destroying * the adapter. * Called with NMG_LOCK held. * * nm_register() is called on NIOCREGIF and close() to enter * or exit netmap mode on the NIC * Called with NNG_LOCK held. * * nm_txsync() pushes packets to the underlying hw/switch * * nm_rxsync() collects packets from the underlying hw/switch * * nm_config() returns configuration information from the OS * Called with NMG_LOCK held. * * nm_krings_create() create and init the tx_rings and * rx_rings arrays of kring structures. In particular, * set the nm_sync callbacks for each ring. * There is no need to also allocate the corresponding * netmap_rings, since netmap_mem_rings_create() will always * be called to provide the missing ones. 
* Called with NNG_LOCK held. * * nm_krings_delete() cleanup and delete the tx_rings and rx_rings * arrays * Called with NMG_LOCK held. * * nm_notify() is used to act after data have become available * (or the stopped state of the ring has changed) * For hw devices this is typically a selwakeup(), * but for NIC/host ports attached to a switch (or vice-versa) * we also need to invoke the 'txsync' code downstream. * This callback pointer is actually used only to initialize * kring->nm_notify. * Return values are the same as for netmap_rx_irq(). */ void (*nm_dtor)(struct netmap_adapter *); int (*nm_register)(struct netmap_adapter *, int onoff); void (*nm_intr)(struct netmap_adapter *, int onoff); int (*nm_txsync)(struct netmap_kring *kring, int flags); int (*nm_rxsync)(struct netmap_kring *kring, int flags); int (*nm_notify)(struct netmap_kring *kring, int flags); #define NAF_FORCE_READ 1 #define NAF_FORCE_RECLAIM 2 #define NAF_CAN_FORWARD_DOWN 4 /* return configuration information */ int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info); int (*nm_krings_create)(struct netmap_adapter *); void (*nm_krings_delete)(struct netmap_adapter *); /* * nm_bdg_attach() initializes the na_vp field to point * to an adapter that can be attached to a VALE switch. If the * current adapter is already a VALE port, na_vp is simply a cast; * otherwise, na_vp points to a netmap_bwrap_adapter. * If applicable, this callback also initializes na_hostvp, * that can be used to connect the adapter host rings to the * switch. * Called with NMG_LOCK held. * * nm_bdg_ctl() is called on the actual attach/detach to/from * to/from the switch, to perform adapter-specific * initializations * Called with NMG_LOCK held. */ int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *, struct nm_bridge *); int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *); /* adapter used to attach this adapter to a VALE switch (if any) */ struct netmap_vp_adapter *na_vp; /* adapter used to attach the host rings of this adapter * to a VALE switch (if any) */ struct netmap_vp_adapter *na_hostvp; /* standard refcount to control the lifetime of the adapter * (it should be equal to the lifetime of the corresponding ifp) */ int na_refcount; /* memory allocator (opaque) * We also cache a pointer to the lut_entry for translating * buffer addresses, the total number of buffers and the buffer size. */ struct netmap_mem_d *nm_mem; struct netmap_mem_d *nm_mem_prev; struct netmap_lut na_lut; /* additional information attached to this adapter * by other netmap subsystems. Currently used by * bwrap, LINUX/v1000 and ptnetmap */ void *na_private; /* array of pipes that have this adapter as a parent */ struct netmap_pipe_adapter **na_pipes; int na_next_pipe; /* next free slot in the array */ int na_max_pipes; /* size of the array */ /* Offset of ethernet header for each packet. */ u_int virt_hdr_len; /* Max number of bytes that the NIC can store in the buffer * referenced by each RX descriptor. This translates to the maximum * bytes that a single netmap slot can reference. Larger packets * require NS_MOREFRAG support. */ unsigned rx_buf_maxsize; char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */ #ifdef WITH_MONITOR unsigned long monitor_id; /* debugging */ #endif }; static __inline u_int nma_get_ndesc(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? 
na->num_tx_desc : na->num_rx_desc); } static __inline void nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v) { if (t == NR_TX) na->num_tx_desc = v; else na->num_rx_desc = v; } static __inline u_int nma_get_nrings(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings); } static __inline u_int nma_get_host_nrings(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings); } static __inline void nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v) { if (t == NR_TX) na->num_tx_rings = v; else na->num_rx_rings = v; } static __inline void nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v) { if (t == NR_TX) na->num_host_tx_rings = v; else na->num_host_rx_rings = v; } static __inline struct netmap_kring** NMR(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->tx_rings : na->rx_rings); } int nma_intr_enable(struct netmap_adapter *na, int onoff); /* * If the NIC is owned by the kernel * (i.e., bridge), neither another bridge nor user can use it; * if the NIC is owned by a user, only users can share it. * Evaluation must be done under NMG_LOCK(). */ #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY) #define NETMAP_OWNED_BY_ANY(na) \ (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0)) /* * derived netmap adapters for various types of ports */ struct netmap_vp_adapter { /* VALE software port */ struct netmap_adapter up; /* * Bridge support: * * bdg_port is the port number used in the bridge; * na_bdg points to the bridge this NA is attached to. */ int bdg_port; struct nm_bridge *na_bdg; int retry; int autodelete; /* remove the ifp on last reference */ /* Maximum Frame Size, used in bdg_mismatch_datapath() */ u_int mfs; /* Last source MAC on this port */ uint64_t last_smac; }; struct netmap_hw_adapter { /* physical device */ struct netmap_adapter up; #ifdef linux struct net_device_ops nm_ndo; struct ethtool_ops nm_eto; #endif const struct ethtool_ops* save_ethtool; int (*nm_hw_register)(struct netmap_adapter *, int onoff); }; #ifdef WITH_GENERIC /* Mitigation support. */ struct nm_generic_mit { struct hrtimer mit_timer; int mit_pending; int mit_ring_idx; /* index of the ring being mitigated */ struct netmap_adapter *mit_na; /* backpointer */ }; struct netmap_generic_adapter { /* emulated device */ struct netmap_hw_adapter up; /* Pointer to a previously used netmap adapter. */ struct netmap_adapter *prev; /* Emulated netmap adapters support: * - save_if_input saves the if_input hook (FreeBSD); * - mit implements rx interrupt mitigation; */ void (*save_if_input)(struct ifnet *, struct mbuf *); struct nm_generic_mit *mit; #ifdef linux netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *); #endif /* Is the adapter able to use multiple RX slots to scatter * each packet pushed up by the driver? */ int rxsg; /* Is the transmission path controlled by a netmap-aware * device queue (i.e. qdisc on linux)? 
*/ int txqdisc; }; #endif /* WITH_GENERIC */ static __inline u_int netmap_real_rings(struct netmap_adapter *na, enum txrx t) { return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t); } /* account for fake rings */ static __inline u_int netmap_all_rings(struct netmap_adapter *na, enum txrx t) { return max(nma_get_nrings(na, t) + 1, netmap_real_rings(na, t)); } int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na, struct nm_bridge *); struct nm_bdg_polling_state; /* * Bridge wrapper for non VALE ports attached to a VALE switch. * * The real device must already have its own netmap adapter (hwna). * The bridge wrapper and the hwna adapter share the same set of * netmap rings and buffers, but they have two separate sets of * krings descriptors, with tx/rx meanings swapped: * * netmap * bwrap krings rings krings hwna * +------+ +------+ +-----+ +------+ +------+ * |tx_rings->| |\ /| |----| |<-tx_rings| * | | +------+ \ / +-----+ +------+ | | * | | X | | * | | / \ | | * | | +------+/ \+-----+ +------+ | | * |rx_rings->| | | |----| |<-rx_rings| * | | +------+ +-----+ +------+ | | * +------+ +------+ * * - packets coming from the bridge go to the bwrap rx rings, * which are also the hwna tx rings. The bwrap notify callback * will then complete the hwna tx (see netmap_bwrap_notify). * * - packets coming from the outside go to the hwna rx rings, * which are also the bwrap tx rings. The (overwritten) hwna * notify method will then complete the bridge tx * (see netmap_bwrap_intr_notify). * * The bridge wrapper may optionally connect the hwna 'host' rings * to the bridge. This is done by using a second port in the * bridge and connecting it to the 'host' netmap_vp_adapter * contained in the netmap_bwrap_adapter. The bwrap host adapter * cross-links the hwna host rings in the same way as shown above. * * - packets coming from the bridge and directed to the host stack * are handled by the bwrap host notify callback * (see netmap_bwrap_host_notify) * * - packets coming from the host stack are still handled by the * overwritten hwna notify callback (netmap_bwrap_intr_notify), * but are diverted to the host adapter depending on the ring number. * */ struct netmap_bwrap_adapter { struct netmap_vp_adapter up; struct netmap_vp_adapter host; /* for host rings */ struct netmap_adapter *hwna; /* the underlying device */ /* * When we attach a physical interface to the bridge, we * allow the controlling process to terminate, so we need * a place to store the netmap_priv_d data structure. * This is only done when physical interfaces * are attached to a bridge.
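 *
 * As an illustration of the tx/rx crossing described above, a bwrap
 * notify handler for ring i conceptually does something like the
 * sketch below. This is heavily simplified: the real code in
 * netmap_bwrap_notify() also grabs the hw kring with nm_kr_tryget()
 * and runs an rxsync on the VALE side first. The cast is only meant
 * to show that the bwrap krings lead back to the containing
 * netmap_bwrap_adapter, whose first member is the vp adapter that
 * owns them:
 *
 *	static int
 *	bwrap_notify_sketch(struct netmap_kring *kring, int flags)
 *	{
 *		struct netmap_bwrap_adapter *bna =
 *			(struct netmap_bwrap_adapter *)kring->na;
 *		struct netmap_kring *hw_kring =
 *			bna->hwna->tx_rings[kring->ring_id];
 *
 *		// the bwrap rx kring shares its buffers with hw_kring,
 *		// so pushing them out is just a txsync on the hwna side
 *		hw_kring->rhead = hw_kring->rcur = kring->nr_hwtail;
 *		return hw_kring->nm_sync(hw_kring, flags);
 *	}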
*/ struct netmap_priv_d *na_kpriv; struct nm_bdg_polling_state *na_polling_state; /* we overwrite the hwna->na_vp pointer, so we save * here its original value, to be restored at detach */ struct netmap_vp_adapter *saved_na_vp; }; int nm_bdg_polling(struct nmreq_header *hdr); #ifdef WITH_VALE int netmap_vale_attach(struct nmreq_header *hdr, void *auth_token); int netmap_vale_detach(struct nmreq_header *hdr, void *auth_token); int netmap_vale_list(struct nmreq_header *hdr); int netmap_vi_create(struct nmreq_header *hdr, int); int nm_vi_create(struct nmreq_header *); int nm_vi_destroy(const char *name); #else /* !WITH_VALE */ #define netmap_vi_create(hdr, a) (EOPNOTSUPP) #endif /* WITH_VALE */ #ifdef WITH_PIPES #define NM_MAXPIPES 64 /* max number of pipes per adapter */ struct netmap_pipe_adapter { /* pipe identifier is up.name */ struct netmap_adapter up; #define NM_PIPE_ROLE_MASTER 0x1 #define NM_PIPE_ROLE_SLAVE 0x2 int role; /* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */ struct netmap_adapter *parent; /* adapter that owns the memory */ struct netmap_pipe_adapter *peer; /* the other end of the pipe */ int peer_ref; /* 1 iff we are holding a ref to the peer */ struct ifnet *parent_ifp; /* maybe null */ u_int parent_slot; /* index in the parent pipe array */ }; #endif /* WITH_PIPES */ #ifdef WITH_NMNULL struct netmap_null_adapter { struct netmap_adapter up; }; #endif /* WITH_NMNULL */ /* return slots reserved to rx clients; used in drivers */ static inline uint32_t nm_kr_rxspace(struct netmap_kring *k) { int space = k->nr_hwtail - k->nr_hwcur; if (space < 0) space += k->nkr_num_slots; - ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail); + nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail); return space; } /* return slots reserved to tx clients */ #define nm_kr_txspace(_k) nm_kr_rxspace(_k) /* True if no space in the tx ring, only valid after txsync_prologue */ static inline int nm_kr_txempty(struct netmap_kring *kring) { return kring->rhead == kring->nr_hwtail; } /* True if no more completed slots in the rx ring, only valid after * rxsync_prologue */ #define nm_kr_rxempty(_k) nm_kr_txempty(_k) /* True if the application needs to wait for more space on the ring * (more received packets or more free tx slots). * Only valid after *xsync_prologue. */ static inline int nm_kr_wouldblock(struct netmap_kring *kring) { return kring->rcur == kring->nr_hwtail; } /* * protect against multiple threads using the same ring. * also check that the ring has not been stopped or locked */ #define NM_KR_BUSY 1 /* some other thread is syncing the ring */ #define NM_KR_STOPPED 2 /* unbounded stop (ifconfig down or driver unload) */ #define NM_KR_LOCKED 3 /* bounded, brief stop for mutual exclusion */ /* release the previously acquired right to use the *sync() methods of the ring */ static __inline void nm_kr_put(struct netmap_kring *kr) { NM_ATOMIC_CLEAR(&kr->nr_busy); } /* true if the ifp that backed the adapter has disappeared (e.g., the * driver has been unloaded) */ static inline int nm_iszombie(struct netmap_adapter *na); /* try to obtain exclusive right to issue the *sync() operations on the ring. * The right is obtained and must be later relinquished via nm_kr_put() if and * only if nm_kr_tryget() returns 0. 
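 *
 * The typical calling pattern (a simplified sketch of what
 * netmap_poll() and the ioctl path do for each ring in scope) is:
 *
 *	if (nm_kr_tryget(kring, 1, &revents)) {
 *		// NM_KR_BUSY or NM_KR_STOPPED: skip this ring;
 *		// POLLERR may have been added to revents
 *		continue;
 *	}
 *	error = kring->nm_sync(kring, flags);
 *	nm_kr_put(kring);
 *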
* If can_sleep is 1 there are only two other possible outcomes: * - the function returns NM_KR_BUSY * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr * (if non-null) * In both cases the caller will typically skip the ring, possibly collecting * errors along the way. * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep. * In the latter case, the function may also return NM_KR_LOCKED and leave *perr * untouched: ideally, the caller should try again at a later time. */ static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr) { int busy = 1, stopped; /* check a first time without taking the lock * to avoid starvation for nm_kr_get() */ retry: stopped = kr->nkr_stopped; if (unlikely(stopped)) { goto stop; } busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy); /* we should not return NM_KR_BUSY if the ring was * actually stopped, so check another time after * the barrier provided by the atomic operation */ stopped = kr->nkr_stopped; if (unlikely(stopped)) { goto stop; } if (unlikely(nm_iszombie(kr->na))) { stopped = NM_KR_STOPPED; goto stop; } return unlikely(busy) ? NM_KR_BUSY : 0; stop: if (!busy) nm_kr_put(kr); if (stopped == NM_KR_STOPPED) { /* if POLLERR is defined we want to use it to simplify netmap_poll(). * Otherwise, any non-zero value will do. */ #ifdef POLLERR #define NM_POLLERR POLLERR #else #define NM_POLLERR 1 #endif /* POLLERR */ if (perr) *perr |= NM_POLLERR; #undef NM_POLLERR } else if (can_sleep) { tsleep(kr, 0, "NM_KR_TRYGET", 4); goto retry; } return stopped; } /* put the ring in the 'stopped' state and wait for the current user (if any) to * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED */ static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped) { kr->nkr_stopped = stopped; while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)) tsleep(kr, 0, "NM_KR_GET", 4); } /* restart a ring after a stop */ static __inline void nm_kr_start(struct netmap_kring *kr) { kr->nkr_stopped = 0; nm_kr_put(kr); } /* * The following functions are used by individual drivers to * support netmap operation. * * netmap_attach() initializes a struct netmap_adapter, allocating the * struct netmap_ring's and the struct selinfo. * * netmap_detach() frees the memory allocated by netmap_attach(). * * netmap_transmit() replaces the if_transmit routine of the interface, * and is used to intercept packets coming from the stack. * * netmap_load_map/netmap_reload_map are helper routines to set/reset * the dmamap for a packet buffer * * netmap_reset() is a helper routine to be called in the hw driver * when reinitializing a ring. It should not be called by * virtual ports (vale, pipes, monitor) */ int netmap_attach(struct netmap_adapter *); int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg); void netmap_detach(struct ifnet *); int netmap_transmit(struct ifnet *, struct mbuf *); struct netmap_slot *netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur); int netmap_ring_reinit(struct netmap_kring *); int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *); /* Return codes for netmap_*x_irq. */ enum { /* Driver should do normal interrupt processing, e.g. because * the interface is not in netmap mode. */ NM_IRQ_PASS = 0, /* Port is in netmap mode, and the interrupt work has been * completed. The driver does not have to notify netmap * again before the next interrupt. 
*/ NM_IRQ_COMPLETED = -1, /* Port is in netmap mode, but the interrupt work has not been * completed. The driver has to make sure netmap will be * notified again soon, even if no more interrupts come (e.g. * on Linux the driver should not call napi_complete()). */ NM_IRQ_RESCHED = -2, }; /* default functions to handle rx/tx interrupts */ int netmap_rx_irq(struct ifnet *, u_int, u_int *); #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL) int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done); #ifdef WITH_VALE /* functions used by external modules to interface with VALE */ #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp) #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp) #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp) #define netmap_bdg_idx(_vp) ((_vp)->bdg_port) const char *netmap_bdg_name(struct netmap_vp_adapter *); #else /* !WITH_VALE */ #define netmap_vp_to_ifp(_vp) NULL #define netmap_ifp_to_vp(_ifp) NULL #define netmap_ifp_to_host_vp(_ifp) NULL #define netmap_bdg_idx(_vp) -1 #endif /* WITH_VALE */ static inline int nm_netmap_on(struct netmap_adapter *na) { return na && na->na_flags & NAF_NETMAP_ON; } static inline int nm_native_on(struct netmap_adapter *na) { return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE); } static inline int nm_iszombie(struct netmap_adapter *na) { return na == NULL || (na->na_flags & NAF_ZOMBIE); } static inline void nm_update_hostrings_mode(struct netmap_adapter *na) { /* Process nr_mode and nr_pending_mode for host rings. */ na->tx_rings[na->num_tx_rings]->nr_mode = na->tx_rings[na->num_tx_rings]->nr_pending_mode; na->rx_rings[na->num_rx_rings]->nr_mode = na->rx_rings[na->num_rx_rings]->nr_pending_mode; } void nm_set_native_flags(struct netmap_adapter *); void nm_clear_native_flags(struct netmap_adapter *); +void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff); + /* * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap * kthreads. * We need netmap_ring* parameter, because in ptnetmap it is decoupled * from host kring. * The user-space ring pointers (head/cur/tail) are shared through * CSB between host and guest. */ /* * validates parameters in the ring/kring, returns a value for head * If any error, returns ring_size to force a reinit. */ uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *); /* * validates parameters in the ring/kring, returns a value for head * If any error, returns ring_size lim to force a reinit. */ uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *); /* check/fix address and len in tx rings */ #if 1 /* debug version */ #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \ - RD(5, "bad addr/len ring %d slot %d idx %d len %d", \ + nm_prlim(5, "bad addr/len ring %d slot %d idx %d len %d", \ kring->ring_id, nm_i, slot->buf_idx, len); \ if (_l > NETMAP_BUF_SIZE(_na)) \ _l = NETMAP_BUF_SIZE(_na); \ } } while (0) #else /* no debug version */ #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ if (_l > NETMAP_BUF_SIZE(_na)) \ _l = NETMAP_BUF_SIZE(_na); \ } while (0) #endif /*---------------------------------------------------------------*/ /* * Support routines used by netmap subsystems * (native drivers, VALE, generic, pipes, monitors, ...) */ /* common routine for all functions that create a netmap adapter. 
It performs * two main tasks: * - if the na points to an ifp, mark the ifp as netmap capable * using na as its native adapter; * - provide defaults for the setup callbacks and the memory allocator */ int netmap_attach_common(struct netmap_adapter *); /* fill priv->np_[tr]xq{first,last} using the ringid and flags information * coming from a struct nmreq_register */ int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags); /* update the ring parameters (number and size of tx and rx rings). * It calls the nm_config callback, if available. */ int netmap_update_config(struct netmap_adapter *na); /* create and initialize the common fields of the krings array. * using the information that must be already available in the na. * tailroom can be used to request the allocation of additional * tailroom bytes after the krings array. This is used by * netmap_vp_adapter's (i.e., VALE ports) to make room for * leasing-related data structures */ int netmap_krings_create(struct netmap_adapter *na, u_int tailroom); /* deletes the kring array of the adapter. The array must have * been created using netmap_krings_create */ void netmap_krings_delete(struct netmap_adapter *na); int netmap_hw_krings_create(struct netmap_adapter *na); void netmap_hw_krings_delete(struct netmap_adapter *na); /* set the stopped/enabled status of ring * When stopping, they also wait for all current activity on the ring to * terminate. The status change is then notified using the na nm_notify * callback. */ void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped); /* set the stopped/enabled status of all rings of the adapter. */ void netmap_set_all_rings(struct netmap_adapter *, int stopped); /* convenience wrappers for netmap_set_all_rings */ void netmap_disable_all_rings(struct ifnet *); void netmap_enable_all_rings(struct ifnet *); int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu); int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags); void netmap_do_unregif(struct netmap_priv_d *priv); u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg); int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct ifnet **ifp, struct netmap_mem_d *nmd, int create); void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp); int netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na); #ifdef WITH_VALE uint32_t netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, struct netmap_vp_adapter *, void *private_data); /* these are redefined in case of no VALE support */ int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); void *netmap_vale_create(const char *bdg_name, int *return_status); int netmap_vale_destroy(const char *bdg_name, void *auth_token); #else /* !WITH_VALE */ #define netmap_bdg_learning(_1, _2, _3, _4) 0 #define netmap_get_vale_na(_1, _2, _3, _4) 0 #define netmap_bdg_create(_1, _2) NULL #define netmap_bdg_destroy(_1, _2) 0 #endif /* !WITH_VALE */ #ifdef WITH_PIPES /* max number of pipes per device */ #define NM_MAXPIPES 64 /* XXX this should probably be a sysctl */ void netmap_pipe_dealloc(struct netmap_adapter *); int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); #else /* !WITH_PIPES */ #define NM_MAXPIPES 0 #define netmap_pipe_alloc(_1, 
_2) 0 #define netmap_pipe_dealloc(_1) #define netmap_get_pipe_na(hdr, _2, _3, _4) \ ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0) #endif #ifdef WITH_MONITOR int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); void netmap_monitor_stop(struct netmap_adapter *na); #else #define netmap_get_monitor_na(hdr, _2, _3, _4) \ (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0) #endif #ifdef WITH_NMNULL int netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); #else /* !WITH_NMNULL */ #define netmap_get_null_na(hdr, _2, _3, _4) \ (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0) #endif /* WITH_NMNULL */ #ifdef CONFIG_NET_NS struct net *netmap_bns_get(void); void netmap_bns_put(struct net *); void netmap_bns_getbridges(struct nm_bridge **, u_int *); #else extern struct nm_bridge *nm_bridges; #define netmap_bns_get() #define netmap_bns_put(_1) #define netmap_bns_getbridges(b, n) \ do { *b = nm_bridges; *n = NM_BRIDGES; } while (0) #endif /* Various prototypes */ int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td); int netmap_init(void); void netmap_fini(void); int netmap_get_memory(struct netmap_priv_d* p); void netmap_dtor(void *data); int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *, int nr_body_is_user); int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *td); size_t nmreq_size_by_type(uint16_t nr_reqtype); /* netmap_adapter creation/destruction */ // #define NM_DEBUG_PUTGET 1 #ifdef NM_DEBUG_PUTGET #define NM_DBG(f) __##f void __netmap_adapter_get(struct netmap_adapter *na); #define netmap_adapter_get(na) \ do { \ struct netmap_adapter *__na = na; \ - D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ + nm_prinf("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ __netmap_adapter_get(__na); \ } while (0) int __netmap_adapter_put(struct netmap_adapter *na); #define netmap_adapter_put(na) \ ({ \ struct netmap_adapter *__na = na; \ - D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ + nm_prinf("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ __netmap_adapter_put(__na); \ }) #else /* !NM_DEBUG_PUTGET */ #define NM_DBG(f) f void netmap_adapter_get(struct netmap_adapter *na); int netmap_adapter_put(struct netmap_adapter *na); #endif /* !NM_DEBUG_PUTGET */ /* * module variables */ #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr) #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize) extern int netmap_no_pendintr; extern int netmap_mitigate; extern int netmap_verbose; #ifdef CONFIG_NETMAP_DEBUG extern int netmap_debug; /* for debugging */ #else /* !CONFIG_NETMAP_DEBUG */ #define netmap_debug (0) #endif /* !CONFIG_NETMAP_DEBUG */ enum { /* debug flags */ NM_DEBUG_ON = 1, /* generic debug messsages */ NM_DEBUG_HOST = 0x2, /* debug host stack */ NM_DEBUG_RXSYNC = 0x10, /* debug on rxsync/txsync */ NM_DEBUG_TXSYNC = 0x20, NM_DEBUG_RXINTR = 0x100, /* debug on rx/tx intr (driver) */ NM_DEBUG_TXINTR = 0x200, NM_DEBUG_NIC_RXSYNC = 0x1000, /* debug on rx/tx intr (driver) */ NM_DEBUG_NIC_TXSYNC = 0x2000, NM_DEBUG_MEM = 0x4000, /* verbose memory allocations/deallocations */ NM_DEBUG_VALE = 0x8000, /* debug messages from memory allocators */ NM_DEBUG_BDG = 
NM_DEBUG_VALE, }; extern int netmap_txsync_retry; extern int netmap_flags; extern int netmap_generic_hwcsum; extern int netmap_generic_mit; extern int netmap_generic_ringsize; extern int netmap_generic_rings; #ifdef linux extern int netmap_generic_txqdisc; #endif /* * NA returns a pointer to the struct netmap adapter from the ifp. * WNA is os-specific and must be defined in glue code. */ #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp)) /* * we provide a default implementation of NM_ATTACH_NA/NM_DETACH_NA * based on the WNA field. * Glue code may override this by defining its own NM_ATTACH_NA */ #ifndef NM_ATTACH_NA /* * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we * overload another pointer in the netdev. * * We check if NA(ifp) is set and its first element has a related * magic value. The capenable is within the struct netmap_adapter. */ #define NETMAP_MAGIC 0x52697a7a #define NM_NA_VALID(ifp) (NA(ifp) && \ ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) #define NM_ATTACH_NA(ifp, na) do { \ WNA(ifp) = na; \ if (NA(ifp)) \ NA(ifp)->magic = \ ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC; \ } while(0) #define NM_RESTORE_NA(ifp, na) WNA(ifp) = na; #define NM_DETACH_NA(ifp) do { WNA(ifp) = NULL; } while (0) #define NM_NA_CLASH(ifp) (NA(ifp) && !NM_NA_VALID(ifp)) #endif /* !NM_ATTACH_NA */ #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor) #if defined(__FreeBSD__) /* Assigns the device IOMMU domain to an allocator. * Returns -ENOMEM in case the domain is different */ #define nm_iommu_group_id(dev) (0) /* Callback invoked by the dma machinery after a successful dmamap_load */ static void netmap_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) { } /* bus_dmamap_load wrapper: call aforementioned function if map != NULL. * XXX can we do it without a callback ? */ static inline int netmap_load_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); return 0; } static inline void netmap_unload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map) { if (map) bus_dmamap_unload(tag, map); } #define netmap_sync_map(na, tag, map, sz, t) /* update the map when a buffer changes. 
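 *
 * The usual caller is a native driver's txsync/rxsync: when it finds
 * NS_BUF_CHANGED set on a slot it refreshes the DMA mapping before
 * reprogramming the descriptor. A minimal sketch follows; txr and
 * txbuf stand for the driver's own queue and buffer structures and
 * are not part of this API:
 *
 *	uint64_t paddr;
 *	void *addr = PNMB(na, slot, &paddr);
 *
 *	if (slot->flags & NS_BUF_CHANGED) {
 *		netmap_reload_map(na, txr->txtag, txbuf->map, addr);
 *		slot->flags &= ~NS_BUF_CHANGED;
 *	}
 *	bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE);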
*/ static inline void netmap_reload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) { bus_dmamap_unload(tag, map); bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); } } #elif defined(_WIN32) #else /* linux */ int nm_iommu_group_id(bus_dma_tag_t dev); #include /* * on linux we need * dma_map_single(&pdev->dev, virt_addr, len, direction) * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction) */ #if 0 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = false; buffer_info->length = len; //buffer_info->next_to_watch = l; /* reload dma map */ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, NETMAP_BUF_SIZE, DMA_TO_DEVICE); buffer_info->dma = dma_map_single(&adapter->pdev->dev, addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { - D("dma mapping error"); + nm_prerr("dma mapping error"); /* goto dma_error; See e1000_put_txbuf() */ /* XXX reset */ } tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX #endif static inline int netmap_load_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size) { if (map) { *map = dma_map_single(na->pdev, buf, size, DMA_BIDIRECTIONAL); if (dma_mapping_error(na->pdev, *map)) { *map = 0; return ENOMEM; } } return 0; } static inline void netmap_unload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, u_int sz) { if (*map) { dma_unmap_single(na->pdev, *map, sz, DMA_BIDIRECTIONAL); } } #ifdef NETMAP_LINUX_HAVE_DMASYNC static inline void netmap_sync_map_cpu(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t) { if (*map) { dma_sync_single_for_cpu(na->pdev, *map, sz, (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); } } static inline void netmap_sync_map_dev(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t) { if (*map) { dma_sync_single_for_device(na->pdev, *map, sz, (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); } } static inline void netmap_reload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { u_int sz = NETMAP_BUF_SIZE(na); if (*map) { dma_unmap_single(na->pdev, *map, sz, DMA_BIDIRECTIONAL); } *map = dma_map_single(na->pdev, buf, sz, DMA_BIDIRECTIONAL); } #else /* !NETMAP_LINUX_HAVE_DMASYNC */ #define netmap_sync_map_cpu(na, tag, map, sz, t) #define netmap_sync_map_dev(na, tag, map, sz, t) #endif /* NETMAP_LINUX_HAVE_DMASYNC */ #endif /* linux */ /* * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) */ static inline int netmap_idx_n2k(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; if (likely(kr->nkr_hwofs == 0)) { return idx; } idx += kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } static inline int netmap_idx_k2n(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; if (likely(kr->nkr_hwofs == 0)) { return idx; } idx -= kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } /* Entries of the look-up table. */ #ifdef __FreeBSD__ struct lut_entry { void *vaddr; /* virtual address. */ vm_paddr_t paddr; /* physical address. 
*/ }; #else /* linux & _WIN32 */ /* dma-mapping in linux can assign a buffer a different address * depending on the device, so we need to have a separate * physical-address look-up table for each na. * We can still share the vaddrs, though, therefore we split * the lut_entry structure. */ struct lut_entry { void *vaddr; /* virtual address. */ }; struct plut_entry { vm_paddr_t paddr; /* physical address. */ }; #endif /* linux & _WIN32 */ struct netmap_obj_pool; /* * NMB return the virtual address of a buffer (buffer 0 on bad index) * PNMB also fills the physical address */ static inline void * NMB(struct netmap_adapter *na, struct netmap_slot *slot) { struct lut_entry *lut = na->na_lut.lut; uint32_t i = slot->buf_idx; return (unlikely(i >= na->na_lut.objtotal)) ? lut[0].vaddr : lut[i].vaddr; } static inline void * PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp) { uint32_t i = slot->buf_idx; struct lut_entry *lut = na->na_lut.lut; struct plut_entry *plut = na->na_lut.plut; void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr; #ifdef _WIN32 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart; #else *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr; #endif return ret; } /* * Structure associated to each netmap file descriptor. * It is created on open and left unbound (np_nifp == NULL). * A successful NIOCREGIF will set np_nifp and the first few fields; * this is protected by a global lock (NMG_LOCK) due to low contention. * * np_refs counts the number of references to the structure: one for the fd, * plus (on FreeBSD) one for each active mmap which we track ourselves * (linux automatically tracks them, but FreeBSD does not). * np_refs is protected by NMG_LOCK. * * Read access to the structure is lock free, because ni_nifp once set * can only go to 0 when nobody is using the entry anymore. Readers * must check that np_nifp != NULL before using the other fields. */ struct netmap_priv_d { struct netmap_if * volatile np_nifp; /* netmap if descriptor. */ struct netmap_adapter *np_na; struct ifnet *np_ifp; uint32_t np_flags; /* from the ioctl */ u_int np_qfirst[NR_TXRX], np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */ uint16_t np_txpoll; uint16_t np_kloop_state; /* use with NMG_LOCK held */ #define NM_SYNC_KLOOP_RUNNING (1 << 0) #define NM_SYNC_KLOOP_STOPPING (1 << 1) int np_sync_flags; /* to be passed to nm_sync */ int np_refs; /* use with NMG_LOCK held */ /* pointers to the selinfo to be used for selrecord. * Either the local or the global one depending on the * number of rings. */ NM_SELINFO_T *np_si[NR_TXRX]; /* In the optional CSB mode, the user must specify the start address * of two arrays of Communication Status Block (CSB) entries, for the * two directions (kernel read application write, and kernel write * application read). * The number of entries must agree with the number of rings bound to * the netmap file descriptor. The entries corresponding to the TX * rings are laid out before the ones corresponding to the RX rings. * * Array of CSB entries for application --> kernel communication * (N entries). */ struct nm_csb_atok *np_csb_atok_base; /* Array of CSB entries for kernel --> application communication * (N entries). 
*/ struct nm_csb_ktoa *np_csb_ktoa_base; #ifdef linux struct file *np_filp; /* used by sync kloop */ #endif /* linux */ }; struct netmap_priv_d *netmap_priv_new(void); void netmap_priv_delete(struct netmap_priv_d *); static inline int nm_kring_pending(struct netmap_priv_d *np) { struct netmap_adapter *na = np->np_na; enum txrx t; int i; for_rx_tx(t) { for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) { struct netmap_kring *kring = NMR(na, t)[i]; if (kring->nr_mode != kring->nr_pending_mode) { return 1; } } } return 0; } /* call with NMG_LOCK held */ static __inline int nm_si_user(struct netmap_priv_d *priv, enum txrx t) { return (priv->np_na != NULL && (priv->np_qlast[t] - priv->np_qfirst[t] > 1)); } #ifdef WITH_PIPES int netmap_pipe_txsync(struct netmap_kring *txkring, int flags); int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags); +int netmap_pipe_krings_create_both(struct netmap_adapter *na, + struct netmap_adapter *ona); +void netmap_pipe_krings_delete_both(struct netmap_adapter *na, + struct netmap_adapter *ona); +int netmap_pipe_reg_both(struct netmap_adapter *na, + struct netmap_adapter *ona); #endif /* WITH_PIPES */ #ifdef WITH_MONITOR struct netmap_monitor_adapter { struct netmap_adapter up; struct netmap_priv_d priv; uint32_t flags; }; #endif /* WITH_MONITOR */ #ifdef WITH_GENERIC /* * generic netmap emulation for devices that do not have * native netmap support. */ int generic_netmap_attach(struct ifnet *ifp); int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);; int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept); int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept); int na_is_generic(struct netmap_adapter *na); /* * the generic transmit routine is passed a structure to optionally * build a queue of descriptors, in an OS-specific way. * The payload is at addr, if non-null, and the routine should send or queue * the packet, returning 0 if successful, 1 on failure. * * At the end, if head is non-null, there will be an additional call * to the function with addr = NULL; this should tell the OS-specific * routine to send the queue and free any resources. Failure is ignored. */ struct nm_os_gen_arg { struct ifnet *ifp; void *m; /* os-specific mbuf-like object */ void *head, *tail; /* tailq, if the OS-specific routine needs to build one */ void *addr; /* payload of current packet */ u_int len; /* packet length */ u_int ring_nr; /* packet length */ u_int qevent; /* in txqdisc mode, place an event on this mbuf */ }; int nm_os_generic_xmit_frame(struct nm_os_gen_arg *); int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx); void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq); void nm_os_generic_set_features(struct netmap_generic_adapter *gna); static inline struct ifnet* netmap_generic_getifp(struct netmap_generic_adapter *gna) { if (gna->prev) return gna->prev->ifp; return gna->up.up.ifp; } void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done); //#define RATE_GENERIC /* Enables communication statistics for generic. */ #ifdef RATE_GENERIC void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi); #else #define generic_rate(txp, txs, txi, rxp, rxs, rxi) #endif /* * netmap_mitigation API. This is used by the generic adapter * to reduce the number of interrupt requests/selwakeup * to clients on incoming packets. 
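 *
 * A simplified sketch of how the generic rx intercept path uses this
 * API for ring r (cf. generic_rx_handler(); na, gna, m and r are
 * assumed to be the adapter, the generic adapter, the intercepted
 * mbuf and the target ring index):
 *
 *	u_int work_done = 0;
 *
 *	mbq_safe_enqueue(&na->rx_rings[r]->rx_queue, m);
 *	if (!nm_os_mitigation_active(&gna->mit[r])) {
 *		// no notification pending: wake up clients now and
 *		// arm the timer so the next packets are coalesced
 *		netmap_generic_irq(na, r, &work_done);
 *		nm_os_mitigation_start(&gna->mit[r]);
 *	}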
*/ void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na); void nm_os_mitigation_start(struct nm_generic_mit *mit); void nm_os_mitigation_restart(struct nm_generic_mit *mit); int nm_os_mitigation_active(struct nm_generic_mit *mit); void nm_os_mitigation_cleanup(struct nm_generic_mit *mit); #else /* !WITH_GENERIC */ #define generic_netmap_attach(ifp) (EOPNOTSUPP) #define na_is_generic(na) (0) #endif /* WITH_GENERIC */ /* Shared declarations for the VALE switch. */ /* * Each transmit queue accumulates a batch of packets into * a structure before forwarding. Packets to the same * destination are put in a list using ft_next as a link field. * ft_frags and ft_next are valid only on the first fragment. */ struct nm_bdg_fwd { /* forwarding entry for a bridge */ void *ft_buf; /* netmap or indirect buffer */ uint8_t ft_frags; /* how many fragments (only on 1st frag) */ uint16_t ft_offset; /* dst port (unused) */ uint16_t ft_flags; /* flags, e.g. indirect */ uint16_t ft_len; /* src fragment len */ uint16_t ft_next; /* next packet to same destination */ }; /* struct 'virtio_net_hdr' from linux. */ struct nm_vnet_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ uint8_t flags; #define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */ #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */ #define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */ #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */ #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */ uint8_t gso_type; uint16_t hdr_len; uint16_t gso_size; uint16_t csum_start; uint16_t csum_offset; }; #define WORST_CASE_GSO_HEADER (14+40+60) /* IPv6 + TCP */ /* Private definitions for IPv4, IPv6, UDP and TCP headers. */ struct nm_iphdr { uint8_t version_ihl; uint8_t tos; uint16_t tot_len; uint16_t id; uint16_t frag_off; uint8_t ttl; uint8_t protocol; uint16_t check; uint32_t saddr; uint32_t daddr; /*The options start here. */ }; struct nm_tcphdr { uint16_t source; uint16_t dest; uint32_t seq; uint32_t ack_seq; uint8_t doff; /* Data offset + Reserved */ uint8_t flags; uint16_t window; uint16_t check; uint16_t urg_ptr; }; struct nm_udphdr { uint16_t source; uint16_t dest; uint16_t len; uint16_t check; }; struct nm_ipv6hdr { uint8_t priority_version; uint8_t flow_lbl[3]; uint16_t payload_len; uint8_t nexthdr; uint8_t hop_limit; uint8_t saddr[16]; uint8_t daddr[16]; }; /* Type used to store a checksum (in host byte order) that hasn't been * folded yet. 
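To make the nm_vnet_hdr layout above concrete, here is a minimal userspace sketch that fills a copy of that header for a plain (non-GSO) TCP/IPv4 frame requesting checksum completion: csum_start points at the TCP header (14-byte Ethernet plus 20-byte IPv4 header) and csum_offset is where the TCP checksum field lives inside it. The demo_* struct and macros are local mirrors of the definitions shown above, not the kernel ones, and the offsets assume an untagged Ethernet frame with no IP options.

#include <stdint.h>
#include <stdio.h>

/* Local copy of the 'struct nm_vnet_hdr' field layout declared above. */
struct demo_vnet_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define DEMO_F_NEEDS_CSUM	1	/* use csum_start, csum_offset */
#define DEMO_GSO_NONE		0	/* not a GSO frame */

int
main(void)
{
	struct demo_vnet_hdr h = {
		.flags       = DEMO_F_NEEDS_CSUM,
		.gso_type    = DEMO_GSO_NONE,
		.csum_start  = 14 + 20,	/* Ethernet + IPv4 header */
		.csum_offset = 16,	/* TCP checksum within the TCP header */
	};

	printf("flags=%u gso=%u csum_start=%u csum_offset=%u\n",
	    h.flags, h.gso_type, h.csum_start, h.csum_offset);
	return 0;
}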
*/ #define rawsum_t uint32_t rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum); uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph); void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data, size_t datalen, uint16_t *check); void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, size_t datalen, uint16_t *check); uint16_t nm_os_csum_fold(rawsum_t cur_sum); void bdg_mismatch_datapath(struct netmap_vp_adapter *na, struct netmap_vp_adapter *dst_na, const struct nm_bdg_fwd *ft_p, struct netmap_ring *dst_ring, u_int *j, u_int lim, u_int *howmany); /* persistent virtual port routines */ int nm_os_vi_persist(const char *, struct ifnet **); void nm_os_vi_detach(struct ifnet *); void nm_os_vi_init_index(void); /* * kernel thread routines */ struct nm_kctx; /* OS-specific kernel context - opaque */ typedef void (*nm_kctx_worker_fn_t)(void *data); /* kthread configuration */ struct nm_kctx_cfg { long type; /* kthread type/identifier */ nm_kctx_worker_fn_t worker_fn; /* worker function */ void *worker_private;/* worker parameter */ int attach_user; /* attach kthread to user process */ }; /* kthread configuration */ struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque); int nm_os_kctx_worker_start(struct nm_kctx *); void nm_os_kctx_worker_stop(struct nm_kctx *); void nm_os_kctx_destroy(struct nm_kctx *); void nm_os_kctx_worker_setaff(struct nm_kctx *, int); u_int nm_os_ncpus(void); int netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr); int netmap_sync_kloop_stop(struct netmap_priv_d *priv); #ifdef WITH_PTNETMAP /* ptnetmap guest routines */ /* * ptnetmap_memdev routines used to talk with ptnetmap_memdev device driver */ struct ptnetmap_memdev; int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *, vm_paddr_t *, void **, uint64_t *); void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *); uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *, unsigned int); /* * netmap adapter for guest ptnetmap ports */ struct netmap_pt_guest_adapter { /* The netmap adapter to be used by netmap applications. * This field must be the first, to allow upcast. */ struct netmap_hw_adapter hwup; /* The netmap adapter to be used by the driver. */ struct netmap_hw_adapter dr; /* Reference counter to track users of backend netmap port: the * network stack and netmap clients. * Used to decide when we need (de)allocate krings/rings and * start (stop) ptnetmap kthreads. */ int backend_users; }; int netmap_pt_guest_attach(struct netmap_adapter *na, unsigned int nifp_offset, unsigned int memid); bool netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa, struct netmap_kring *kring, int flags); bool netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa, struct netmap_kring *kring, int flags); int ptnet_nm_krings_create(struct netmap_adapter *na); void ptnet_nm_krings_delete(struct netmap_adapter *na); void ptnet_nm_dtor(struct netmap_adapter *na); /* Helper function wrapping nm_sync_kloop_appl_read(). */ static inline void ptnet_sync_tail(struct nm_csb_ktoa *ktoa, struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; /* Update hwcur and hwtail as known by the host. 
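The rawsum_t helpers declared above keep an unfolded 32-bit running sum and fold it to a 16-bit Internet checksum at the end. The standalone sketch below shows the usual accumulate-then-fold scheme; it only illustrates the idea, and the exact byte-order conventions of the real nm_os_csum_raw()/nm_os_csum_fold() may differ, so the demo_* functions are assumptions, not drop-in replacements.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t demo_rawsum_t;

/* Accumulate 'len' bytes into an unfolded 32-bit sum. */
static demo_rawsum_t
demo_csum_raw(const uint8_t *data, size_t len, demo_rawsum_t cur)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		cur += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (len & 1)
		cur += (uint32_t)(data[len - 1] << 8);
	return cur;
}

/* Fold the carries and complement, producing the 16-bit checksum. */
static uint16_t
demo_csum_fold(demo_rawsum_t cur)
{
	while (cur >> 16)
		cur = (cur & 0xffff) + (cur >> 16);
	return (uint16_t)~cur;
}

int
main(void)
{
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00 };

	printf("checksum = 0x%04x\n",
	    demo_csum_fold(demo_csum_raw(payload, sizeof(payload), 0)));
	return 0;
}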
*/ nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur); /* nm_sync_finalize */ ring->tail = kring->rtail = kring->nr_hwtail; } #endif /* WITH_PTNETMAP */ #ifdef __FreeBSD__ /* * FreeBSD mbuf allocator/deallocator in emulation mode: */ #if __FreeBSD_version < 1100000 /* * For older versions of FreeBSD: * * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE * so that the destructor, if invoked, will not free the packet. * In principle we should set the destructor only on demand, * but since there might be a race we better do it on allocation. * As a consequence, we also need to set the destructor or we * would leak buffers. */ /* mbuf destructor, also need to change the type to EXT_EXTREF, * add an M_NOFREE flag, and then clear the flag and * chain into uma_zfree(zone_pack, mf) * (or reinstall the buffer ?) */ #define SET_MBUF_DESTRUCTOR(m, fn) do { \ (m)->m_ext.ext_free = (void *)fn; \ (m)->m_ext.ext_type = EXT_EXTREF; \ } while (0) static int void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { /* restore original mbuf */ m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1; m->m_ext.ext_arg1 = NULL; m->m_ext.ext_type = EXT_PACKET; m->m_ext.ext_free = NULL; if (MBUF_REFCNT(m) == 0) SET_MBUF_REFCNT(m, 1); uma_zfree(zone_pack, m); return 0; } static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { struct mbuf *m; (void)ifp; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m) { /* m_getcl() (mb_ctor_mbuf) has an assert that checks that * M_NOFREE flag is not specified as third argument, * so we have to set M_NOFREE after m_getcl(). */ m->m_flags |= M_NOFREE; m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save m->m_ext.ext_free = (void *)void_mbuf_dtor; m->m_ext.ext_type = EXT_EXTREF; - ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m)); + nm_prdis(5, "create m %p refcnt %d", m, MBUF_REFCNT(m)); } return m; } #else /* __FreeBSD_version >= 1100000 */ /* * Newer versions of FreeBSD, using a straightforward scheme. * * We allocate mbufs with m_gethdr(), since the mbuf header is needed * by the driver. We also attach a customly-provided external storage, * which in this case is a netmap buffer. When calling m_extadd(), however * we pass a NULL address, since the real address (and length) will be * filled in by nm_os_generic_xmit_frame() right before calling * if_transmit(). * * The dtor function does nothing, however we need it since mb_free_ext() * has a KASSERT(), checking that the mbuf dtor function is not NULL. */ #if __FreeBSD_version <= 1200050 static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { } #else /* __FreeBSD_version >= 1200051 */ /* The arg1 and arg2 pointers argument were removed by r324446, which * in included since version 1200051. */ static void void_mbuf_dtor(struct mbuf *m) { } #endif /* __FreeBSD_version >= 1200051 */ #define SET_MBUF_DESTRUCTOR(m, fn) do { \ (m)->m_ext.ext_free = (fn != NULL) ? \ (void *)fn : (void *)void_mbuf_dtor; \ } while (0) static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { struct mbuf *m; (void)ifp; (void)len; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { return m; } m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor, NULL, NULL, 0, EXT_NET_DRV); return m; } #endif /* __FreeBSD_version >= 1100000 */ #endif /* __FreeBSD__ */ struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t); int nmreq_checkduplicate(struct nmreq_option *); int netmap_init_bridges(void); void netmap_uninit_bridges(void); /* Functions to read and write CSB fields from the kernel. 
*/ #if defined (linux) #define CSB_READ(csb, field, r) (get_user(r, &csb->field)) #define CSB_WRITE(csb, field, v) (put_user(v, &csb->field)) #else /* ! linux */ #define CSB_READ(csb, field, r) (r = fuword32(&csb->field)) #define CSB_WRITE(csb, field, v) (suword32(&csb->field, v)) #endif /* ! linux */ #endif /* _NET_NETMAP_KERN_H_ */ Index: stable/12/sys/dev/netmap/netmap_legacy.c =================================================================== --- stable/12/sys/dev/netmap/netmap_legacy.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_legacy.c (revision 344046) @@ -1,428 +1,435 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2018 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include /* defines used in kernel.h */ #include /* FIONBIO */ #include #include /* struct socket */ #include /* sockaddrs */ #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined (_WIN32) #include "win_glue.h" #endif /* * common headers */ #include #include #include static int nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr, struct nmreq_register *req) { req->nr_offset = nmr->nr_offset; req->nr_memsize = nmr->nr_memsize; req->nr_tx_slots = nmr->nr_tx_slots; req->nr_rx_slots = nmr->nr_rx_slots; req->nr_tx_rings = nmr->nr_tx_rings; req->nr_rx_rings = nmr->nr_rx_rings; req->nr_mem_id = nmr->nr_arg2; req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK; if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) { /* Convert the older nmr->nr_ringid (original * netmap control API) to nmr->nr_flags. */ u_int regmode = NR_REG_DEFAULT; if (req->nr_ringid & NETMAP_SW_RING) { regmode = NR_REG_SW; } else if (req->nr_ringid & NETMAP_HW_RING) { regmode = NR_REG_ONE_NIC; } else { regmode = NR_REG_ALL_NIC; } req->nr_mode = regmode; } else { req->nr_mode = nmr->nr_flags & NR_REG_MASK; } /* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. 
*/ if (req->nr_mode == NR_REG_PIPE_MASTER || req->nr_mode == NR_REG_PIPE_SLAVE) { char suffix[10]; snprintf(suffix, sizeof(suffix), "%c%d", (req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'), req->nr_ringid); if (strlen(hdr->nr_name) + strlen(suffix) >= sizeof(hdr->nr_name)) { /* No space for the pipe suffix. */ return ENOBUFS; } strncat(hdr->nr_name, suffix, strlen(suffix)); req->nr_mode = NR_REG_ALL_NIC; req->nr_ringid = 0; } req->nr_flags = nmr->nr_flags & (~NR_REG_MASK); if (nmr->nr_ringid & NETMAP_NO_TX_POLL) { req->nr_flags |= NR_NO_TX_POLL; } if (nmr->nr_ringid & NETMAP_DO_RX_POLL) { req->nr_flags |= NR_DO_RX_POLL; } /* nmr->nr_arg1 (nr_pipes) ignored */ req->nr_extra_bufs = nmr->nr_arg3; return 0; } /* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs * (new API). The new struct is dynamically allocated. */ static struct nmreq_header * nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd) { struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr)); if (hdr == NULL) { goto oom; } /* Sanitize nmr->nr_name by adding the string terminator. */ if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) { nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; } /* First prepare the request header. */ hdr->nr_version = NETMAP_API; /* new API */ strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name)); hdr->nr_options = (uintptr_t)NULL; hdr->nr_body = (uintptr_t)NULL; switch (ioctl_cmd) { case NIOCREGIF: { switch (nmr->nr_cmd) { case 0: { /* Regular NIOCREGIF operation. */ struct nmreq_register *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = NETMAP_REQ_REGISTER; if (nmreq_register_from_legacy(nmr, hdr, req)) { goto oom; } break; } case NETMAP_BDG_ATTACH: { struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH; if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) { goto oom; } /* Fix nr_mode, starting from nr_arg1. */ if (nmr->nr_arg1 & NETMAP_BDG_HOST) { req->reg.nr_mode = NR_REG_NIC_SW; } else { req->reg.nr_mode = NR_REG_ALL_NIC; } break; } case NETMAP_BDG_DETACH: { hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH; hdr->nr_body = (uintptr_t)nm_os_malloc(sizeof(struct nmreq_vale_detach)); break; } case NETMAP_BDG_VNET_HDR: case NETMAP_VNET_HDR_GET: { struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ? NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET; req->nr_hdr_len = nmr->nr_arg1; break; } case NETMAP_BDG_NEWIF : { struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF; req->nr_tx_slots = nmr->nr_tx_slots; req->nr_rx_slots = nmr->nr_rx_slots; req->nr_tx_rings = nmr->nr_tx_rings; req->nr_rx_rings = nmr->nr_rx_rings; req->nr_mem_id = nmr->nr_arg2; break; } case NETMAP_BDG_DELIF: { hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF; break; } case NETMAP_BDG_POLLING_ON: case NETMAP_BDG_POLLING_OFF: { struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ? 
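The pipe handling above rewrites the port name by appending '{' (master) or '}' (slave) plus the ring id, refusing the request when the suffix would not fit in nr_name. A small self-contained sketch of that rewriting follows; the demo_* function and DEMO_NAMSZ are hypothetical, and "vale0:1" is just a placeholder port name.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define DEMO_NAMSZ 64	/* stand-in for sizeof(hdr->nr_name) */

/* Append "{id" (master) or "}id" (slave), as in the code above. */
static int
demo_append_pipe_suffix(char *name, size_t namsz, int master, unsigned ringid)
{
	char suffix[16];

	snprintf(suffix, sizeof(suffix), "%c%u", master ? '{' : '}', ringid);
	if (strlen(name) + strlen(suffix) >= namsz)
		return ENOBUFS;		/* no space for the pipe suffix */
	strncat(name, suffix, namsz - strlen(name) - 1);
	return 0;
}

int
main(void)
{
	char name[DEMO_NAMSZ] = "vale0:1";

	if (demo_append_pipe_suffix(name, sizeof(name), 1, 3) == 0)
		printf("pipe master name: %s\n", name);	/* "vale0:1{3" */
	return 0;
}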
NETMAP_REQ_VALE_POLLING_ENABLE : NETMAP_REQ_VALE_POLLING_DISABLE; switch (nmr->nr_flags & NR_REG_MASK) { default: req->nr_mode = 0; /* invalid */ break; case NR_REG_ONE_NIC: req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU; break; case NR_REG_ALL_NIC: req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU; break; } req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK; req->nr_num_polling_cpus = nmr->nr_arg1; break; } case NETMAP_PT_HOST_CREATE: case NETMAP_PT_HOST_DELETE: { nm_prerr("Netmap passthrough not supported yet"); return NULL; break; } } break; } case NIOCGINFO: { if (nmr->nr_cmd == NETMAP_BDG_LIST) { struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = NETMAP_REQ_VALE_LIST; req->nr_bridge_idx = nmr->nr_arg1; req->nr_port_idx = nmr->nr_arg2; } else { /* Regular NIOCGINFO. */ struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req)); if (!req) { goto oom; } hdr->nr_body = (uintptr_t)req; hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; req->nr_memsize = nmr->nr_memsize; req->nr_tx_slots = nmr->nr_tx_slots; req->nr_rx_slots = nmr->nr_rx_slots; req->nr_tx_rings = nmr->nr_tx_rings; req->nr_rx_rings = nmr->nr_rx_rings; req->nr_mem_id = nmr->nr_arg2; } break; } } return hdr; oom: if (hdr) { if (hdr->nr_body) { nm_os_free((void *)(uintptr_t)hdr->nr_body); } nm_os_free(hdr); } nm_prerr("Failed to allocate memory for nmreq_xyz struct"); return NULL; } static void nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr) { nmr->nr_offset = req->nr_offset; nmr->nr_memsize = req->nr_memsize; nmr->nr_tx_slots = req->nr_tx_slots; nmr->nr_rx_slots = req->nr_rx_slots; nmr->nr_tx_rings = req->nr_tx_rings; nmr->nr_rx_rings = req->nr_rx_rings; nmr->nr_arg2 = req->nr_mem_id; nmr->nr_arg3 = req->nr_extra_bufs; } /* Convert a nmreq_xyz struct (new API) to the legacy 'nmr' struct. * It also frees the nmreq_xyz struct, as it was allocated by * nmreq_from_legacy(). */ static int nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr) { int ret = 0; /* We only write-back the fields that the user expects to be * written back. 
*/ switch (hdr->nr_reqtype) { case NETMAP_REQ_REGISTER: { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; nmreq_register_to_legacy(req, nmr); break; } case NETMAP_REQ_PORT_INFO_GET: { struct nmreq_port_info_get *req = (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body; nmr->nr_memsize = req->nr_memsize; nmr->nr_tx_slots = req->nr_tx_slots; nmr->nr_rx_slots = req->nr_rx_slots; nmr->nr_tx_rings = req->nr_tx_rings; nmr->nr_rx_rings = req->nr_rx_rings; nmr->nr_arg2 = req->nr_mem_id; break; } case NETMAP_REQ_VALE_ATTACH: { struct nmreq_vale_attach *req = (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body; nmreq_register_to_legacy(&req->reg, nmr); break; } case NETMAP_REQ_VALE_DETACH: { break; } case NETMAP_REQ_VALE_LIST: { struct nmreq_vale_list *req = (struct nmreq_vale_list *)(uintptr_t)hdr->nr_body; strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name)); nmr->nr_arg1 = req->nr_bridge_idx; nmr->nr_arg2 = req->nr_port_idx; break; } case NETMAP_REQ_PORT_HDR_SET: case NETMAP_REQ_PORT_HDR_GET: { struct nmreq_port_hdr *req = (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; nmr->nr_arg1 = req->nr_hdr_len; break; } case NETMAP_REQ_VALE_NEWIF: { struct nmreq_vale_newif *req = (struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body; nmr->nr_tx_slots = req->nr_tx_slots; nmr->nr_rx_slots = req->nr_rx_slots; nmr->nr_tx_rings = req->nr_tx_rings; nmr->nr_rx_rings = req->nr_rx_rings; nmr->nr_arg2 = req->nr_mem_id; break; } case NETMAP_REQ_VALE_DELIF: case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: { break; } } return ret; } int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *td) { int error = 0; switch (cmd) { case NIOCGINFO: case NIOCREGIF: { /* Request for the legacy control API. Convert it to a * NIOCCTRL request. */ struct nmreq *nmr = (struct nmreq *) data; - struct nmreq_header *hdr = nmreq_from_legacy(nmr, cmd); + struct nmreq_header *hdr; + + if (nmr->nr_version < 11) { + nm_prerr("Minimum supported API is 11 (requested %u)", + nmr->nr_version); + return EINVAL; + } + hdr = nmreq_from_legacy(nmr, cmd); if (hdr == NULL) { /* out of memory */ return ENOMEM; } error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td, /*nr_body_is_user=*/0); if (error == 0) { nmreq_to_legacy(hdr, nmr); } if (hdr->nr_body) { nm_os_free((void *)(uintptr_t)hdr->nr_body); } nm_os_free(hdr); break; } #ifdef WITH_VALE case NIOCCONFIG: { struct nm_ifreq *nr = (struct nm_ifreq *)data; error = netmap_bdg_config(nr); break; } #endif #ifdef __FreeBSD__ case FIONBIO: case FIOASYNC: - ND("FIONBIO/FIOASYNC are no-ops"); + /* FIONBIO/FIOASYNC are no-ops. */ break; case BIOCIMMEDIATE: case BIOCGHDRCMPLT: case BIOCSHDRCMPLT: case BIOCSSEESENT: - D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); + /* Ignore these commands. */ break; default: /* allow device-specific ioctls */ { struct nmreq *nmr = (struct nmreq *)data; struct ifnet *ifp = ifunit_ref(nmr->nr_name); if (ifp == NULL) { error = ENXIO; } else { struct socket so; bzero(&so, sizeof(so)); so.so_vnet = ifp->if_vnet; // so->so_proto not null. 
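For context on the legacy path served by netmap_ioctl_legacy() below (where requests with nr_version older than 11 are rejected), here is a userspace sketch of the old control API: a struct nmreq with nr_version = NETMAP_API issued as NIOCGINFO against /dev/netmap. The interface name "em0" is a placeholder, and the exact set of headers needed can vary between systems, so treat this as an illustrative sketch rather than a reference client.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/netmap.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct nmreq nmr;
	int fd = open("/dev/netmap", O_RDWR);

	if (fd < 0) {
		perror("open(/dev/netmap)");
		return 1;
	}
	memset(&nmr, 0, sizeof(nmr));
	nmr.nr_version = NETMAP_API;	/* versions older than 11 are rejected */
	strncpy(nmr.nr_name, "em0", sizeof(nmr.nr_name) - 1);
	if (ioctl(fd, NIOCGINFO, &nmr) < 0)
		perror("NIOCGINFO");
	else
		printf("%s: %u tx rings, %u rx rings, memsize %u\n",
		    nmr.nr_name, (unsigned)nmr.nr_tx_rings,
		    (unsigned)nmr.nr_rx_rings, (unsigned)nmr.nr_memsize);
	close(fd);
	return 0;
}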
error = ifioctl(&so, cmd, data, td); if_rele(ifp); } break; } #else /* linux */ default: error = EOPNOTSUPP; #endif /* linux */ } return error; } Index: stable/12/sys/dev/netmap/netmap_mem2.c =================================================================== --- stable/12/sys/dev/netmap/netmap_mem2.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_mem2.c (revision 344046) @@ -1,2852 +1,2852 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2014 Matteo Landi * Copyright (C) 2012-2016 Luigi Rizzo * Copyright (C) 2012-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef linux #include "bsd_glue.h" #endif /* linux */ #ifdef __APPLE__ #include "osx_glue.h" #endif /* __APPLE__ */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* MALLOC_DEFINE */ #include #include /* vtophys */ #include /* vtophys */ #include /* sockaddrs */ #include #include #include #include #include #include /* bus_dmamap_* */ /* M_NETMAP only used in here */ MALLOC_DECLARE(M_NETMAP); MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); #endif /* __FreeBSD__ */ #ifdef _WIN32 #include #endif #include #include #include #include "netmap_mem2.h" #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY #define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */ #else #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ #endif #define NETMAP_POOL_MAX_NAMSZ 32 enum { NETMAP_IF_POOL = 0, NETMAP_RING_POOL, NETMAP_BUF_POOL, NETMAP_POOLS_NR }; struct netmap_obj_params { u_int size; u_int num; u_int last_size; u_int last_num; }; struct netmap_obj_pool { char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ /* ---------------------------------------------------*/ /* these are only meaningful if the pool is finalized */ /* (see 'finalized' field in netmap_mem_d) */ u_int objtotal; /* actual total number of objects. */ u_int memtotal; /* actual total memory space */ u_int numclusters; /* actual number of clusters */ u_int objfree; /* number of free objects. 
*/ struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ uint32_t *bitmap; /* one bit per buffer, 1 means free */ uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */ uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ int alloc_done; /* we have allocated the memory */ /* ---------------------------------------------------*/ /* limits */ u_int objminsize; /* minimum object size */ u_int objmaxsize; /* maximum object size */ u_int nummin; /* minimum number of objects */ u_int nummax; /* maximum number of objects */ /* these are changed only by config */ u_int _objtotal; /* total number of objects */ u_int _objsize; /* object size */ u_int _clustsize; /* cluster size */ u_int _clustentries; /* objects per cluster */ u_int _numclusters; /* number of clusters */ /* requested values */ u_int r_objtotal; u_int r_objsize; }; #define NMA_LOCK_T NM_MTX_T #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) #define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx) #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) struct netmap_mem_ops { int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size, u_int *memflags, uint16_t *id); vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); int (*nmd_config)(struct netmap_mem_d *); int (*nmd_finalize)(struct netmap_mem_d *); void (*nmd_deref)(struct netmap_mem_d *); ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); void (*nmd_delete)(struct netmap_mem_d *); struct netmap_if * (*nmd_if_new)(struct netmap_adapter *, struct netmap_priv_d *); void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); int (*nmd_rings_create)(struct netmap_adapter *); void (*nmd_rings_delete)(struct netmap_adapter *); }; struct netmap_mem_d { NMA_LOCK_T nm_mtx; /* protect the allocator */ u_int nm_totalsize; /* shorthand */ u_int flags; #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ #define NETMAP_MEM_HIDDEN 0x8 /* beeing prepared */ int lasterr; /* last error for curr config */ int active; /* active users */ int refcount; /* the three allocators */ struct netmap_obj_pool pools[NETMAP_POOLS_NR]; nm_memid_t nm_id; /* allocator identifier */ int nm_grp; /* iommu groupd id */ /* list of all existing allocators, sorted by nm_id */ struct netmap_mem_d *prev, *next; struct netmap_mem_ops *ops; struct netmap_obj_params params[NETMAP_POOLS_NR]; #define NM_MEM_NAMESZ 16 char name[NM_MEM_NAMESZ]; }; int netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { int rv; NMA_LOCK(nmd); rv = nmd->ops->nmd_get_lut(nmd, lut); NMA_UNLOCK(nmd); return rv; } int netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size, u_int *memflags, nm_memid_t *memid) { int rv; NMA_LOCK(nmd); rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid); NMA_UNLOCK(nmd); return rv; } vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) { vm_paddr_t pa; #if defined(__FreeBSD__) /* This function is called by netmap_dev_pager_fault(), which holds a * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we * spin on the trylock. */ NMA_SPINLOCK(nmd); #else NMA_LOCK(nmd); #endif pa = nmd->ops->nmd_ofstophys(nmd, off); NMA_UNLOCK(nmd); return pa; } static int netmap_mem_config(struct netmap_mem_d *nmd) { if (nmd->active) { /* already in use. 
Not fatal, but we * cannot change the configuration */ return 0; } return nmd->ops->nmd_config(nmd); } ssize_t netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off) { ssize_t rv; NMA_LOCK(nmd); rv = nmd->ops->nmd_if_offset(nmd, off); NMA_UNLOCK(nmd); return rv; } static void netmap_mem_delete(struct netmap_mem_d *nmd) { nmd->ops->nmd_delete(nmd); } struct netmap_if * netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) { struct netmap_if *nifp; struct netmap_mem_d *nmd = na->nm_mem; NMA_LOCK(nmd); nifp = nmd->ops->nmd_if_new(na, priv); NMA_UNLOCK(nmd); return nifp; } void netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif) { struct netmap_mem_d *nmd = na->nm_mem; NMA_LOCK(nmd); nmd->ops->nmd_if_delete(na, nif); NMA_UNLOCK(nmd); } int netmap_mem_rings_create(struct netmap_adapter *na) { int rv; struct netmap_mem_d *nmd = na->nm_mem; NMA_LOCK(nmd); rv = nmd->ops->nmd_rings_create(na); NMA_UNLOCK(nmd); return rv; } void netmap_mem_rings_delete(struct netmap_adapter *na) { struct netmap_mem_d *nmd = na->nm_mem; NMA_LOCK(nmd); nmd->ops->nmd_rings_delete(na); NMA_UNLOCK(nmd); } static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); static int nm_mem_assign_group(struct netmap_mem_d *, struct device *); static void nm_mem_release_id(struct netmap_mem_d *); nm_memid_t netmap_mem_get_id(struct netmap_mem_d *nmd) { return nmd->nm_id; } #ifdef NM_DEBUG_MEM_PUTGET #define NM_DBG_REFC(nmd, func, line) \ nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount); #else #define NM_DBG_REFC(nmd, func, line) #endif /* circular list of all existing allocators */ static struct netmap_mem_d *netmap_last_mem_d = &nm_mem; NM_MTX_T nm_mem_list_lock; struct netmap_mem_d * __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) { NM_MTX_LOCK(nm_mem_list_lock); nmd->refcount++; NM_DBG_REFC(nmd, func, line); NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) { int last; NM_MTX_LOCK(nm_mem_list_lock); last = (--nmd->refcount == 0); if (last) nm_mem_release_id(nmd); NM_DBG_REFC(nmd, func, line); NM_MTX_UNLOCK(nm_mem_list_lock); if (last) netmap_mem_delete(nmd); } int netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) { int lasterr = 0; if (nm_mem_assign_group(nmd, na->pdev) < 0) { return ENOMEM; } NMA_LOCK(nmd); if (netmap_mem_config(nmd)) goto out; nmd->active++; nmd->lasterr = nmd->ops->nmd_finalize(nmd); if (!nmd->lasterr && na->pdev) { nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); } out: lasterr = nmd->lasterr; NMA_UNLOCK(nmd); if (lasterr) netmap_mem_deref(nmd, na); return lasterr; } static int nm_isset(uint32_t *bitmap, u_int i) { return bitmap[ (i>>5) ] & ( 1U << (i & 31U) ); } static int netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p) { u_int n, j; if (p->bitmap == NULL) { /* Allocate the bitmap */ n = (p->objtotal + 31) / 32; p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n); if (p->bitmap == NULL) { nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, p->name); return ENOMEM; } p->bitmap_slots = n; } else { memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0])); } p->objfree = 0; /* * Set all the bits in the bitmap that have * corresponding buffers to 1 to indicate they are * free. 
*/ for (j = 0; j < p->objtotal; j++) { if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { if (netmap_debug & NM_DEBUG_MEM) nm_prinf("skipping %s %d", p->name, j); continue; } p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); p->objfree++; } if (netmap_verbose) nm_prinf("%s free %u", p->name, p->objfree); if (p->objfree == 0) { if (netmap_verbose) nm_prerr("%s: no objects available", p->name); return ENOMEM; } return 0; } static int netmap_mem_init_bitmaps(struct netmap_mem_d *nmd) { int i, error = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nmd->pools[i]; error = netmap_init_obj_allocator_bitmap(p); if (error) return error; } /* * buffers 0 and 1 are reserved */ if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name); return ENOMEM; } nmd->pools[NETMAP_BUF_POOL].objfree -= 2; if (nmd->pools[NETMAP_BUF_POOL].bitmap) { /* XXX This check is a workaround that prevents a * NULL pointer crash which currently happens only * with ptnetmap guests. * Removed shared-info --> is the bug still there? */ nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; } return 0; } int netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) { int last_user = 0; NMA_LOCK(nmd); if (na->active_fds <= 0) netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); if (nmd->active == 1) { last_user = 1; /* * Reset the allocator when it falls out of use so that any * pool resources leaked by unclean application exits are * reclaimed. */ netmap_mem_init_bitmaps(nmd); } nmd->ops->nmd_deref(nmd); nmd->active--; if (last_user) { nmd->nm_grp = -1; nmd->lasterr = 0; } NMA_UNLOCK(nmd); return last_user; } /* accessor functions */ static int netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; #ifdef __FreeBSD__ lut->plut = lut->lut; #endif lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; return 0; } static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 2, }, [NETMAP_RING_POOL] = { .size = 5*PAGE_SIZE, .num = 4, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = 4098, }, }; /* * nm_mem is the memory allocator used for all physical interfaces * running in netmap mode. * Virtual (VALE) ports will have each its own allocator. */ extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ struct netmap_mem_d nm_mem = { /* Our memory allocator. */ .pools = { [NETMAP_IF_POOL] = { .name = "netmap_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 10, /* don't be stingy */ .nummax = 10000, /* XXX very large */ }, [NETMAP_RING_POOL] = { .name = "netmap_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "netmap_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! 
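The free-object bitmaps above address bit i as word i>>5, position i & 31, with 1 meaning "free". A tiny standalone sketch of that addressing (hypothetical demo_* helpers, mirroring nm_isset() and the marking loop above):

#include <stdint.h>
#include <stdio.h>

static int  demo_isset(const uint32_t *bm, unsigned i) { return bm[i >> 5] & (1U << (i & 31U)); }
static void demo_set(uint32_t *bm, unsigned i)   { bm[i >> 5] |=  (1U << (i & 31U)); }
static void demo_clear(uint32_t *bm, unsigned i) { bm[i >> 5] &= ~(1U << (i & 31U)); }

int
main(void)
{
	uint32_t bitmap[2] = { 0, 0 };	/* room for 64 objects */

	demo_set(bitmap, 37);		/* mark object 37 free */
	printf("37 free? %s\n", demo_isset(bitmap, 37) ? "yes" : "no");
	demo_clear(bitmap, 37);		/* mark it allocated */
	printf("37 free? %s\n", demo_isset(bitmap, 37) ? "yes" : "no");
	return 0;
}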
*/ }, }, .params = { [NETMAP_IF_POOL] = { .size = 1024, .num = 100, }, [NETMAP_RING_POOL] = { .size = 9*PAGE_SIZE, .num = 200, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = NETMAP_BUF_MAX_NUM, }, }, .nm_id = 1, .nm_grp = -1, .prev = &nm_mem, .next = &nm_mem, .ops = &netmap_mem_global_ops, .name = "1" }; /* blueprint for the private memory allocators */ /* XXX clang is not happy about using name as a print format */ static const struct netmap_mem_d nm_blueprint = { .pools = { [NETMAP_IF_POOL] = { .name = "%s_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 1, .nummax = 100, }, [NETMAP_RING_POOL] = { .name = "%s_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "%s_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .nm_grp = -1, .flags = NETMAP_MEM_PRIVATE, .ops = &netmap_mem_global_ops, }; /* memory allocator related sysctls */ #define STRINGIFY(x) #x #define DECLARE_SYSCTLS(id, name) \ SYSBEGIN(mem2_ ## name); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ "Default size of private netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ "Default number of private netmap " STRINGIFY(name) "s"); \ SYSEND SYSCTL_DECL(_dev_netmap); DECLARE_SYSCTLS(NETMAP_IF_POOL, if); DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); /* call with nm_mem_list_lock held */ static int nm_mem_assign_id_locked(struct netmap_mem_d *nmd) { nm_memid_t id; struct netmap_mem_d *scan = netmap_last_mem_d; int error = ENOMEM; do { /* we rely on unsigned wrap around */ id = scan->nm_id + 1; if (id == 0) /* reserve 0 as error value */ id = 1; scan = scan->next; if (id != scan->nm_id) { nmd->nm_id = id; nmd->prev = scan->prev; nmd->next = scan; scan->prev->next = nmd; scan->prev = nmd; netmap_last_mem_d = nmd; nmd->refcount = 1; NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); error = 0; break; } } while (scan != netmap_last_mem_d); return error; } /* call with nm_mem_list_lock *not* held */ static int nm_mem_assign_id(struct netmap_mem_d *nmd) { int ret; NM_MTX_LOCK(nm_mem_list_lock); ret = nm_mem_assign_id_locked(nmd); NM_MTX_UNLOCK(nm_mem_list_lock); return ret; } /* call with nm_mem_list_lock held */ static void nm_mem_release_id(struct netmap_mem_d *nmd) { nmd->prev->next = nmd->next; nmd->next->prev = nmd->prev; if (netmap_last_mem_d == nmd) netmap_last_mem_d = nmd->prev; nmd->prev = nmd->next = NULL; } struct netmap_mem_d * netmap_mem_find(nm_memid_t id) { struct netmap_mem_d *nmd; NM_MTX_LOCK(nm_mem_list_lock); nmd = netmap_last_mem_d; do { if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) { nmd->refcount++; NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } nmd = nmd->next; } 
while (nmd != netmap_last_mem_d); NM_MTX_UNLOCK(nm_mem_list_lock); return NULL; } static int nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) { int err = 0, id; id = nm_iommu_group_id(dev); if (netmap_debug & NM_DEBUG_MEM) nm_prinf("iommu_group %d", id); NMA_LOCK(nmd); if (nmd->nm_grp < 0) nmd->nm_grp = id; if (nmd->nm_grp != id) { if (netmap_verbose) nm_prerr("iommu group mismatch: %u vs %u", nmd->nm_grp, id); nmd->lasterr = err = ENOMEM; } NMA_UNLOCK(nmd); return err; } static struct lut_entry * nm_alloc_lut(u_int nobj) { size_t n = sizeof(struct lut_entry) * nobj; struct lut_entry *lut; #ifdef linux lut = vmalloc(n); #else lut = nm_os_malloc(n); #endif return lut; } static void nm_free_lut(struct lut_entry *lut, u_int objtotal) { bzero(lut, sizeof(struct lut_entry) * objtotal); #ifdef linux vfree(lut); #else nm_os_free(lut); #endif } #if defined(linux) || defined(_WIN32) static struct plut_entry * nm_alloc_plut(u_int nobj) { size_t n = sizeof(struct plut_entry) * nobj; struct plut_entry *lut; lut = vmalloc(n); return lut; } static void nm_free_plut(struct plut_entry * lut) { vfree(lut); } #endif /* linux or _WIN32 */ /* * First, find the allocator that contains the requested offset, * then locate the cluster through a lookup table. */ static vm_paddr_t netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) { int i; vm_ooffset_t o = offset; vm_paddr_t pa; struct netmap_obj_pool *p; p = nmd->pools; for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { if (offset >= p[i].memtotal) continue; // now lookup the cluster's address #ifndef _WIN32 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + offset % p[i]._objsize; #else pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr); pa.QuadPart += offset % p[i]._objsize; #endif return pa; } /* this is only in case of errors */ nm_prerr("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, p[NETMAP_IF_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal + p[NETMAP_BUF_POOL].memtotal); #ifndef _WIN32 return 0; /* bad address */ #else vm_paddr_t res; res.QuadPart = 0; return res; #endif } #ifdef _WIN32 /* * win32_build_virtual_memory_for_userspace * * This function get all the object making part of the pools and maps * a contiguous virtual memory space for the userspace * It works this way * 1 - allocate a Memory Descriptor List wide as the sum * of the memory needed for the pools * 2 - cycle all the objects in every pool and for every object do * * 2a - cycle all the objects in every pool, get the list * of the physical address descriptors * 2b - calculate the offset in the array of pages desciptor in the * main MDL * 2c - copy the descriptors of the object in the main MDL * * 3 - return the resulting MDL that needs to be mapped in userland * * In this way we will have an MDL that describes all the memory for the * objects in a single object */ PMDL win32_build_user_vm_map(struct netmap_mem_d* nmd) { u_int memflags, ofs = 0; PMDL mainMdl, tempMdl; uint64_t memsize; int i, j; if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) { nm_prerr("memory not finalised yet"); return NULL; } mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL); if (mainMdl == NULL) { nm_prerr("failed to allocate mdl"); return NULL; } NMA_LOCK(nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nmd->pools[i]; int clsz = p->_clustsize; int clobjs = p->_clustentries; /* objects per cluster */ int mdl_len = sizeof(PFN_NUMBER) * 
BYTES_TO_PAGES(clsz); PPFN_NUMBER pSrc, pDst; /* each pool has a different cluster size so we need to reallocate */ tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); if (tempMdl == NULL) { NMA_UNLOCK(nmd); nm_prerr("fail to allocate tempMdl"); IoFreeMdl(mainMdl); return NULL; } pSrc = MmGetMdlPfnArray(tempMdl); /* create one entry per cluster, the lut[] has one entry per object */ for (j = 0; j < p->numclusters; j++, ofs += clsz) { pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)]; MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */ RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */ mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ } IoFreeMdl(tempMdl); } NMA_UNLOCK(nmd); return mainMdl; } #endif /* _WIN32 */ /* * helper function for OS-specific mmap routines (currently only windows). * Given an nmd and a pool index, returns the cluster size and number of clusters. * Returns 0 if memory is finalised and the pool is valid, otherwise 1. * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. */ int netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) { if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) return 1; /* invalid arguments */ // NMA_LOCK_ASSERT(nmd); if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { *clustsize = *numclusters = 0; return 1; /* not ready yet */ } *clustsize = nmd->pools[pool]._clustsize; *numclusters = nmd->pools[pool].numclusters; return 0; /* success */ } static int netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, u_int *memflags, nm_memid_t *id) { int error = 0; error = netmap_mem_config(nmd); if (error) goto out; if (size) { if (nmd->flags & NETMAP_MEM_FINALIZED) { *size = nmd->nm_totalsize; } else { int i; *size = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = nmd->pools + i; *size += (p->_numclusters * p->_clustsize); } } } if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: return error; } /* * we store objects by kernel address, need to find the offset * within the pool to export the value to userspace. * Algorithm: scan until we find the cluster, then add the * actual offset in the cluster */ static ssize_t netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) { int i, k = p->_clustentries, n = p->objtotal; ssize_t ofs = 0; for (i = 0; i < n; i += k, ofs += p->_clustsize) { const char *base = p->lut[i].vaddr; ssize_t relofs = (const char *) vaddr - base; if (relofs < 0 || relofs >= p->_clustsize) continue; ofs = ofs + relofs; - ND("%s: return offset %d (cluster %d) for pointer %p", + nm_prdis("%s: return offset %d (cluster %d) for pointer %p", p->name, ofs, i, vaddr); return ofs; } nm_prerr("address %p is not contained inside any cluster (%s)", vaddr, p->name); return 0; /* An error occurred */ } /* Helper functions which convert virtual addresses to offsets */ #define netmap_if_offset(n, v) \ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) static ssize_t netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) { return netmap_if_offset(nmd, addr); } /* * report the index, and use start position as a hint, * otherwise buffer allocation becomes terribly expensive. 
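The offset-to-address translation in netmap_mem2_ofstophys() above walks the three pools, subtracting each pool's memtotal until the offset lands inside one, and then splits the remainder into an object index and an offset within the object. The sketch below replays only that arithmetic with made-up pool sizes; the demo_pool struct and the numbers are illustrative assumptions.

#include <stdio.h>

struct demo_pool { const char *name; unsigned memtotal; unsigned objsize; };

int
main(void)
{
	struct demo_pool pools[3] = {
		{ "if",   4096,        1024 },
		{ "ring", 16 * 4096,   4096 },
		{ "buf",  1024 * 2048, 2048 },
	};
	unsigned offset = 4096 + 16 * 4096 + 5 * 2048 + 100; /* inside "buf" */
	int i;

	for (i = 0; i < 3; i++) {
		if (offset >= pools[i].memtotal) {
			offset -= pools[i].memtotal;	/* skip this pool */
			continue;
		}
		printf("pool %s, object %u, offset %u\n", pools[i].name,
		    offset / pools[i].objsize, offset % pools[i].objsize);
		return 0;
	}
	printf("offset out of range\n");	/* error case, as above */
	return 1;
}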
*/ static void * netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) { uint32_t i = 0; /* index in the bitmap */ uint32_t mask, j = 0; /* slot counter */ void *vaddr = NULL; if (len > p->_objsize) { nm_prerr("%s request size %d too large", p->name, len); return NULL; } if (p->objfree == 0) { nm_prerr("no more %s objects", p->name); return NULL; } if (start) i = *start; /* termination is guaranteed by p->free, but better check bounds on i */ while (vaddr == NULL && i < p->bitmap_slots) { uint32_t cur = p->bitmap[i]; if (cur == 0) { /* bitmask is fully used */ i++; continue; } /* locate a slot */ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; p->bitmap[i] &= ~mask; /* mark object as in use */ p->objfree--; vaddr = p->lut[i * 32 + j].vaddr; if (index) *index = i * 32 + j; } - ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); + nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); if (start) *start = i; return vaddr; } /* * free by index, not by address. * XXX should we also cleanup the content ? */ static int netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) { uint32_t *ptr, mask; if (j >= p->objtotal) { nm_prerr("invalid index %u, max %u", j, p->objtotal); return 1; } ptr = &p->bitmap[j / 32]; mask = (1 << (j % 32)); if (*ptr & mask) { nm_prerr("ouch, double free on buffer %d", j); return 1; } else { *ptr |= mask; p->objfree++; return 0; } } /* * free by address. This is slow but is only used for a few * objects (rings, nifp) */ static void netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) { u_int i, j, n = p->numclusters; for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { void *base = p->lut[i * p->_clustentries].vaddr; ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; /* Given address, is out of the scope of the current cluster.*/ if (base == NULL || vaddr < base || relofs >= p->_clustsize) continue; j = j + relofs / p->_objsize; /* KASSERT(j != 0, ("Cannot free object 0")); */ netmap_obj_free(p, j); return; } nm_prerr("address %p is not contained inside any cluster (%s)", vaddr, p->name); } unsigned netmap_mem_bufsize(struct netmap_mem_d *nmd) { return nmd->pools[NETMAP_BUF_POOL]._objsize; } #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) #define netmap_buf_malloc(n, _pos, _index) \ netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) #if 0 /* currently unused */ /* Return the index associated to the given packet buffer */ #define netmap_buf_index(n, v) \ (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) #endif /* * allocate extra buffers in a linked list. * returns the actual number. 
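The allocation scan in netmap_obj_malloc() above walks the bitmap one 32-bit word at a time, finds the first set bit (a free object), clears it and returns the global index i*32 + j. A self-contained sketch of that scan, with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

/* Returns the index of a free object and marks it in use, or -1. */
static int
demo_bitmap_alloc(uint32_t *bitmap, unsigned nwords)
{
	unsigned i, j;
	uint32_t cur, mask;

	for (i = 0; i < nwords; i++) {
		cur = bitmap[i];
		if (cur == 0)		/* word fully used */
			continue;
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;
		bitmap[i] &= ~mask;	/* mark object as in use */
		return (int)(i * 32 + j);
	}
	return -1;
}

int
main(void)
{
	uint32_t bitmap[2] = { 0x0, 0x90 };	/* objects 36 and 39 free */

	printf("allocated %d\n", demo_bitmap_alloc(bitmap, 2));	/* 36 */
	printf("allocated %d\n", demo_bitmap_alloc(bitmap, 2));	/* 39 */
	printf("allocated %d\n", demo_bitmap_alloc(bitmap, 2));	/* -1 */
	return 0;
}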
*/ uint32_t netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) { struct netmap_mem_d *nmd = na->nm_mem; uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ NMA_LOCK(nmd); *head = 0; /* default, 'null' index ie empty list */ for (i = 0 ; i < n; i++) { uint32_t cur = *head; /* save current head */ uint32_t *p = netmap_buf_malloc(nmd, &pos, head); if (p == NULL) { nm_prerr("no more buffers after %d of %d", i, n); *head = cur; /* restore */ break; } - ND(5, "allocate buffer %d -> %d", *head, cur); + nm_prdis(5, "allocate buffer %d -> %d", *head, cur); *p = cur; /* link to previous head */ } NMA_UNLOCK(nmd); return i; } static void netmap_extra_free(struct netmap_adapter *na, uint32_t head) { struct lut_entry *lut = na->na_lut.lut; struct netmap_mem_d *nmd = na->nm_mem; struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; uint32_t i, cur, *buf; - ND("freeing the extra list"); + nm_prdis("freeing the extra list"); for (i = 0; head >=2 && head < p->objtotal; i++) { cur = head; buf = lut[head].vaddr; head = *buf; *buf = 0; if (netmap_obj_free(p, cur)) break; } if (head != 0) nm_prerr("breaking with head %d", head); if (netmap_debug & NM_DEBUG_MEM) nm_prinf("freed %d buffers", i); } /* Return nonzero on error */ static int netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i = 0; /* slot counter */ uint32_t pos = 0; /* slot in p->bitmap */ uint32_t index = 0; /* buffer index */ for (i = 0; i < n; i++) { void *vaddr = netmap_buf_malloc(nmd, &pos, &index); if (vaddr == NULL) { nm_prerr("no more buffers after %d of %d", i, n); goto cleanup; } slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; slot[i].ptr = 0; } - ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); + nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); return (0); cleanup: while (i > 0) { i--; netmap_obj_free(p, slot[i].buf_idx); } bzero(slot, n * sizeof(slot[0])); return (ENOMEM); } static void netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i; for (i = 0; i < n; i++) { slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; } } static void netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; if (i < 2 || i >= p->objtotal) { nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); return; } netmap_obj_free(p, i); } static void netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { u_int i; for (i = 0; i < n; i++) { if (slot[i].buf_idx > 1) netmap_free_buf(nmd, slot[i].buf_idx); } - ND("%s: released some buffers, available: %u", + nm_prdis("%s: released some buffers, available: %u", p->name, p->objfree); } static void netmap_reset_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; if (p->bitmap) nm_os_free(p->bitmap); p->bitmap = NULL; if (p->invalid_bitmap) nm_os_free(p->invalid_bitmap); p->invalid_bitmap = NULL; if (!p->alloc_done) { /* allocation was done by somebody else. * Let them clean up after themselves. */ return; } if (p->lut) { u_int i; /* * Free each cluster allocated in * netmap_finalize_obj_allocator(). The cluster start * addresses are stored at multiples of p->_clusterentries * in the lut. 
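The extra-buffer list built by netmap_extra_alloc() above is threaded through the buffers themselves: the first 32-bit word of each extra buffer stores the index of the next one, index 0 terminates the list, and indexes below 2 are reserved. The sketch below simulates that linking with an in-memory array; DEMO_NBUFS, DEMO_BUFWORDS and the demo_* helpers are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NBUFS    8
#define DEMO_BUFWORDS 16	/* 64-byte buffers viewed as uint32_t words */

static uint32_t demo_bufs[DEMO_NBUFS][DEMO_BUFWORDS];

/* Prepend buffer 'idx' to the list, as netmap_extra_alloc() does. */
static void
demo_link_extra(uint32_t *head, uint32_t idx)
{
	demo_bufs[idx][0] = *head;	/* link to previous head */
	*head = idx;
}

int
main(void)
{
	uint32_t head = 0;	/* 'null' index, i.e. empty list */
	uint32_t cur;

	demo_link_extra(&head, 5);
	demo_link_extra(&head, 3);
	demo_link_extra(&head, 7);

	printf("extra buffers:");
	for (cur = head; cur >= 2 && cur < DEMO_NBUFS; cur = demo_bufs[cur][0])
		printf(" %u", cur);
	printf("\n");		/* prints: 7 3 5 */
	return 0;
}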
*/ for (i = 0; i < p->objtotal; i += p->_clustentries) { contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); } nm_free_lut(p->lut, p->objtotal); } p->lut = NULL; p->objtotal = 0; p->memtotal = 0; p->numclusters = 0; p->objfree = 0; p->alloc_done = 0; } /* * Free all resources related to an allocator. */ static void netmap_destroy_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; netmap_reset_obj_allocator(p); } /* * We receive a request for objtotal objects, of size objsize each. * Internally we may round up both numbers, as we allocate objects * in small clusters multiple of the page size. * We need to keep track of objtotal and clustentries, * as they are needed when freeing memory. * * XXX note -- userspace needs the buffers to be contiguous, * so we cannot afford gaps at the end of a cluster. */ /* call with NMA_LOCK held */ static int netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) { int i; u_int clustsize; /* the cluster size, multiple of page size */ u_int clustentries; /* how many objects per entry */ /* we store the current request, so we can * detect configuration changes later */ p->r_objtotal = objtotal; p->r_objsize = objsize; #define MAX_CLUSTSIZE (1<<22) // 4 MB #define LINE_ROUND NM_CACHE_ALIGN // 64 if (objsize >= MAX_CLUSTSIZE) { /* we could do it but there is no point */ nm_prerr("unsupported allocation for %d bytes", objsize); return EINVAL; } /* make sure objsize is a multiple of LINE_ROUND */ i = (objsize & (LINE_ROUND - 1)); if (i) { nm_prinf("aligning object by %d bytes", LINE_ROUND - i); objsize += LINE_ROUND - i; } if (objsize < p->objminsize || objsize > p->objmaxsize) { nm_prerr("requested objsize %d out of range [%d, %d]", objsize, p->objminsize, p->objmaxsize); return EINVAL; } if (objtotal < p->nummin || objtotal > p->nummax) { nm_prerr("requested objtotal %d out of range [%d, %d]", objtotal, p->nummin, p->nummax); return EINVAL; } /* * Compute number of objects using a brute-force approach: * given a max cluster size, * we try to fill it with objects keeping track of the * wasted space to the next page boundary. */ for (clustentries = 0, i = 1;; i++) { u_int delta, used = i * objsize; if (used > MAX_CLUSTSIZE) break; delta = used % PAGE_SIZE; if (delta == 0) { // exact solution clustentries = i; break; } } /* exact solution not found */ if (clustentries == 0) { nm_prerr("unsupported allocation for %d bytes", objsize); return EINVAL; } /* compute clustsize */ clustsize = clustentries * objsize; if (netmap_debug & NM_DEBUG_MEM) nm_prinf("objsize %d clustsize %d objects %d", objsize, clustsize, clustentries); /* * The number of clusters is n = ceil(objtotal/clustentries) * objtotal' = n * clustentries */ p->_clustentries = clustentries; p->_clustsize = clustsize; p->_numclusters = (objtotal + clustentries - 1) / clustentries; /* actual values (may be larger than requested) */ p->_objsize = objsize; p->_objtotal = p->_numclusters * clustentries; return 0; } /* call with NMA_LOCK held */ static int netmap_finalize_obj_allocator(struct netmap_obj_pool *p) { int i; /* must be signed */ size_t n; if (p->lut) { /* if the lut is already there we assume that also all the * clusters have already been allocated, possibily by somebody * else (e.g., extmem). In the latter case, the alloc_done flag * will remain at zero, so that we will not attempt to * deallocate the clusters by ourselves in * netmap_reset_obj_allocator. 
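The brute-force sizing in netmap_config_obj_allocator() above looks for the smallest number of objects whose combined size lands exactly on a page boundary (and stays under MAX_CLUSTSIZE), so that a cluster wastes no space before the next page. The standalone sketch below repeats that search; the demo_* names and the 4 KB page size are assumptions for illustration.

#include <stdio.h>

#define DEMO_PAGE_SIZE     4096
#define DEMO_MAX_CLUSTSIZE (1 << 22)	/* 4 MB, as above */

/* Smallest object count whose total size is page-aligned, or 0. */
static unsigned
demo_clustentries(unsigned objsize)
{
	unsigned i;

	for (i = 1; i * objsize <= DEMO_MAX_CLUSTSIZE; i++) {
		if ((i * objsize) % DEMO_PAGE_SIZE == 0)
			return i;
	}
	return 0;	/* no exact solution */
}

int
main(void)
{
	unsigned objsize = 2048;	/* typical netmap buffer size */
	unsigned n = demo_clustentries(objsize);

	printf("objsize %u -> %u objects per cluster (%u-byte clusters)\n",
	    objsize, n, n * objsize);	/* 2 objects, 4096-byte clusters */
	return 0;
}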
*/ return 0; } /* optimistically assume we have enough memory */ p->numclusters = p->_numclusters; p->objtotal = p->_objtotal; p->alloc_done = 1; p->lut = nm_alloc_lut(p->objtotal); if (p->lut == NULL) { nm_prerr("Unable to create lookup table for '%s'", p->name); goto clean; } /* * Allocate clusters, init pointers */ n = p->_clustsize; for (i = 0; i < (int)p->objtotal;) { int lim = i + p->_clustentries; char *clust; /* * XXX Note, we only need contigmalloc() for buffers attached * to native interfaces. In all other cases (nifp, netmap rings * and even buffers for VALE ports or emulated interfaces) we * can live with standard malloc, because the hardware will not * access the pages directly. */ clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (clust == NULL) { /* * If we get here, there is a severe memory shortage, * so halve the allocated memory to reclaim some. */ nm_prerr("Unable to create cluster at %d for '%s' allocator", i, p->name); if (i < 2) /* nothing to halve */ goto out; lim = i / 2; for (i--; i >= lim; i--) { if (i % p->_clustentries == 0 && p->lut[i].vaddr) contigfree(p->lut[i].vaddr, n, M_NETMAP); p->lut[i].vaddr = NULL; } out: p->objtotal = i; /* we may have stopped in the middle of a cluster */ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; break; } /* * Set lut state for all buffers in the current cluster. * * [i, lim) is the set of buffer indexes that cover the * current cluster. * * 'clust' is really the address of the current buffer in * the current cluster as we index through it with a stride * of p->_objsize. */ for (; i < lim; i++, clust += p->_objsize) { p->lut[i].vaddr = clust; #if !defined(linux) && !defined(_WIN32) p->lut[i].paddr = vtophys(clust); #endif } } p->memtotal = p->numclusters * p->_clustsize; if (netmap_verbose) nm_prinf("Pre-allocated %d clusters (%d/%dKB) for '%s'", p->numclusters, p->_clustsize >> 10, p->memtotal >> 10, p->name); return 0; clean: netmap_reset_obj_allocator(p); return ENOMEM; } /* call with lock held */ static int netmap_mem_params_changed(struct netmap_obj_params* p) { int i, rv = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) { p[i].last_size = p[i].size; p[i].last_num = p[i].num; rv = 1; } } return rv; } static void netmap_mem_reset_all(struct netmap_mem_d *nmd) { int i; if (netmap_debug & NM_DEBUG_MEM) nm_prinf("resetting %p", nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } static int netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) { int i, lim = p->objtotal; struct netmap_lut *lut = &na->na_lut; if (na == NULL || na->pdev == NULL) return 0; #if defined(__FreeBSD__) /* On FreeBSD mapping and unmapping is performed by the txsync * and rxsync routine, packet by packet. 
*/ (void)i; (void)lim; (void)lut; #elif defined(_WIN32) (void)i; (void)lim; (void)lut; nm_prerr("unsupported on Windows"); #else /* linux */ - ND("unmapping and freeing plut for %s", na->name); + nm_prdis("unmapping and freeing plut for %s", na->name); if (lut->plut == NULL) return 0; for (i = 0; i < lim; i += p->_clustentries) { if (lut->plut[i].paddr) netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize); } nm_free_plut(lut->plut); lut->plut = NULL; #endif /* linux */ return 0; } static int netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) { int error = 0; int i, lim = p->objtotal; struct netmap_lut *lut = &na->na_lut; if (na->pdev == NULL) return 0; #if defined(__FreeBSD__) /* On FreeBSD mapping and unmapping is performed by the txsync * and rxsync routine, packet by packet. */ (void)i; (void)lim; (void)lut; #elif defined(_WIN32) (void)i; (void)lim; (void)lut; nm_prerr("unsupported on Windows"); #else /* linux */ if (lut->plut != NULL) { - ND("plut already allocated for %s", na->name); + nm_prdis("plut already allocated for %s", na->name); return 0; } - ND("allocating physical lut for %s", na->name); + nm_prdis("allocating physical lut for %s", na->name); lut->plut = nm_alloc_plut(lim); if (lut->plut == NULL) { nm_prerr("Failed to allocate physical lut for %s", na->name); return ENOMEM; } for (i = 0; i < lim; i += p->_clustentries) { lut->plut[i].paddr = 0; } for (i = 0; i < lim; i += p->_clustentries) { int j; if (p->lut[i].vaddr == NULL) continue; error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->lut[i].vaddr, p->_clustsize); if (error) { nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name); break; } for (j = 1; j < p->_clustentries; j++) { lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize; } } if (error) netmap_mem_unmap(p, na); #endif /* linux */ return error; } static int netmap_mem_finalize_all(struct netmap_mem_d *nmd) { int i; if (nmd->flags & NETMAP_MEM_FINALIZED) return 0; nmd->lasterr = 0; nmd->nm_totalsize = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); if (nmd->lasterr) goto error; nmd->nm_totalsize += nmd->pools[i].memtotal; } nmd->lasterr = netmap_mem_init_bitmaps(nmd); if (nmd->lasterr) goto error; nmd->flags |= NETMAP_MEM_FINALIZED; if (netmap_verbose) nm_prinf("interfaces %d KB, rings %d KB, buffers %d MB", nmd->pools[NETMAP_IF_POOL].memtotal >> 10, nmd->pools[NETMAP_RING_POOL].memtotal >> 10, nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); if (netmap_verbose) nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); return 0; error: netmap_mem_reset_all(nmd); return nmd->lasterr; } /* * allocator for private memory */ static void * _netmap_mem_private_new(size_t size, struct netmap_obj_params *p, struct netmap_mem_ops *ops, int *perr) { struct netmap_mem_d *d = NULL; int i, err = 0; d = nm_os_malloc(size); if (d == NULL) { err = ENOMEM; goto error; } *d = nm_blueprint; d->ops = ops; err = nm_mem_assign_id(d); if (err) goto error_free; snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id); for (i = 0; i < NETMAP_POOLS_NR; i++) { snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, nm_blueprint.pools[i].name, d->name); d->params[i].num = p[i].num; d->params[i].size = p[i].size; } NMA_LOCK_INIT(d); err = netmap_mem_config(d); if (err) goto error_rel_id; d->flags &= ~NETMAP_MEM_FINALIZED; return d; error_rel_id: NMA_LOCK_DESTROY(d); nm_mem_release_id(d); error_free: nm_os_free(d); error: if (perr) *perr = err; 
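The brute-force sizing in netmap_config_obj_allocator() above is easier to follow in isolation: objsize is first rounded up to NM_CACHE_ALIGN, then the allocator picks the smallest number of objects whose combined size is an exact multiple of the page size, so that no buffer straddles a cluster boundary and userspace sees contiguous objects. The standalone sketch below is not driver code; it assumes a 4096-byte page, the same 4 MB cluster cap, and uses illustrative sk_* names.

/*
 * Sketch of the cluster sizing done by netmap_config_obj_allocator().
 * The real function additionally rejects objsize >= the 4 MB cap and
 * range-checks the request against the pool limits.
 */
#include <stdio.h>

#define SK_PAGE_SIZE	4096u
#define SK_CACHE_ALIGN	64u		/* NM_CACHE_ALIGN */
#define SK_MAX_CLUSTSIZE (1u << 22)	/* 4 MB */

/* round the object size up to a cache-line multiple */
static unsigned
sk_round(unsigned objsize)
{
	unsigned rem = objsize % SK_CACHE_ALIGN;

	return rem ? objsize + SK_CACHE_ALIGN - rem : objsize;
}

/* smallest object count whose total size is page aligned, 0 if none fits */
static unsigned
sk_clustentries(unsigned objsize)
{
	unsigned i;

	for (i = 1; i * objsize <= SK_MAX_CLUSTSIZE; i++)
		if ((i * objsize) % SK_PAGE_SIZE == 0)
			return i;
	return 0;
}

int
main(void)
{
	unsigned sizes[] = { 2048, 1152, 36864 };
	int k;

	for (k = 0; k < 3; k++) {
		unsigned objsize = sk_round(sizes[k]);
		unsigned n = sk_clustentries(objsize);

		printf("objsize %u -> %u objects per %u-byte cluster\n",
		    objsize, n, n * objsize);
	}
	return 0;
}

For a 2048-byte object this gives 2 objects per 4 KB cluster; for 1152 bytes the first exact fit is 32 objects in a 36 KB cluster, the kind of waste-free packing the comment about contiguous userspace buffers is after.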
return NULL; } struct netmap_mem_d * netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr) { struct netmap_mem_d *d = NULL; struct netmap_obj_params p[NETMAP_POOLS_NR]; int i; u_int v, maxd; /* account for the fake host rings */ txr++; rxr++; /* copy the min values */ for (i = 0; i < NETMAP_POOLS_NR; i++) { p[i] = netmap_min_priv_params[i]; } /* possibly increase them to fit user request */ v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); if (p[NETMAP_IF_POOL].size < v) p[NETMAP_IF_POOL].size = v; v = 2 + 4 * npipes; if (p[NETMAP_IF_POOL].num < v) p[NETMAP_IF_POOL].num = v; maxd = (txd > rxd) ? txd : rxd; v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; if (p[NETMAP_RING_POOL].size < v) p[NETMAP_RING_POOL].size = v; /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) * and two rx rings (again, 1 normal and 1 fake host) */ v = txr + rxr + 8 * npipes; if (p[NETMAP_RING_POOL].num < v) p[NETMAP_RING_POOL].num = v; /* for each pipe we only need the buffers for the 4 "real" rings. * On the other end, the pipe ring dimension may be different from * the parent port ring dimension. As a compromise, we allocate twice the * space actually needed if the pipe rings were the same size as the parent rings */ v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ if (p[NETMAP_BUF_POOL].num < v) p[NETMAP_BUF_POOL].num = v; if (netmap_verbose) nm_prinf("req if %d*%d ring %d*%d buf %d*%d", p[NETMAP_IF_POOL].num, p[NETMAP_IF_POOL].size, p[NETMAP_RING_POOL].num, p[NETMAP_RING_POOL].size, p[NETMAP_BUF_POOL].num, p[NETMAP_BUF_POOL].size); d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr); return d; } /* call with lock held */ static int netmap_mem2_config(struct netmap_mem_d *nmd) { int i; if (!netmap_mem_params_changed(nmd->params)) goto out; - ND("reconfiguring"); + nm_prdis("reconfiguring"); if (nmd->flags & NETMAP_MEM_FINALIZED) { /* reset previous allocation */ for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], nmd->params[i].num, nmd->params[i].size); if (nmd->lasterr) goto out; } out: return nmd->lasterr; } static int netmap_mem2_finalize(struct netmap_mem_d *nmd) { if (nmd->flags & NETMAP_MEM_FINALIZED) goto out; if (netmap_mem_finalize_all(nmd)) goto out; nmd->lasterr = 0; out: return nmd->lasterr; } static void netmap_mem2_delete(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_destroy_obj_allocator(&nmd->pools[i]); } NMA_LOCK_DESTROY(nmd); if (nmd != &nm_mem) nm_os_free(nmd); } #ifdef WITH_EXTMEM /* doubly linekd list of all existing external allocators */ static struct netmap_mem_ext *netmap_mem_ext_list = NULL; NM_MTX_T nm_mem_ext_list_lock; #endif /* WITH_EXTMEM */ int netmap_mem_init(void) { NM_MTX_INIT(nm_mem_list_lock); NMA_LOCK_INIT(&nm_mem); netmap_mem_get(&nm_mem); #ifdef WITH_EXTMEM NM_MTX_INIT(nm_mem_ext_list_lock); #endif /* WITH_EXTMEM */ return (0); } void netmap_mem_fini(void) { netmap_mem_put(&nm_mem); } static void netmap_free_rings(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < netmap_all_rings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; if (ring == NULL || kring->users > 0 || 
(kring->nr_kflags & NKR_NEEDRING)) { if (netmap_debug & NM_DEBUG_MEM) nm_prinf("NOT deleting ring %s (ring %p, users %d neekring %d)", kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); continue; } if (netmap_debug & NM_DEBUG_MEM) nm_prinf("deleting ring %s", kring->name); if (!(kring->nr_kflags & NKR_FAKERING)) { - ND("freeing bufs for %s", kring->name); + nm_prdis("freeing bufs for %s", kring->name); netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); } else { - ND("NOT freeing bufs for %s", kring->name); + nm_prdis("NOT freeing bufs for %s", kring->name); } netmap_ring_free(na->nm_mem, ring); kring->ring = NULL; } } } /* call with NMA_LOCK held * * * Allocate netmap rings and buffers for this card * The rings are contiguous, but have variable size. * The kring array must follow the layout described * in netmap_krings_create(). */ static int netmap_mem2_rings_create(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < netmap_all_rings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; u_int len, ndesc; if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) { /* uneeded, or already created by somebody else */ if (netmap_debug & NM_DEBUG_MEM) nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)", kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); continue; } if (netmap_debug & NM_DEBUG_MEM) nm_prinf("creating %s", kring->name); ndesc = kring->nkr_num_slots; len = sizeof(struct netmap_ring) + ndesc * sizeof(struct netmap_slot); ring = netmap_ring_malloc(na->nm_mem, len); if (ring == NULL) { nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t)); goto cleanup; } - ND("txring at %p", ring); + nm_prdis("txring at %p", ring); kring->ring = ring; *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; *(int64_t *)(uintptr_t)&ring->buf_ofs = (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - netmap_ring_offset(na->nm_mem, ring); /* copy values from kring */ ring->head = kring->rhead; ring->cur = kring->rcur; ring->tail = kring->rtail; *(uint32_t *)(uintptr_t)&ring->nr_buf_size = netmap_mem_bufsize(na->nm_mem); - ND("%s h %d c %d t %d", kring->name, + nm_prdis("%s h %d c %d t %d", kring->name, ring->head, ring->cur, ring->tail); - ND("initializing slots for %s_ring", nm_txrx2str(t)); + nm_prdis("initializing slots for %s_ring", nm_txrx2str(t)); if (!(kring->nr_kflags & NKR_FAKERING)) { /* this is a real ring */ if (netmap_debug & NM_DEBUG_MEM) nm_prinf("allocating buffers for %s", kring->name); if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t)); goto cleanup; } } else { /* this is a fake ring, set all indices to 0 */ if (netmap_debug & NM_DEBUG_MEM) nm_prinf("NOT allocating buffers for %s", kring->name); netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0); } /* ring info */ *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; } } return 0; cleanup: /* we cannot actually cleanup here, since we don't own kring->users * and kring->nr_klags & NKR_NEEDRING. The caller must decrement * the first or zero-out the second, then call netmap_free_rings() * to do the cleanup */ return ENOMEM; } static void netmap_mem2_rings_delete(struct netmap_adapter *na) { /* last instance, release bufs and rings */ netmap_free_rings(na); } /* call with NMA_LOCK held */ /* * Allocate the per-fd structure netmap_if. 
* * We assume that the configuration stored in na * (number of tx/rx rings and descs) does not change while * the interface is in netmap mode. */ static struct netmap_if * netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) { struct netmap_if *nifp; ssize_t base; /* handy for relative offsets between rings and nifp */ u_int i, len, n[NR_TXRX], ntot; enum txrx t; ntot = 0; for_rx_tx(t) { /* account for the (eventually fake) host rings */ n[t] = netmap_all_rings(na, t); ntot += n[t]; } /* * the descriptor is followed inline by an array of offsets * to the tx and rx rings in the shared memory region. */ len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); nifp = netmap_if_malloc(na->nm_mem, len); if (nifp == NULL) { NMA_UNLOCK(na->nm_mem); return NULL; } /* initialize base fields -- override const */ *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name)); /* * fill the slots for the rx and tx rings. They contain the offset * between the ring and nifp, so the information is usable in * userspace to reach the ring from the nifp. */ base = netmap_if_offset(na->nm_mem, nifp); for (i = 0; i < n[NR_TX]; i++) { /* XXX instead of ofs == 0 maybe use the offset of an error * ring, like we do for buffers? */ ssize_t ofs = 0; if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX] && i < priv->np_qlast[NR_TX]) { ofs = netmap_ring_offset(na->nm_mem, na->tx_rings[i]->ring) - base; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs; } for (i = 0; i < n[NR_RX]; i++) { /* XXX instead of ofs == 0 maybe use the offset of an error * ring, like we do for buffers? */ ssize_t ofs = 0; if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX] && i < priv->np_qlast[NR_RX]) { ofs = netmap_ring_offset(na->nm_mem, na->rx_rings[i]->ring) - base; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs; } return (nifp); } static void netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { if (nifp == NULL) /* nothing to do */ return; if (nifp->ni_bufs_head) netmap_extra_free(na, nifp->ni_bufs_head); netmap_if_free(na->nm_mem, nifp); } static void netmap_mem2_deref(struct netmap_mem_d *nmd) { if (netmap_debug & NM_DEBUG_MEM) nm_prinf("active = %d", nmd->active); } struct netmap_mem_ops netmap_mem_global_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem2_config, .nmd_finalize = netmap_mem2_finalize, .nmd_deref = netmap_mem2_deref, .nmd_delete = netmap_mem2_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; int netmap_mem_pools_info_get(struct nmreq_pools_info *req, struct netmap_mem_d *nmd) { int ret; ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL, &req->nr_mem_id); if (ret) { return ret; } NMA_LOCK(nmd); req->nr_if_pool_offset = 0; req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + nmd->pools[NETMAP_RING_POOL].memtotal; req->nr_buf_pool_objtotal 
= nmd->pools[NETMAP_BUF_POOL].objtotal; req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; NMA_UNLOCK(nmd); return 0; } #ifdef WITH_EXTMEM struct netmap_mem_ext { struct netmap_mem_d up; struct nm_os_extmem *os; struct netmap_mem_ext *next, *prev; }; /* call with nm_mem_list_lock held */ static void netmap_mem_ext_register(struct netmap_mem_ext *e) { NM_MTX_LOCK(nm_mem_ext_list_lock); if (netmap_mem_ext_list) netmap_mem_ext_list->prev = e; e->next = netmap_mem_ext_list; netmap_mem_ext_list = e; e->prev = NULL; NM_MTX_UNLOCK(nm_mem_ext_list_lock); } /* call with nm_mem_list_lock held */ static void netmap_mem_ext_unregister(struct netmap_mem_ext *e) { if (e->prev) e->prev->next = e->next; else netmap_mem_ext_list = e->next; if (e->next) e->next->prev = e->prev; e->prev = e->next = NULL; } static struct netmap_mem_ext * netmap_mem_ext_search(struct nm_os_extmem *os) { struct netmap_mem_ext *e; NM_MTX_LOCK(nm_mem_ext_list_lock); for (e = netmap_mem_ext_list; e; e = e->next) { if (nm_os_extmem_isequal(e->os, os)) { netmap_mem_get(&e->up); break; } } NM_MTX_UNLOCK(nm_mem_ext_list_lock); return e; } static void netmap_mem_ext_delete(struct netmap_mem_d *d) { int i; struct netmap_mem_ext *e = (struct netmap_mem_ext *)d; netmap_mem_ext_unregister(e); for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &d->pools[i]; if (p->lut) { nm_free_lut(p->lut, p->objtotal); p->lut = NULL; } } if (e->os) nm_os_extmem_delete(e->os); netmap_mem2_delete(d); } static int netmap_mem_ext_config(struct netmap_mem_d *nmd) { return 0; } struct netmap_mem_ops netmap_mem_ext_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_ext_config, .nmd_finalize = netmap_mem2_finalize, .nmd_deref = netmap_mem2_deref, .nmd_delete = netmap_mem_ext_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; struct netmap_mem_d * netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror) { int error = 0; int i, j; struct netmap_mem_ext *nme; char *clust; size_t off; struct nm_os_extmem *os = NULL; int nr_pages; // XXX sanity checks if (pi->nr_if_pool_objtotal == 0) pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num; if (pi->nr_if_pool_objsize == 0) pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size; if (pi->nr_ring_pool_objtotal == 0) pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num; if (pi->nr_ring_pool_objsize == 0) pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size; if (pi->nr_buf_pool_objtotal == 0) pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num; if (pi->nr_buf_pool_objsize == 0) pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size; if (netmap_verbose & NM_DEBUG_MEM) nm_prinf("if %d %d ring %d %d buf %d %d", pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize, pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize, pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize); os = nm_os_extmem_create(usrptr, pi, &error); if (os == NULL) { nm_prerr("os extmem creation failed"); goto out; } nme = netmap_mem_ext_search(os); if (nme) { nm_os_extmem_delete(os); return &nme->up; } if (netmap_verbose & NM_DEBUG_MEM) nm_prinf("not found, creating new"); nme = _netmap_mem_private_new(sizeof(*nme), (struct netmap_obj_params[]){ { 
pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal }, { pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal }, { pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }}, &netmap_mem_ext_ops, &error); if (nme == NULL) goto out_unmap; nr_pages = nm_os_extmem_nr_pages(os); /* from now on pages will be released by nme destructor; * we let res = 0 to prevent release in out_unmap below */ nme->os = os; os = NULL; /* pass ownership */ clust = nm_os_extmem_nextpage(nme->os); off = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nme->up.pools[i]; struct netmap_obj_params *o = &nme->up.params[i]; p->_objsize = o->size; p->_clustsize = o->size; p->_clustentries = 1; p->lut = nm_alloc_lut(o->num); if (p->lut == NULL) { error = ENOMEM; goto out_delete; } p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t); p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots); if (p->invalid_bitmap == NULL) { error = ENOMEM; goto out_delete; } if (nr_pages == 0) { p->objtotal = 0; p->memtotal = 0; p->objfree = 0; continue; } for (j = 0; j < o->num && nr_pages > 0; j++) { size_t noff; p->lut[j].vaddr = clust + off; #if !defined(linux) && !defined(_WIN32) p->lut[j].paddr = vtophys(p->lut[j].vaddr); #endif - ND("%s %d at %p", p->name, j, p->lut[j].vaddr); + nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr); noff = off + p->_objsize; if (noff < PAGE_SIZE) { off = noff; continue; } - ND("too big, recomputing offset..."); + nm_prdis("too big, recomputing offset..."); while (noff >= PAGE_SIZE) { char *old_clust = clust; noff -= PAGE_SIZE; clust = nm_os_extmem_nextpage(nme->os); nr_pages--; - ND("noff %zu page %p nr_pages %d", noff, + nm_prdis("noff %zu page %p nr_pages %d", noff, page_to_virt(*pages), nr_pages); if (noff > 0 && !nm_isset(p->invalid_bitmap, j) && (nr_pages == 0 || old_clust + PAGE_SIZE != clust)) { /* out of space or non contiguous, * drop this object * */ p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U); - ND("non contiguous at off %zu, drop", noff); + nm_prdis("non contiguous at off %zu, drop", noff); } if (nr_pages == 0) break; } off = noff; } p->objtotal = j; p->numclusters = p->objtotal; p->memtotal = j * p->_objsize; - ND("%d memtotal %u", j, p->memtotal); + nm_prdis("%d memtotal %u", j, p->memtotal); } netmap_mem_ext_register(nme); return &nme->up; out_delete: netmap_mem_put(&nme->up); out_unmap: if (os) nm_os_extmem_delete(os); out: if (perror) *perror = error; return NULL; } #endif /* WITH_EXTMEM */ #ifdef WITH_PTNETMAP struct mem_pt_if { struct mem_pt_if *next; struct ifnet *ifp; unsigned int nifp_offset; }; /* Netmap allocator for ptnetmap guests. */ struct netmap_mem_ptg { struct netmap_mem_d up; vm_paddr_t nm_paddr; /* physical address in the guest */ void *nm_addr; /* virtual address in the guest */ struct netmap_lut buf_lut; /* lookup table for BUF pool in the guest */ nm_memid_t host_mem_id; /* allocator identifier in the host */ struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */ struct mem_pt_if *pt_ifs; /* list of interfaces in passthrough */ }; /* Link a passthrough interface to a passthrough netmap allocator. 
*/ static int netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp, unsigned int nifp_offset) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif)); if (!ptif) { return ENOMEM; } NMA_LOCK(nmd); ptif->ifp = ifp; ptif->nifp_offset = nifp_offset; if (ptnmd->pt_ifs) { ptif->next = ptnmd->pt_ifs; } ptnmd->pt_ifs = ptif; NMA_UNLOCK(nmd); nm_prinf("ifp=%s,nifp_offset=%u", ptif->ifp->if_xname, ptif->nifp_offset); return 0; } /* Called with NMA_LOCK(nmd) held. */ static struct mem_pt_if * netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *curr; for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { return curr; } } return NULL; } /* Unlink a passthrough interface from a passthrough netmap allocator. */ int netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *prev = NULL; struct mem_pt_if *curr; int ret = -1; NMA_LOCK(nmd); for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { if (prev) { prev->next = curr->next; } else { ptnmd->pt_ifs = curr->next; } - D("removed (ifp=%p,nifp_offset=%u)", + nm_prinf("removed (ifp=%p,nifp_offset=%u)", curr->ifp, curr->nifp_offset); nm_os_free(curr); ret = 0; break; } prev = curr; } NMA_UNLOCK(nmd); return ret; } static int netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { return EINVAL; } *lut = ptnmd->buf_lut; return 0; } static int netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size, u_int *memflags, uint16_t *id) { int error = 0; error = nmd->ops->nmd_config(nmd); if (error) goto out; if (size) *size = nmd->nm_totalsize; if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: return error; } static vm_paddr_t netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; vm_paddr_t paddr; /* if the offset is valid, just return csb->base_addr + off */ paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); - ND("off %lx padr %lx", off, (unsigned long)paddr); + nm_prdis("off %lx padr %lx", off, (unsigned long)paddr); return paddr; } static int netmap_mem_pt_guest_config(struct netmap_mem_d *nmd) { /* nothing to do, we are configured on creation * and configuration never changes thereafter */ return 0; } static int netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; uint64_t mem_size; uint32_t bufsize; uint32_t nbuffers; uint32_t poolofs; vm_paddr_t paddr; char *vaddr; int i; int error = 0; if (nmd->flags & NETMAP_MEM_FINALIZED) goto out; if (ptnmd->ptn_dev == NULL) { - D("ptnetmap memdev not attached"); + nm_prerr("ptnetmap memdev not attached"); error = ENOMEM; goto out; } /* Map memory through ptnetmap-memdev BAR. */ error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr, &ptnmd->nm_addr, &mem_size); if (error) goto out; /* Initialize the lut using the information contained in the * ptnetmap memory device. 
*/ bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OBJSZ); nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OBJNUM); /* allocate the lut */ if (ptnmd->buf_lut.lut == NULL) { - D("allocating lut"); + nm_prinf("allocating lut"); ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers); if (ptnmd->buf_lut.lut == NULL) { - D("lut allocation failed"); + nm_prerr("lut allocation failed"); return ENOMEM; } } /* we have physically contiguous memory mapped through PCI BAR */ poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OFS); vaddr = (char *)(ptnmd->nm_addr) + poolofs; paddr = ptnmd->nm_paddr + poolofs; for (i = 0; i < nbuffers; i++) { ptnmd->buf_lut.lut[i].vaddr = vaddr; vaddr += bufsize; paddr += bufsize; } ptnmd->buf_lut.objtotal = nbuffers; ptnmd->buf_lut.objsize = bufsize; nmd->nm_totalsize = (unsigned int)mem_size; /* Initialize these fields as are needed by * netmap_mem_bufsize(). * XXX please improve this, why do we need this * replication? maybe we nmd->pools[] should no be * there for the guest allocator? */ nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize; nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers; nmd->flags |= NETMAP_MEM_FINALIZED; out: return error; } static void netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; if (nmd->active == 1 && (nmd->flags & NETMAP_MEM_FINALIZED)) { nmd->flags &= ~NETMAP_MEM_FINALIZED; /* unmap ptnetmap-memdev memory */ if (ptnmd->ptn_dev) { nm_os_pt_memdev_iounmap(ptnmd->ptn_dev); } ptnmd->nm_addr = NULL; ptnmd->nm_paddr = 0; } } static ssize_t netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; return (const char *)(vaddr) - (char *)(ptnmd->nm_addr); } static void netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd) { if (nmd == NULL) return; if (netmap_verbose) - D("deleting %p", nmd); + nm_prinf("deleting %p", nmd); if (nmd->active > 0) - D("bug: deleting mem allocator with active=%d!", nmd->active); + nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active); if (netmap_verbose) - D("done deleting %p", nmd); + nm_prinf("done deleting %p", nmd); NMA_LOCK_DESTROY(nmd); nm_os_free(nmd); } static struct netmap_if * netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; struct mem_pt_if *ptif; struct netmap_if *nifp = NULL; ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { - D("Error: interface %p is not in passthrough", na->ifp); + nm_prerr("interface %s is not in passthrough", na->name); goto out; } nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) + ptif->nifp_offset); out: return nifp; } static void netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { struct mem_pt_if *ptif; ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { - D("Error: interface %p is not in passthrough", na->ifp); + nm_prerr("interface %s is not in passthrough", na->name); } } static int netmap_mem_pt_guest_rings_create(struct netmap_adapter *na) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; struct mem_pt_if *ptif; struct netmap_if *nifp; int i, error = -1; ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { - D("Error: interface %p is not in passthrough", na->ifp); + nm_prerr("interface %s is not in passthrough", na->name); 
goto out; } /* point each kring to the corresponding backend ring */ nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset); for (i = 0; i < netmap_all_rings(na, NR_TX); i++) { struct netmap_kring *kring = na->tx_rings[i]; if (kring->ring) continue; kring->ring = (struct netmap_ring *) ((char *)nifp + nifp->ring_ofs[i]); } for (i = 0; i < netmap_all_rings(na, NR_RX); i++) { struct netmap_kring *kring = na->rx_rings[i]; if (kring->ring) continue; kring->ring = (struct netmap_ring *) ((char *)nifp + nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]); } error = 0; out: return error; } static void netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na) { #if 0 enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { struct netmap_kring *kring = &NMR(na, t)[i]; kring->ring = NULL; } } #endif } static struct netmap_mem_ops netmap_mem_pt_guest_ops = { .nmd_get_lut = netmap_mem_pt_guest_get_lut, .nmd_get_info = netmap_mem_pt_guest_get_info, .nmd_ofstophys = netmap_mem_pt_guest_ofstophys, .nmd_config = netmap_mem_pt_guest_config, .nmd_finalize = netmap_mem_pt_guest_finalize, .nmd_deref = netmap_mem_pt_guest_deref, .nmd_if_offset = netmap_mem_pt_guest_if_offset, .nmd_delete = netmap_mem_pt_guest_delete, .nmd_if_new = netmap_mem_pt_guest_if_new, .nmd_if_delete = netmap_mem_pt_guest_if_delete, .nmd_rings_create = netmap_mem_pt_guest_rings_create, .nmd_rings_delete = netmap_mem_pt_guest_rings_delete }; /* Called with nm_mem_list_lock held. */ static struct netmap_mem_d * netmap_mem_pt_guest_find_memid(nm_memid_t mem_id) { struct netmap_mem_d *mem = NULL; struct netmap_mem_d *scan = netmap_last_mem_d; do { /* find ptnetmap allocator through host ID */ if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { mem = scan; mem->refcount++; NM_DBG_REFC(mem, __FUNCTION__, __LINE__); break; } scan = scan->next; } while (scan != netmap_last_mem_d); return mem; } /* Called with nm_mem_list_lock held. */ static struct netmap_mem_d * netmap_mem_pt_guest_create(nm_memid_t mem_id) { struct netmap_mem_ptg *ptnmd; int err = 0; ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg)); if (ptnmd == NULL) { err = ENOMEM; goto error; } ptnmd->up.ops = &netmap_mem_pt_guest_ops; ptnmd->host_mem_id = mem_id; ptnmd->pt_ifs = NULL; /* Assign new id in the guest (We have the lock) */ err = nm_mem_assign_id_locked(&ptnmd->up); if (err) goto error; ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; ptnmd->up.flags |= NETMAP_MEM_IO; NMA_LOCK_INIT(&ptnmd->up); snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id); return &ptnmd->up; error: netmap_mem_pt_guest_delete(&ptnmd->up); return NULL; } /* * find host id in guest allocators and create guest allocator * if it is not there */ static struct netmap_mem_d * netmap_mem_pt_guest_get(nm_memid_t mem_id) { struct netmap_mem_d *nmd; NM_MTX_LOCK(nm_mem_list_lock); nmd = netmap_mem_pt_guest_find_memid(mem_id); if (nmd == NULL) { nmd = netmap_mem_pt_guest_create(mem_id); } NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } /* * The guest allocator can be created by ptnetmap_memdev (during the device * attach) or by ptnetmap device (ptnet), during the netmap_attach. * * The order is not important (we have different order in LINUX and FreeBSD). * The first one, creates the device, and the second one simply attaches it. 
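The "either order works" property comes from both attach paths funneling into the same lookup-or-create step, keyed by the host allocator id and serialized by nm_mem_list_lock (netmap_mem_pt_guest_get() above): whichever path runs second simply takes a reference on the allocator the first one created. A reduced sketch of that pattern, not driver code, with illustrative sk_* names and a plain pthread mutex standing in for the netmap list lock:

/*
 * Find-or-create keyed by the host memory id.  The second caller with
 * the same id gets the object created by the first caller.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct sk_guest_mem {
	unsigned host_mem_id;
	int refcount;
	struct sk_guest_mem *next;
};

static struct sk_guest_mem *sk_list;
static pthread_mutex_t sk_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct sk_guest_mem *
sk_guest_mem_get(unsigned host_mem_id)
{
	struct sk_guest_mem *m;

	pthread_mutex_lock(&sk_list_lock);
	for (m = sk_list; m != NULL; m = m->next)
		if (m->host_mem_id == host_mem_id)
			break;
	if (m == NULL && (m = calloc(1, sizeof(*m))) != NULL) {
		/* first caller: create and link the allocator */
		m->host_mem_id = host_mem_id;
		m->next = sk_list;
		sk_list = m;
	}
	if (m != NULL)
		m->refcount++;
	pthread_mutex_unlock(&sk_list_lock);
	return m;
}

int
main(void)
{
	/* the "memdev" and the "ptnet" paths both ask for host id 3 */
	struct sk_guest_mem *a = sk_guest_mem_get(3);
	struct sk_guest_mem *b = sk_guest_mem_get(3);

	printf("same allocator: %d, refcount %d\n", a == b, b ? b->refcount : 0);
	return 0;
}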
*/ /* Called when ptnetmap_memdev is attaching, to attach a new allocator in * the guest */ struct netmap_mem_d * netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id) { struct netmap_mem_d *nmd; struct netmap_mem_ptg *ptnmd; nmd = netmap_mem_pt_guest_get(mem_id); /* assign this device to the guest allocator */ if (nmd) { ptnmd = (struct netmap_mem_ptg *)nmd; ptnmd->ptn_dev = ptn_dev; } return nmd; } /* Called when ptnet device is attaching */ struct netmap_mem_d * netmap_mem_pt_guest_new(struct ifnet *ifp, unsigned int nifp_offset, unsigned int memid) { struct netmap_mem_d *nmd; if (ifp == NULL) { return NULL; } nmd = netmap_mem_pt_guest_get((nm_memid_t)memid); if (nmd) { netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset); } return nmd; } #endif /* WITH_PTNETMAP */ Index: stable/12/sys/dev/netmap/netmap_monitor.c =================================================================== --- stable/12/sys/dev/netmap/netmap_monitor.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_monitor.c (revision 344046) @@ -1,1045 +1,1044 @@ /* * Copyright (C) 2014-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Monitors * * netmap monitors can be used to do monitoring of network traffic * on another adapter, when the latter adapter is working in netmap mode. * * Monitors offer to userspace the same interface as any other netmap port, * with as many pairs of netmap rings as the monitored adapter. * However, only the rx rings are actually used. Each monitor rx ring receives * the traffic transiting on both the tx and rx corresponding rings in the * monitored adapter. During registration, the user can choose if she wants * to intercept tx only, rx only, or both tx and rx traffic. * * If the monitor is not able to cope with the stream of frames, excess traffic * will be dropped. * * If the monitored adapter leaves netmap mode, the monitor has to be restarted. * * Monitors can be either zero-copy or copy-based. * * Copy monitors see the frames before they are consumed: * * - For tx traffic, this is when the application sends them, before they are * passed down to the adapter. 
* * - For rx traffic, this is when they are received by the adapter, before * they are sent up to the application, if any (note that, if no * application is reading from a monitored ring, the ring will eventually * fill up and traffic will stop). * * Zero-copy monitors only see the frames after they have been consumed: * * - For tx traffic, this is after the slots containing the frames have been * marked as free. Note that this may happen at a considerably delay after * frame transmission, since freeing of slots is often done lazily. * * - For rx traffic, this is after the consumer on the monitored adapter * has released them. In most cases, the consumer is a userspace * application which may have modified the frame contents. * * Several copy or zero-copy monitors may be active on any ring. * */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_MONITOR #define NM_MONITOR_MAXSLOTS 4096 /* ******************************************************************** * functions common to both kind of monitors ******************************************************************** */ static int netmap_zmon_reg(struct netmap_adapter *, int); static int nm_is_zmon(struct netmap_adapter *na) { return na->nm_register == netmap_zmon_reg; } /* nm_sync callback for the monitor's own tx rings. * This makes no sense and always returns error */ static int netmap_monitor_txsync(struct netmap_kring *kring, int flags) { - RD(1, "%s %x", kring->name, flags); + nm_prlim(1, "%s %x", kring->name, flags); return EIO; } /* nm_sync callback for the monitor's own rx rings. * Note that the lock in netmap_zmon_parent_sync only protects * writers among themselves. Synchronization between writers * (i.e., netmap_zmon_parent_txsync and netmap_zmon_parent_rxsync) * and readers (i.e., netmap_zmon_rxsync) relies on memory barriers. */ static int netmap_monitor_rxsync(struct netmap_kring *kring, int flags) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)kring->na; if (unlikely(mna->priv.np_na == NULL)) { /* parent left netmap mode */ return EIO; } - ND("%s %x", kring->name, flags); + nm_prdis("%s %x", kring->name, flags); kring->nr_hwcur = kring->rhead; mb(); return 0; } /* nm_krings_create callbacks for monitors. */ static int netmap_monitor_krings_create(struct netmap_adapter *na) { int error = netmap_krings_create(na, 0); enum txrx t; if (error) return error; /* override the host rings callbacks */ for_rx_tx(t) { int i; u_int first = nma_get_nrings(na, t); for (i = 0; i < nma_get_host_nrings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[first + i]; kring->nm_sync = t == NR_TX ? netmap_monitor_txsync : netmap_monitor_rxsync; } } return 0; } /* nm_krings_delete callback for monitors */ static void netmap_monitor_krings_delete(struct netmap_adapter *na) { netmap_krings_delete(na); } static u_int nm_txrx2flag(enum txrx t) { return (t == NR_RX ? 
NR_MONITOR_RX : NR_MONITOR_TX); } /* allocate the monitors array in the monitored kring */ static int nm_monitor_alloc(struct netmap_kring *kring, u_int n) { size_t old_len, len; struct netmap_kring **nm; if (n <= kring->max_monitors) /* we already have more entries that requested */ return 0; old_len = sizeof(struct netmap_kring *)*kring->max_monitors; len = sizeof(struct netmap_kring *) * n; nm = nm_os_realloc(kring->monitors, len, old_len); if (nm == NULL) return ENOMEM; kring->monitors = nm; kring->max_monitors = n; return 0; } /* deallocate the parent array in the parent adapter */ static void nm_monitor_dealloc(struct netmap_kring *kring) { if (kring->monitors) { if (kring->n_monitors > 0) { - D("freeing not empty monitor array for %s (%d dangling monitors)!", kring->name, - kring->n_monitors); + nm_prerr("freeing not empty monitor array for %s (%d dangling monitors)!", + kring->name, kring->n_monitors); } nm_os_free(kring->monitors); kring->monitors = NULL; kring->max_monitors = 0; kring->n_monitors = 0; } } /* returns 1 iff kring has no monitors */ static inline int nm_monitor_none(struct netmap_kring *kring) { return kring->n_monitors == 0 && kring->zmon_list[NR_TX].next == NULL && kring->zmon_list[NR_RX].next == NULL; } /* * monitors work by replacing the nm_sync() and possibly the * nm_notify() callbacks in the monitored rings. */ static int netmap_zmon_parent_txsync(struct netmap_kring *, int); static int netmap_zmon_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_txsync(struct netmap_kring *, int); static int netmap_monitor_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_notify(struct netmap_kring *, int); static int nm_monitor_dummycb(struct netmap_kring *kring, int flags) { (void)kring; (void)flags; return 0; } static void nm_monitor_intercept_callbacks(struct netmap_kring *kring) { - ND("intercept callbacks on %s", kring->name); + nm_prdis("intercept callbacks on %s", kring->name); kring->mon_sync = kring->nm_sync != NULL ? kring->nm_sync : nm_monitor_dummycb; kring->mon_notify = kring->nm_notify; if (kring->tx == NR_TX) { kring->nm_sync = netmap_monitor_parent_txsync; } else { kring->nm_sync = netmap_monitor_parent_rxsync; kring->nm_notify = netmap_monitor_parent_notify; kring->mon_tail = kring->nr_hwtail; } } static void nm_monitor_restore_callbacks(struct netmap_kring *kring) { - ND("restoring callbacks on %s", kring->name); + nm_prdis("restoring callbacks on %s", kring->name); kring->nm_sync = kring->mon_sync; kring->mon_sync = NULL; if (kring->tx == NR_RX) { kring->nm_notify = kring->mon_notify; } kring->mon_notify = NULL; } static struct netmap_kring * nm_zmon_list_head(struct netmap_kring *mkring, enum txrx t) { struct netmap_adapter *na = mkring->na; struct netmap_kring *kring = mkring; struct netmap_zmon_list *z = &kring->zmon_list[t]; /* reach the head of the list */ while (nm_is_zmon(na) && z->prev != NULL) { kring = z->prev; na = kring->na; z = &kring->zmon_list[t]; } return nm_is_zmon(na) ? NULL : kring; } /* add the monitor mkring to the list of monitors of kring. 
* If this is the first monitor, intercept the callbacks */ static int netmap_monitor_add(struct netmap_kring *mkring, struct netmap_kring *kring, int zmon) { int error = NM_IRQ_COMPLETED; enum txrx t = kring->tx; struct netmap_zmon_list *z = &kring->zmon_list[t]; struct netmap_zmon_list *mz = &mkring->zmon_list[t]; struct netmap_kring *ikring = kring; /* a zero-copy monitor which is not the first in the list * must monitor the previous monitor */ if (zmon && z->prev != NULL) ikring = z->prev; /* tail of the list */ /* synchronize with concurrently running nm_sync()s */ nm_kr_stop(kring, NM_KR_LOCKED); if (nm_monitor_none(ikring)) { /* this is the first monitor, intercept the callbacks */ - ND("%s: intercept callbacks on %s", mkring->name, ikring->name); + nm_prdis("%s: intercept callbacks on %s", mkring->name, ikring->name); nm_monitor_intercept_callbacks(ikring); } if (zmon) { /* append the zmon to the list */ ikring->zmon_list[t].next = mkring; z->prev = mkring; /* new tail */ mz->prev = ikring; mz->next = NULL; /* grab a reference to the previous netmap adapter * in the chain (this may be the monitored port * or another zero-copy monitor) */ netmap_adapter_get(ikring->na); } else { /* make sure the monitor array exists and is big enough */ error = nm_monitor_alloc(kring, kring->n_monitors + 1); if (error) goto out; kring->monitors[kring->n_monitors] = mkring; mkring->mon_pos[kring->tx] = kring->n_monitors; kring->n_monitors++; } out: nm_kr_start(kring); return error; } /* remove the monitor mkring from the list of monitors of kring. * If this is the last monitor, restore the original callbacks */ static void netmap_monitor_del(struct netmap_kring *mkring, struct netmap_kring *kring, enum txrx t) { int zmon = nm_is_zmon(mkring->na); struct netmap_zmon_list *mz = &mkring->zmon_list[t]; struct netmap_kring *ikring = kring; if (zmon) { /* get to the head of the list */ kring = nm_zmon_list_head(mkring, t); ikring = mz->prev; } /* synchronize with concurrently running nm_sync()s * if kring is NULL (orphaned list) the monitored port * has exited netmap mode, so there is nothing to stop */ if (kring != NULL) nm_kr_stop(kring, NM_KR_LOCKED); if (zmon) { /* remove the monitor from the list */ if (mz->next != NULL) { mz->next->zmon_list[t].prev = mz->prev; /* we also need to let the next monitor drop the * reference to us and grab the reference to the * previous ring owner, instead */ if (mz->prev != NULL) netmap_adapter_get(mz->prev->na); netmap_adapter_put(mkring->na); } else if (kring != NULL) { /* in the monitored kring, prev is actually the * pointer to the tail of the list */ kring->zmon_list[t].prev = (mz->prev != kring ? mz->prev : NULL); } if (mz->prev != NULL) { netmap_adapter_put(mz->prev->na); mz->prev->zmon_list[t].next = mz->next; } mz->prev = NULL; mz->next = NULL; } else { /* this is a copy monitor */ uint32_t mon_pos = mkring->mon_pos[kring->tx]; kring->n_monitors--; if (mon_pos != kring->n_monitors) { kring->monitors[mon_pos] = kring->monitors[kring->n_monitors]; kring->monitors[mon_pos]->mon_pos[kring->tx] = mon_pos; } kring->monitors[kring->n_monitors] = NULL; if (kring->n_monitors == 0) { nm_monitor_dealloc(kring); } } if (ikring != NULL && nm_monitor_none(ikring)) { /* this was the last monitor, restore the callbacks */ nm_monitor_restore_callbacks(ikring); } if (kring != NULL) nm_kr_start(kring); } /* This is called when the monitored adapter leaves netmap mode * (see netmap_do_unregif). * We need to notify the monitors that the monitored rings are gone. 
* We do this by setting their mna->priv.np_na to NULL. * Note that the rings are already stopped when this happens, so * no monitor ring callback can be active. */ void netmap_monitor_stop(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < netmap_all_rings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_zmon_list *z = &kring->zmon_list[t]; u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)mkring->na; /* forget about this adapter */ if (mna->priv.np_na != NULL) { netmap_adapter_put(mna->priv.np_na); mna->priv.np_na = NULL; } kring->monitors[j] = NULL; } if (!nm_is_zmon(na)) { /* we are the head of at most one list */ struct netmap_kring *zkring; for (zkring = z->next; zkring != NULL; zkring = zkring->zmon_list[t].next) { struct netmap_monitor_adapter *next = (struct netmap_monitor_adapter *)zkring->na; /* let the monitor forget about us */ netmap_adapter_put(next->priv.np_na); /* nop if null */ next->priv.np_na = NULL; } /* orhpan the zmon list */ if (z->next != NULL) z->next->zmon_list[t].prev = NULL; z->next = NULL; z->prev = NULL; } if (!nm_monitor_none(kring)) { kring->n_monitors = 0; nm_monitor_dealloc(kring); nm_monitor_restore_callbacks(kring); } } } } /* common functions for the nm_register() callbacks of both kind of * monitors. */ static int netmap_monitor_reg_common(struct netmap_adapter *na, int onoff, int zmon) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; struct netmap_kring *kring, *mkring; int i; enum txrx t, s; - ND("%p: onoff %d", na, onoff); + nm_prdis("%p: onoff %d", na, onoff); if (onoff) { if (pna == NULL) { /* parent left netmap mode, fatal */ - D("%s: internal error", na->name); + nm_prerr("%s: parent left netmap mode", na->name); return ENXIO; } for_rx_tx(t) { for (i = 0; i < netmap_all_rings(na, t); i++) { mkring = NMR(na, t)[i]; if (!nm_kring_pending_on(mkring)) continue; mkring->nr_mode = NKR_NETMAP_ON; if (t == NR_TX) continue; for_rx_tx(s) { if (i > nma_get_nrings(pna, s)) continue; if (mna->flags & nm_txrx2flag(s)) { kring = NMR(pna, s)[i]; netmap_monitor_add(mkring, kring, zmon); } } } } na->na_flags |= NAF_NETMAP_ON; } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; for_rx_tx(t) { for (i = 0; i < netmap_all_rings(na, t); i++) { mkring = NMR(na, t)[i]; if (!nm_kring_pending_off(mkring)) continue; mkring->nr_mode = NKR_NETMAP_OFF; if (t == NR_TX) continue; /* we cannot access the parent krings if the parent * has left netmap mode. 
This is signaled by a NULL * pna pointer */ if (pna == NULL) continue; for_rx_tx(s) { if (i > nma_get_nrings(pna, s)) continue; if (mna->flags & nm_txrx2flag(s)) { kring = NMR(pna, s)[i]; netmap_monitor_del(mkring, kring, s); } } } } } return 0; } /* **************************************************************** * functions specific for zero-copy monitors **************************************************************** */ /* * Common function for both zero-copy tx and rx nm_sync() * callbacks */ static int netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx) { struct netmap_kring *mkring = kring->zmon_list[tx].next; struct netmap_ring *ring = kring->ring, *mring; int error = 0; int rel_slots, free_slots, busy, sent = 0; u_int beg, end, i; u_int lim = kring->nkr_num_slots - 1, mlim; // = mkring->nkr_num_slots - 1; if (mkring == NULL) { - RD(5, "NULL monitor on %s", kring->name); + nm_prlim(5, "NULL monitor on %s", kring->name); return 0; } mring = mkring->ring; mlim = mkring->nkr_num_slots - 1; /* get the relased slots (rel_slots) */ if (tx == NR_TX) { beg = kring->nr_hwtail + 1; error = kring->mon_sync(kring, flags); if (error) return error; end = kring->nr_hwtail + 1; } else { /* NR_RX */ beg = kring->nr_hwcur; end = kring->rhead; } rel_slots = end - beg; if (rel_slots < 0) rel_slots += kring->nkr_num_slots; if (!rel_slots) { /* no released slots, but we still need * to call rxsync if this is a rx ring */ goto out_rxsync; } /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* swap min(free_slots, rel_slots) slots */ if (free_slots < rel_slots) { beg += (rel_slots - free_slots); rel_slots = free_slots; } if (unlikely(beg >= kring->nkr_num_slots)) beg -= kring->nkr_num_slots; sent = rel_slots; for ( ; rel_slots; rel_slots--) { struct netmap_slot *s = &ring->slot[beg]; struct netmap_slot *ms = &mring->slot[i]; uint32_t tmp; tmp = ms->buf_idx; ms->buf_idx = s->buf_idx; s->buf_idx = tmp; - ND(5, "beg %d buf_idx %d", beg, tmp); + nm_prdis(5, "beg %d buf_idx %d", beg, tmp); tmp = ms->len; ms->len = s->len; s->len = tmp; ms->flags = s->flags; s->flags |= NS_BUF_CHANGED; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } out_rxsync: if (tx == NR_RX) error = kring->mon_sync(kring, flags); return error; } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_zmon_parent_txsync(struct netmap_kring *kring, int flags) { return netmap_zmon_parent_sync(kring, flags, NR_TX); } /* callback used to replace the nm_sync callback in the monitored rx rings */ static int netmap_zmon_parent_rxsync(struct netmap_kring *kring, int flags) { return netmap_zmon_parent_sync(kring, flags, NR_RX); } static int netmap_zmon_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 1 /* zcopy */); } /* nm_dtor callback for monitors */ static void netmap_zmon_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } /* 
**************************************************************** * functions specific for copy monitors **************************************************************** */ static void netmap_monitor_parent_sync(struct netmap_kring *kring, u_int first_new, int new_slots) { u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; u_int i, mlim, beg; int free_slots, busy, sent = 0, m; u_int lim = kring->nkr_num_slots - 1; struct netmap_ring *ring = kring->ring, *mring = mkring->ring; u_int max_len = NETMAP_BUF_SIZE(mkring->na); mlim = mkring->nkr_num_slots - 1; /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* copy min(free_slots, new_slots) slots */ m = new_slots; beg = first_new; if (free_slots < m) { beg += (m - free_slots); if (beg >= kring->nkr_num_slots) beg -= kring->nkr_num_slots; m = free_slots; } for ( ; m; m--) { struct netmap_slot *s = &ring->slot[beg]; struct netmap_slot *ms = &mring->slot[i]; u_int copy_len = s->len; char *src = NMB(kring->na, s), *dst = NMB(mkring->na, ms); if (unlikely(copy_len > max_len)) { - RD(5, "%s->%s: truncating %d to %d", kring->name, + nm_prlim(5, "%s->%s: truncating %d to %d", kring->name, mkring->name, copy_len, max_len); copy_len = max_len; } memcpy(dst, src, copy_len); ms->len = copy_len; ms->flags = s->flags; sent++; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } } } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_monitor_parent_txsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots; /* get the new slots */ if (kring->n_monitors > 0) { first_new = kring->nr_hwcur; new_slots = kring->rhead - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); } if (kring->zmon_list[NR_TX].next != NULL) { return netmap_zmon_parent_txsync(kring, flags); } return kring->mon_sync(kring, flags); } /* callback used to replace the nm_sync callback in the monitored rx rings */ static int netmap_monitor_parent_rxsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots, error; /* get the new slots */ if (kring->zmon_list[NR_RX].next != NULL) { error = netmap_zmon_parent_rxsync(kring, flags); } else { error = kring->mon_sync(kring, flags); } if (error) return error; if (kring->n_monitors > 0) { first_new = kring->mon_tail; new_slots = kring->nr_hwtail - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); kring->mon_tail = kring->nr_hwtail; } return 0; } /* callback used to replace the nm_notify() callback in the monitored rx rings */ static int netmap_monitor_parent_notify(struct netmap_kring *kring, int flags) { int (*notify)(struct netmap_kring*, int); - ND(5, "%s %x", kring->name, flags); + nm_prdis(5, "%s %x", kring->name, flags); /* ?xsync callbacks have tryget called by their callers * (NIOCREGIF and poll()), but here we have to call it * by ourself */ if (nm_kr_tryget(kring, 0, NULL)) { /* in all cases, just skip 
the sync */ return NM_IRQ_COMPLETED; } if (kring->n_monitors > 0) { netmap_monitor_parent_rxsync(kring, NAF_FORCE_READ); } if (nm_monitor_none(kring)) { /* we are no longer monitoring this ring, so both * mon_sync and mon_notify are NULL */ notify = kring->nm_notify; } else { notify = kring->mon_notify; } nm_kr_put(kring); return notify(kring, flags); } static int netmap_monitor_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 0 /* no zcopy */); } static void netmap_monitor_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } /* check if req is a request for a monitor adapter that we can satisfy */ int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct nmreq_register preq; struct netmap_adapter *pna; /* parent adapter */ struct netmap_monitor_adapter *mna; struct ifnet *ifp = NULL; int error; int zcopy = (req->nr_flags & NR_ZCOPY_MON); if (zcopy) { req->nr_flags |= (NR_MONITOR_TX | NR_MONITOR_RX); } if ((req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) { - ND("not a monitor"); + nm_prdis("not a monitor"); return 0; } /* this is a request for a monitor adapter */ - ND("flags %lx", req->nr_flags); + nm_prdis("flags %lx", req->nr_flags); /* First, try to find the adapter that we want to monitor. * We use the same req, after we have turned off the monitor flags. * In this way we can potentially monitor everything netmap understands, * except other monitors. */ memcpy(&preq, req, sizeof(preq)); preq.nr_flags &= ~(NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON); hdr->nr_body = (uintptr_t)&preq; error = netmap_get_na(hdr, &pna, &ifp, nmd, create); hdr->nr_body = (uintptr_t)req; if (error) { - D("parent lookup failed: %d", error); + nm_prerr("parent lookup failed: %d", error); return error; } - ND("found parent: %s", pna->name); + nm_prdis("found parent: %s", pna->name); if (!nm_netmap_on(pna)) { /* parent not in netmap mode */ /* XXX we can wait for the parent to enter netmap mode, * by intercepting its nm_register callback (2014-03-16) */ - D("%s not in netmap mode", pna->name); + nm_prerr("%s not in netmap mode", pna->name); error = EINVAL; goto put_out; } mna = nm_os_malloc(sizeof(*mna)); if (mna == NULL) { - D("memory error"); error = ENOMEM; goto put_out; } mna->priv.np_na = pna; /* grab all the rings we need in the parent */ error = netmap_interp_ringid(&mna->priv, req->nr_mode, req->nr_ringid, req->nr_flags); if (error) { - D("ringid error"); + nm_prerr("ringid error"); goto free_out; } snprintf(mna->up.name, sizeof(mna->up.name), "%s/%s%s%s#%lu", pna->name, zcopy ? "z" : "", (req->nr_flags & NR_MONITOR_RX) ? "r" : "", (req->nr_flags & NR_MONITOR_TX) ? "t" : "", pna->monitor_id++); /* the monitor supports the host rings iff the parent does */ mna->up.na_flags |= (pna->na_flags & NAF_HOST_RINGS); /* a do-nothing txsync: monitors cannot be used to inject packets */ mna->up.nm_txsync = netmap_monitor_txsync; mna->up.nm_rxsync = netmap_monitor_rxsync; mna->up.nm_krings_create = netmap_monitor_krings_create; mna->up.nm_krings_delete = netmap_monitor_krings_delete; mna->up.num_tx_rings = 1; // XXX what should we do here with chained zmons? 
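Both netmap_zmon_parent_sync() and netmap_monitor_parent_sync() above gauge the room left on the monitor ring the same way: busy = nr_hwtail - nr_hwcur modulo the ring size, and at most nkr_num_slots - 1 slots may ever be busy, so copying stops while one slot is still free. A standalone sketch of that index arithmetic, not driver code, with illustrative sk_* names:

/*
 * Occupancy arithmetic for a circular kring of num_slots entries.
 * Indices wrap modulo num_slots and one slot is always kept unused.
 */
#include <stdio.h>

struct sk_kring {
	unsigned num_slots;	/* nkr_num_slots */
	unsigned hwcur;		/* nr_hwcur: next slot the reader consumes */
	unsigned hwtail;	/* nr_hwtail: next slot the writer fills */
};

/* slots currently filled: hwtail - hwcur, wrapped into [0, num_slots) */
static int
sk_busy(const struct sk_kring *k)
{
	int busy = (int)k->hwtail - (int)k->hwcur;

	if (busy < 0)
		busy += k->num_slots;
	return busy;
}

/* slots a parent sync may still copy or swap into the monitor ring */
static int
sk_free_slots(const struct sk_kring *k)
{
	return (int)(k->num_slots - 1) - sk_busy(k);
}

int
main(void)
{
	/* hwtail has wrapped past the end of a 1024-slot ring */
	struct sk_kring k = { .num_slots = 1024, .hwcur = 1000, .hwtail = 40 };

	printf("busy %d free %d\n", sk_busy(&k), sk_free_slots(&k));	/* 64, 959 */
	return 0;
}

Keeping one slot unused is the usual circular-buffer convention that lets hwtail == hwcur mean "empty" rather than "full".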
/* we set the number of our rx_rings to be max(num_rx_rings, num_rx_rings) * in the parent */ mna->up.num_rx_rings = pna->num_rx_rings; if (pna->num_tx_rings > pna->num_rx_rings) mna->up.num_rx_rings = pna->num_tx_rings; /* by default, the number of slots is the same as in * the parent rings, but the user may ask for a different * number */ mna->up.num_tx_desc = req->nr_tx_slots; nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 1, NM_MONITOR_MAXSLOTS, NULL); mna->up.num_rx_desc = req->nr_rx_slots; nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 1, NM_MONITOR_MAXSLOTS, NULL); if (zcopy) { mna->up.nm_register = netmap_zmon_reg; mna->up.nm_dtor = netmap_zmon_dtor; /* to have zero copy, we need to use the same memory allocator * as the monitored port */ mna->up.nm_mem = netmap_mem_get(pna->nm_mem); /* and the allocator cannot be changed */ mna->up.na_flags |= NAF_MEM_OWNER; } else { mna->up.nm_register = netmap_monitor_reg; mna->up.nm_dtor = netmap_monitor_dtor; mna->up.nm_mem = netmap_mem_private_new( mna->up.num_tx_rings, mna->up.num_tx_desc, mna->up.num_rx_rings, mna->up.num_rx_desc, 0, /* extra bufs */ 0, /* pipes */ &error); if (mna->up.nm_mem == NULL) goto put_out; } error = netmap_attach_common(&mna->up); if (error) { - D("attach_common error"); + nm_prerr("netmap_attach_common failed"); goto mem_put_out; } /* remember the traffic directions we have to monitor */ mna->flags = (req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON)); *na = &mna->up; netmap_adapter_get(*na); /* keep the reference to the parent */ - ND("monitor ok"); + nm_prdis("monitor ok"); /* drop the reference to the ifp, if any */ if (ifp) if_rele(ifp); return 0; mem_put_out: netmap_mem_put(mna->up.nm_mem); free_out: nm_os_free(mna); put_out: netmap_unget_na(pna, ifp); return error; } #endif /* WITH_MONITOR */ Index: stable/12/sys/dev/netmap/netmap_null.c =================================================================== --- stable/12/sys/dev/netmap/netmap_null.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_null.c (revision 344046) @@ -1,184 +1,170 @@ /* * Copyright (C) 2018 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_NMNULL static int -netmap_null_txsync(struct netmap_kring *kring, int flags) +netmap_null_sync(struct netmap_kring *kring, int flags) { (void)kring; (void)flags; return 0; } static int -netmap_null_rxsync(struct netmap_kring *kring, int flags) -{ - (void)kring; - (void)flags; - return 0; -} - -static int netmap_null_krings_create(struct netmap_adapter *na) { return netmap_krings_create(na, 0); } -static void -netmap_null_krings_delete(struct netmap_adapter *na) -{ - netmap_krings_delete(na); -} - static int netmap_null_reg(struct netmap_adapter *na, int onoff) { if (na->active_fds == 0) { if (onoff) na->na_flags |= NAF_NETMAP_ON; else na->na_flags &= ~NAF_NETMAP_ON; } return 0; } static int netmap_null_bdg_attach(const char *name, struct netmap_adapter *na, struct nm_bridge *b) { (void)name; (void)na; (void)b; return EINVAL; } int netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct netmap_null_adapter *nna; int error; if (req->nr_mode != NR_REG_NULL) { nm_prdis("not a null port"); return 0; } if (!create) { nm_prerr("null ports cannot be re-opened"); return EINVAL; } if (nmd == NULL) { nm_prerr("null ports must use an existing allocator"); return EINVAL; } nna = nm_os_malloc(sizeof(*nna)); if (nna == NULL) { error = ENOMEM; goto err; } snprintf(nna->up.name, sizeof(nna->up.name), "null:%s", hdr->nr_name); - nna->up.nm_txsync = netmap_null_txsync; - nna->up.nm_rxsync = netmap_null_rxsync; + nna->up.nm_txsync = netmap_null_sync; + nna->up.nm_rxsync = netmap_null_sync; nna->up.nm_register = netmap_null_reg; nna->up.nm_krings_create = netmap_null_krings_create; - nna->up.nm_krings_delete = netmap_null_krings_delete; + nna->up.nm_krings_delete = netmap_krings_delete; nna->up.nm_bdg_attach = netmap_null_bdg_attach; nna->up.nm_mem = netmap_mem_get(nmd); nna->up.num_tx_rings = req->nr_tx_rings; nna->up.num_rx_rings = req->nr_rx_rings; nna->up.num_tx_desc = req->nr_tx_slots; nna->up.num_rx_desc = req->nr_rx_slots; error = netmap_attach_common(&nna->up); if (error) goto free_nna; *na = &nna->up; netmap_adapter_get(*na); nm_prdis("created null %s", nna->up.name); return 0; free_nna: nm_os_free(nna); err: return error; } #endif /* WITH_NMNULL */ Index: stable/12/sys/dev/netmap/netmap_offloadings.c =================================================================== --- stable/12/sys/dev/netmap/netmap_offloadings.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_offloadings.c (revision 344046) @@ -1,492 +1,492 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2014-2015 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include /* struct socket */ #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #else #error Unsupported platform #endif /* unsupported */ #include #include /* This routine is called by bdg_mismatch_datapath() when it finishes * accumulating bytes for a segment, in order to fix some fields in the * segment headers (which still contain the same content as the header * of the original GSO packet). 'pkt' points to the beginning of the IP * header of the segment, while 'len' is the length of the IP packet. */ static void gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp, u_int idx, u_int segmented_bytes, u_int last_segment) { struct nm_iphdr *iph = (struct nm_iphdr *)(pkt); struct nm_ipv6hdr *ip6h = (struct nm_ipv6hdr *)(pkt); uint16_t *check = NULL; uint8_t *check_data = NULL; if (ipv4) { /* Set the IPv4 "Total Length" field. */ iph->tot_len = htobe16(len); - ND("ip total length %u", be16toh(ip->tot_len)); + nm_prdis("ip total length %u", be16toh(ip->tot_len)); /* Set the IPv4 "Identification" field. */ iph->id = htobe16(be16toh(iph->id) + idx); - ND("ip identification %u", be16toh(iph->id)); + nm_prdis("ip identification %u", be16toh(iph->id)); /* Compute and insert the IPv4 header checksum. */ iph->check = 0; iph->check = nm_os_csum_ipv4(iph); - ND("IP csum %x", be16toh(iph->check)); + nm_prdis("IP csum %x", be16toh(iph->check)); } else { /* Set the IPv6 "Payload Len" field. */ ip6h->payload_len = htobe16(len-iphlen); } if (tcp) { struct nm_tcphdr *tcph = (struct nm_tcphdr *)(pkt + iphlen); /* Set the TCP sequence number. */ tcph->seq = htobe32(be32toh(tcph->seq) + segmented_bytes); - ND("tcp seq %u", be32toh(tcph->seq)); + nm_prdis("tcp seq %u", be32toh(tcph->seq)); /* Zero the PSH and FIN TCP flags if this is not the last segment. */ if (!last_segment) tcph->flags &= ~(0x8 | 0x1); - ND("last_segment %u", last_segment); + nm_prdis("last_segment %u", last_segment); check = &tcph->check; check_data = (uint8_t *)tcph; } else { /* UDP */ struct nm_udphdr *udph = (struct nm_udphdr *)(pkt + iphlen); /* Set the UDP 'Length' field. 
*/ udph->len = htobe16(len-iphlen); check = &udph->check; check_data = (uint8_t *)udph; } /* Compute and insert TCP/UDP checksum. */ *check = 0; if (ipv4) nm_os_csum_tcpudp_ipv4(iph, check_data, len-iphlen, check); else nm_os_csum_tcpudp_ipv6(ip6h, check_data, len-iphlen, check); - ND("TCP/UDP csum %x", be16toh(*check)); + nm_prdis("TCP/UDP csum %x", be16toh(*check)); } static inline int vnet_hdr_is_bad(struct nm_vnet_hdr *vh) { uint8_t gso_type = vh->gso_type & ~VIRTIO_NET_HDR_GSO_ECN; return ( (gso_type != VIRTIO_NET_HDR_GSO_NONE && gso_type != VIRTIO_NET_HDR_GSO_TCPV4 && gso_type != VIRTIO_NET_HDR_GSO_UDP && gso_type != VIRTIO_NET_HDR_GSO_TCPV6) || (vh->flags & ~(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) ); } /* The VALE mismatch datapath implementation. */ void bdg_mismatch_datapath(struct netmap_vp_adapter *na, struct netmap_vp_adapter *dst_na, const struct nm_bdg_fwd *ft_p, struct netmap_ring *dst_ring, u_int *j, u_int lim, u_int *howmany) { struct netmap_slot *dst_slot = NULL; struct nm_vnet_hdr *vh = NULL; const struct nm_bdg_fwd *ft_end = ft_p + ft_p->ft_frags; /* Source and destination pointers. */ uint8_t *dst, *src; size_t src_len, dst_len; /* Indices and counters for the destination ring. */ u_int j_start = *j; u_int j_cur = j_start; u_int dst_slots = 0; if (unlikely(ft_p == ft_end)) { - RD(1, "No source slots to process"); + nm_prlim(1, "No source slots to process"); return; } /* Init source and dest pointers. */ src = ft_p->ft_buf; src_len = ft_p->ft_len; dst_slot = &dst_ring->slot[j_cur]; dst = NMB(&dst_na->up, dst_slot); dst_len = src_len; /* If the source port uses the offloadings, while destination doesn't, * we grab the source virtio-net header and do the offloadings here. */ if (na->up.virt_hdr_len && !dst_na->up.virt_hdr_len) { vh = (struct nm_vnet_hdr *)src; /* Initial sanity check on the source virtio-net header. If * something seems wrong, just drop the packet. */ if (src_len < na->up.virt_hdr_len) { - RD(1, "Short src vnet header, dropping"); + nm_prlim(1, "Short src vnet header, dropping"); return; } if (unlikely(vnet_hdr_is_bad(vh))) { - RD(1, "Bad src vnet header, dropping"); + nm_prlim(1, "Bad src vnet header, dropping"); return; } } /* We are processing the first input slot and there is a mismatch * between source and destination virt_hdr_len (SHL and DHL). * When the a client is using virtio-net headers, the header length * can be: * - 10: the header corresponds to the struct nm_vnet_hdr * - 12: the first 10 bytes correspond to the struct * virtio_net_hdr, and the last 2 bytes store the * "mergeable buffers" info, which is an optional * hint that can be zeroed for compatibility * * The destination header is therefore built according to the * following table: * * SHL | DHL | destination header * ----------------------------- * 0 | 10 | zero * 0 | 12 | zero * 10 | 0 | doesn't exist * 10 | 12 | first 10 bytes are copied from source header, last 2 are zero * 12 | 0 | doesn't exist * 12 | 10 | copied from the first 10 bytes of source header */ bzero(dst, dst_na->up.virt_hdr_len); if (na->up.virt_hdr_len && dst_na->up.virt_hdr_len) memcpy(dst, src, sizeof(struct nm_vnet_hdr)); /* Skip the virtio-net headers. */ src += na->up.virt_hdr_len; src_len -= na->up.virt_hdr_len; dst += dst_na->up.virt_hdr_len; dst_len = dst_na->up.virt_hdr_len + src_len; /* Here it could be dst_len == 0 (which implies src_len == 0), * so we avoid passing a zero length fragment. 
*/ if (dst_len == 0) { ft_p++; src = ft_p->ft_buf; src_len = ft_p->ft_len; dst_len = src_len; } if (vh && vh->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u_int gso_bytes = 0; /* Length of the GSO packet header. */ u_int gso_hdr_len = 0; /* Pointer to the GSO packet header. Assume it is in a single fragment. */ uint8_t *gso_hdr = NULL; /* Index of the current segment. */ u_int gso_idx = 0; /* Payload data bytes segmented so far (e.g. TCP data bytes). */ u_int segmented_bytes = 0; /* Is this an IPv4 or IPv6 GSO packet? */ u_int ipv4 = 0; /* Length of the IP header (20 if IPv4, 40 if IPv6). */ u_int iphlen = 0; /* Length of the Ethernet header (18 if 802.1q, otherwise 14). */ u_int ethhlen = 14; /* Is this a TCP or an UDP GSO packet? */ u_int tcp = ((vh->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) == VIRTIO_NET_HDR_GSO_UDP) ? 0 : 1; /* Segment the GSO packet contained into the input slots (frags). */ for (;;) { size_t copy; if (dst_slots >= *howmany) { /* We still have work to do, but we've run out of * dst slots, so we have to drop the packet. */ - ND(1, "Not enough slots, dropping GSO packet"); + nm_prdis(1, "Not enough slots, dropping GSO packet"); return; } /* Grab the GSO header if we don't have it. */ if (!gso_hdr) { uint16_t ethertype; gso_hdr = src; /* Look at the 'Ethertype' field to see if this packet * is IPv4 or IPv6, taking into account VLAN * encapsulation. */ for (;;) { if (src_len < ethhlen) { - RD(1, "Short GSO fragment [eth], dropping"); + nm_prlim(1, "Short GSO fragment [eth], dropping"); return; } ethertype = be16toh(*((uint16_t *) (gso_hdr + ethhlen - 2))); if (ethertype != 0x8100) /* not 802.1q */ break; ethhlen += 4; } switch (ethertype) { case 0x0800: /* IPv4 */ { struct nm_iphdr *iph = (struct nm_iphdr *) (gso_hdr + ethhlen); if (src_len < ethhlen + 20) { - RD(1, "Short GSO fragment " + nm_prlim(1, "Short GSO fragment " "[IPv4], dropping"); return; } ipv4 = 1; iphlen = 4 * (iph->version_ihl & 0x0F); break; } case 0x86DD: /* IPv6 */ ipv4 = 0; iphlen = 40; break; default: - RD(1, "Unsupported ethertype, " + nm_prlim(1, "Unsupported ethertype, " "dropping GSO packet"); return; } - ND(3, "type=%04x", ethertype); + nm_prdis(3, "type=%04x", ethertype); if (src_len < ethhlen + iphlen) { - RD(1, "Short GSO fragment [IP], dropping"); + nm_prlim(1, "Short GSO fragment [IP], dropping"); return; } /* Compute gso_hdr_len. For TCP we need to read the * content of the 'Data Offset' field. */ if (tcp) { struct nm_tcphdr *tcph = (struct nm_tcphdr *) (gso_hdr + ethhlen + iphlen); if (src_len < ethhlen + iphlen + 20) { - RD(1, "Short GSO fragment " + nm_prlim(1, "Short GSO fragment " "[TCP], dropping"); return; } gso_hdr_len = ethhlen + iphlen + 4 * (tcph->doff >> 4); } else { gso_hdr_len = ethhlen + iphlen + 8; /* UDP */ } if (src_len < gso_hdr_len) { - RD(1, "Short GSO fragment [TCP/UDP], dropping"); + nm_prlim(1, "Short GSO fragment [TCP/UDP], dropping"); return; } - ND(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len, + nm_prdis(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len, dst_na->mfs); /* Advance source pointers. */ src += gso_hdr_len; src_len -= gso_hdr_len; if (src_len == 0) { ft_p++; if (ft_p == ft_end) break; src = ft_p->ft_buf; src_len = ft_p->ft_len; } } /* Fill in the header of the current segment. */ if (gso_bytes == 0) { memcpy(dst, gso_hdr, gso_hdr_len); gso_bytes = gso_hdr_len; } /* Fill in data and update source and dest pointers. 
*/ copy = src_len; if (gso_bytes + copy > dst_na->mfs) copy = dst_na->mfs - gso_bytes; memcpy(dst + gso_bytes, src, copy); gso_bytes += copy; src += copy; src_len -= copy; /* A segment is complete or we have processed all the the GSO payload bytes. */ if (gso_bytes >= dst_na->mfs || (src_len == 0 && ft_p + 1 == ft_end)) { /* After raw segmentation, we must fix some header * fields and compute checksums, in a protocol dependent * way. */ gso_fix_segment(dst + ethhlen, gso_bytes - ethhlen, ipv4, iphlen, tcp, gso_idx, segmented_bytes, src_len == 0 && ft_p + 1 == ft_end); - ND("frame %u completed with %d bytes", gso_idx, (int)gso_bytes); + nm_prdis("frame %u completed with %d bytes", gso_idx, (int)gso_bytes); dst_slot->len = gso_bytes; dst_slot->flags = 0; dst_slots++; segmented_bytes += gso_bytes - gso_hdr_len; gso_bytes = 0; gso_idx++; /* Next destination slot. */ j_cur = nm_next(j_cur, lim); dst_slot = &dst_ring->slot[j_cur]; dst = NMB(&dst_na->up, dst_slot); } /* Next input slot. */ if (src_len == 0) { ft_p++; if (ft_p == ft_end) break; src = ft_p->ft_buf; src_len = ft_p->ft_len; } } - ND(3, "%d bytes segmented", segmented_bytes); + nm_prdis(3, "%d bytes segmented", segmented_bytes); } else { /* Address of a checksum field into a destination slot. */ uint16_t *check = NULL; /* Accumulator for an unfolded checksum. */ rawsum_t csum = 0; /* Process a non-GSO packet. */ /* Init 'check' if necessary. */ if (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { if (unlikely(vh->csum_offset + vh->csum_start > src_len)) - D("invalid checksum request"); + nm_prerr("invalid checksum request"); else check = (uint16_t *)(dst + vh->csum_start + vh->csum_offset); } while (ft_p != ft_end) { /* Init/update the packet checksum if needed. */ if (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { if (!dst_slots) csum = nm_os_csum_raw(src + vh->csum_start, src_len - vh->csum_start, 0); else csum = nm_os_csum_raw(src, src_len, csum); } /* Round to a multiple of 64 */ src_len = (src_len + 63) & ~63; if (ft_p->ft_flags & NS_INDIRECT) { if (copyin(src, dst, src_len)) { /* Invalid user pointer, pretend len is 0. */ dst_len = 0; } } else { memcpy(dst, src, (int)src_len); } dst_slot->len = dst_len; dst_slots++; /* Next destination slot. */ j_cur = nm_next(j_cur, lim); dst_slot = &dst_ring->slot[j_cur]; dst = NMB(&dst_na->up, dst_slot); /* Next source slot. */ ft_p++; src = ft_p->ft_buf; dst_len = src_len = ft_p->ft_len; } /* Finalize (fold) the checksum if needed. */ if (check && vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { *check = nm_os_csum_fold(csum); } - ND(3, "using %u dst_slots", dst_slots); + nm_prdis(3, "using %u dst_slots", dst_slots); /* A second pass on the destination slots to set the slot flags, * using the right number of destination slots. */ while (j_start != j_cur) { dst_slot = &dst_ring->slot[j_start]; dst_slot->flags = (dst_slots << 8)| NS_MOREFRAG; j_start = nm_next(j_start, lim); } /* Clear NS_MOREFRAG flag on last entry. */ dst_slot->flags = (dst_slots << 8); } /* Update howmany and j. This is to commit the use of * those slots in the destination ring. 
*/ if (unlikely(dst_slots > *howmany)) { - D("Slot allocation error: This is a bug"); + nm_prerr("bug: slot allocation error"); } *j = j_cur; *howmany -= dst_slots; } Index: stable/12/sys/dev/netmap/netmap_pipe.c =================================================================== --- stable/12/sys/dev/netmap/netmap_pipe.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_pipe.c (revision 344046) @@ -1,843 +1,864 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2014-2018 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_PIPES #define NM_PIPE_MAXSLOTS 4096 #define NM_PIPE_MAXRINGS 256 static int netmap_default_pipes = 0; /* ignored, kept for compatibility */ SYSBEGIN(vars_pipes); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW, &netmap_default_pipes, 0, "For compatibility only"); SYSEND; /* allocate the pipe array in the parent adapter */ static int nm_pipe_alloc(struct netmap_adapter *na, u_int npipes) { size_t old_len, len; struct netmap_pipe_adapter **npa; if (npipes <= na->na_max_pipes) /* we already have more entries that requested */ return 0; if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES) return EINVAL; old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes; len = sizeof(struct netmap_pipe_adapter *) * npipes; npa = nm_os_realloc(na->na_pipes, len, old_len); if (npa == NULL) return ENOMEM; na->na_pipes = npa; na->na_max_pipes = npipes; return 0; } /* deallocate the parent array in the parent adapter */ void netmap_pipe_dealloc(struct netmap_adapter *na) { if (na->na_pipes) { if (na->na_next_pipe > 0) { - D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name, - na->na_next_pipe); + nm_prerr("freeing not empty pipe array for %s (%d dangling pipes)!", + na->name, na->na_next_pipe); } nm_os_free(na->na_pipes); na->na_pipes = NULL; na->na_max_pipes = 0; na->na_next_pipe = 0; } } /* find a pipe endpoint with the given id among the parent's pipes */ static struct netmap_pipe_adapter * netmap_pipe_find(struct netmap_adapter *parent, const char *pipe_id) { int i; struct netmap_pipe_adapter *na; for (i = 0; i < parent->na_next_pipe; i++) { const char *na_pipe_id; na = parent->na_pipes[i]; na_pipe_id = strrchr(na->up.name, na->role == NM_PIPE_ROLE_MASTER ? '{' : '}'); KASSERT(na_pipe_id != NULL, ("Invalid pipe name")); ++na_pipe_id; if (!strcmp(na_pipe_id, pipe_id)) { return na; } } return NULL; } /* add a new pipe endpoint to the parent array */ static int netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na) { if (parent->na_next_pipe >= parent->na_max_pipes) { u_int npipes = parent->na_max_pipes ? 
2*parent->na_max_pipes : 2; int error = nm_pipe_alloc(parent, npipes); if (error) return error; } parent->na_pipes[parent->na_next_pipe] = na; na->parent_slot = parent->na_next_pipe; parent->na_next_pipe++; return 0; } /* remove the given pipe endpoint from the parent array */ static void netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na) { u_int n; n = --parent->na_next_pipe; if (n != na->parent_slot) { struct netmap_pipe_adapter **p = &parent->na_pipes[na->parent_slot]; *p = parent->na_pipes[n]; (*p)->parent_slot = na->parent_slot; } parent->na_pipes[n] = NULL; } int netmap_pipe_txsync(struct netmap_kring *txkring, int flags) { struct netmap_kring *rxkring = txkring->pipe; u_int k, lim = txkring->nkr_num_slots - 1, nk; int m; /* slots to transfer */ int complete; /* did we see a complete packet ? */ struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring; - ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); - ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d", + nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); + nm_prdis(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d", txkring->nr_hwcur, txkring->nr_hwtail, txkring->rcur, txkring->rhead, txkring->rtail); /* update the hwtail */ txkring->nr_hwtail = txkring->pipe_tail; m = txkring->rhead - txkring->nr_hwcur; /* new slots */ if (m < 0) m += txkring->nkr_num_slots; if (m == 0) { /* nothing to send */ return 0; } for (k = txkring->nr_hwcur, nk = lim + 1, complete = 0; m; m--, k = nm_next(k, lim), nk = (complete ? k : nk)) { struct netmap_slot *rs = &rxring->slot[k]; struct netmap_slot *ts = &txring->slot[k]; *rs = *ts; if (ts->flags & NS_BUF_CHANGED) { ts->flags &= ~NS_BUF_CHANGED; } complete = !(ts->flags & NS_MOREFRAG); } txkring->nr_hwcur = k; - ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d", + nm_prdis(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d", txkring->nr_hwcur, txkring->nr_hwtail, txkring->rcur, txkring->rhead, txkring->rtail, k); if (likely(nk <= lim)) { mb(); /* make sure the slots are updated before publishing them */ rxkring->pipe_tail = nk; /* only publish complete packets */ rxkring->nm_notify(rxkring, 0); } return 0; } int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags) { struct netmap_kring *txkring = rxkring->pipe; u_int k, lim = rxkring->nkr_num_slots - 1; int m; /* slots to release */ struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring; - ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); - ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d", + nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); + nm_prdis(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail, rxkring->rcur, rxkring->rhead, rxkring->rtail); /* update the hwtail */ rxkring->nr_hwtail = rxkring->pipe_tail; m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */ if (m < 0) m += rxkring->nkr_num_slots; if (m == 0) { /* nothing to release */ return 0; } for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) { struct netmap_slot *rs = &rxring->slot[k]; struct netmap_slot *ts = &txring->slot[k]; if (rs->flags & NS_BUF_CHANGED) { /* copy the slot and report the buffer change */ *ts = *rs; rs->flags &= ~NS_BUF_CHANGED; } } mb(); /* make sure the slots are updated before publishing them */ txkring->pipe_tail = nm_prev(k, lim); rxkring->nr_hwcur = k; - ND(20, "RX after : hwcur %d hwtail %d cur 
%d head %d tail %d k %d", + nm_prdis(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d", rxkring->nr_hwcur, rxkring->nr_hwtail, rxkring->rcur, rxkring->rhead, rxkring->rtail, k); txkring->nm_notify(txkring, 0); return 0; } /* Pipe endpoints are created and destroyed together, so that endopoints do not * have to check for the existence of their peer at each ?xsync. * * To play well with the existing netmap infrastructure (refcounts etc.), we * adopt the following strategy: * * 1) The first endpoint that is created also creates the other endpoint and * grabs a reference to it. * * state A) user1 --> endpoint1 --> endpoint2 * * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives * its reference to the user: * * state B) user1 --> endpoint1 endpoint2 <--- user2 * * 3) Assume that, starting from state B endpoint2 is closed. In the unregister * callback endpoint2 notes that endpoint1 is still active and adds a reference * from endpoint1 to itself. When user2 then releases her own reference, * endpoint2 is not destroyed and we are back to state A. A symmetrical state * would be reached if endpoint1 were released instead. * * 4) If, starting from state A, endpoint1 is closed, the destructor notes that * it owns a reference to endpoint2 and releases it. * * Something similar goes on for the creation and destruction of the krings. */ +int netmap_pipe_krings_create_both(struct netmap_adapter *na, + struct netmap_adapter *ona) +{ + enum txrx t; + int error; + int i; + + /* case 1) below */ + nm_prdis("%p: case 1, create both ends", na); + error = netmap_krings_create(na, 0); + if (error) + return error; + + /* create the krings of the other end */ + error = netmap_krings_create(ona, 0); + if (error) + goto del_krings1; + + /* cross link the krings and initialize the pipe_tails */ + for_rx_tx(t) { + enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ + for (i = 0; i < nma_get_nrings(na, t); i++) { + struct netmap_kring *k1 = NMR(na, t)[i], + *k2 = NMR(ona, r)[i]; + k1->pipe = k2; + k2->pipe = k1; + /* mark all peer-adapter rings as fake */ + k2->nr_kflags |= NKR_FAKERING; + /* init tails */ + k1->pipe_tail = k1->nr_hwtail; + k2->pipe_tail = k2->nr_hwtail; + } + } + + return 0; + +del_krings1: + netmap_krings_delete(na); + return error; +} + /* netmap_pipe_krings_create. * * There are two cases: * * 1) state is * * usr1 --> e1 --> e2 * * and we are e1. We have to create both sets * of krings. * * 2) state is * * usr1 --> e1 --> e2 * * and we are e2. e1 is certainly registered and our * krings already exist. Nothing to do. 
*/ static int netmap_pipe_krings_create(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; struct netmap_adapter *ona = &pna->peer->up; - int error = 0; - enum txrx t; - if (pna->peer_ref) { - int i; + if (pna->peer_ref) + return netmap_pipe_krings_create_both(na, ona); - /* case 1) above */ - ND("%p: case 1, create both ends", na); - error = netmap_krings_create(na, 0); - if (error) - goto err; + return 0; +} - /* create the krings of the other end */ - error = netmap_krings_create(ona, 0); - if (error) - goto del_krings1; +int +netmap_pipe_reg_both(struct netmap_adapter *na, struct netmap_adapter *ona) +{ + int i, error = 0; + enum txrx t; - /* cross link the krings and initialize the pipe_tails */ - for_rx_tx(t) { - enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *k1 = NMR(na, t)[i], - *k2 = NMR(ona, r)[i]; - k1->pipe = k2; - k2->pipe = k1; - /* mark all peer-adapter rings as fake */ - k2->nr_kflags |= NKR_FAKERING; - /* init tails */ - k1->pipe_tail = k1->nr_hwtail; - k2->pipe_tail = k2->nr_hwtail; + for_rx_tx(t) { + for (i = 0; i < nma_get_nrings(na, t); i++) { + struct netmap_kring *kring = NMR(na, t)[i]; + + if (nm_kring_pending_on(kring)) { + /* mark the peer ring as needed */ + kring->pipe->nr_kflags |= NKR_NEEDRING; } } + } + /* create all missing needed rings on the other end. + * Either our end, or the other, has been marked as + * fake, so the allocation will not be done twice. + */ + error = netmap_mem_rings_create(ona); + if (error) + return error; + + /* In case of no error we put our rings in netmap mode */ + for_rx_tx(t) { + for (i = 0; i < nma_get_nrings(na, t); i++) { + struct netmap_kring *kring = NMR(na, t)[i]; + if (nm_kring_pending_on(kring)) { + struct netmap_kring *sring, *dring; + + kring->nr_mode = NKR_NETMAP_ON; + if ((kring->nr_kflags & NKR_FAKERING) && + (kring->pipe->nr_kflags & NKR_FAKERING)) { + /* this is a re-open of a pipe + * end-point kept alive by the other end. + * We need to leave everything as it is + */ + continue; + } + + /* copy the buffers from the non-fake ring */ + if (kring->nr_kflags & NKR_FAKERING) { + sring = kring->pipe; + dring = kring; + } else { + sring = kring; + dring = kring->pipe; + } + memcpy(dring->ring->slot, + sring->ring->slot, + sizeof(struct netmap_slot) * + sring->nkr_num_slots); + /* mark both rings as fake and needed, + * so that buffers will not be + * deleted by the standard machinery + * (we will delete them by ourselves in + * netmap_pipe_krings_delete) + */ + sring->nr_kflags |= + (NKR_FAKERING | NKR_NEEDRING); + dring->nr_kflags |= + (NKR_FAKERING | NKR_NEEDRING); + kring->nr_mode = NKR_NETMAP_ON; + } + } } - return 0; -del_krings1: - netmap_krings_delete(na); -err: - return error; + return 0; } /* netmap_pipe_reg. * * There are two cases on registration (onoff==1) * * 1.a) state is * * usr1 --> e1 --> e2 * * and we are e1. Create the needed rings of the * other end. * * 1.b) state is * * usr1 --> e1 --> e2 <-- usr2 * * and we are e2. Drop the ref e1 is holding. * * There are two additional cases on unregister (onoff==0) * * 2.a) state is * * usr1 --> e1 --> e2 * * and we are e1. Nothing special to do, e2 will * be cleaned up by the destructor of e1. * * 2.b) state is * * usr1 --> e1 e2 <-- usr2 * * and we are either e1 or e2. Add a ref from the * other end. 
*/ static int netmap_pipe_reg(struct netmap_adapter *na, int onoff) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; struct netmap_adapter *ona = &pna->peer->up; - int i, error = 0; - enum txrx t; + int error = 0; - ND("%p: onoff %d", na, onoff); + nm_prdis("%p: onoff %d", na, onoff); if (onoff) { - for_rx_tx(t) { - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_on(kring)) { - /* mark the peer ring as needed */ - kring->pipe->nr_kflags |= NKR_NEEDRING; - } - } - } - - /* create all missing needed rings on the other end. - * Either our end, or the other, has been marked as - * fake, so the allocation will not be done twice. - */ - error = netmap_mem_rings_create(ona); - if (error) + error = netmap_pipe_reg_both(na, ona); + if (error) { return error; - - /* In case of no error we put our rings in netmap mode */ - for_rx_tx(t) { - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - if (nm_kring_pending_on(kring)) { - struct netmap_kring *sring, *dring; - - kring->nr_mode = NKR_NETMAP_ON; - if ((kring->nr_kflags & NKR_FAKERING) && - (kring->pipe->nr_kflags & NKR_FAKERING)) { - /* this is a re-open of a pipe - * end-point kept alive by the other end. - * We need to leave everything as it is - */ - continue; - } - - /* copy the buffers from the non-fake ring */ - if (kring->nr_kflags & NKR_FAKERING) { - sring = kring->pipe; - dring = kring; - } else { - sring = kring; - dring = kring->pipe; - } - memcpy(dring->ring->slot, - sring->ring->slot, - sizeof(struct netmap_slot) * - sring->nkr_num_slots); - /* mark both rings as fake and needed, - * so that buffers will not be - * deleted by the standard machinery - * (we will delete them by ourselves in - * netmap_pipe_krings_delete) - */ - sring->nr_kflags |= - (NKR_FAKERING | NKR_NEEDRING); - dring->nr_kflags |= - (NKR_FAKERING | NKR_NEEDRING); - kring->nr_mode = NKR_NETMAP_ON; - } - } } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; - for_rx_tx(t) { - for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = NMR(na, t)[i]; - - if (nm_kring_pending_off(kring)) { - kring->nr_mode = NKR_NETMAP_OFF; - } - } - } + netmap_krings_mode_commit(na, onoff); } if (na->active_fds) { - ND("active_fds %d", na->active_fds); + nm_prdis("active_fds %d", na->active_fds); return 0; } if (pna->peer_ref) { - ND("%p: case 1.a or 2.a, nothing to do", na); + nm_prdis("%p: case 1.a or 2.a, nothing to do", na); return 0; } if (onoff) { - ND("%p: case 1.b, drop peer", na); + nm_prdis("%p: case 1.b, drop peer", na); pna->peer->peer_ref = 0; netmap_adapter_put(na); } else { - ND("%p: case 2.b, grab peer", na); + nm_prdis("%p: case 2.b, grab peer", na); netmap_adapter_get(na); pna->peer->peer_ref = 1; } return error; } -/* netmap_pipe_krings_delete. - * - * There are two cases: - * - * 1) state is - * - * usr1 --> e1 --> e2 - * - * and we are e1 (e2 is not registered, so krings_delete cannot be - * called on it); - * - * 2) state is - * - * usr1 --> e1 e2 <-- usr2 - * - * and we are either e1 or e2. - * - * In the former case we have to also delete the krings of e2; - * in the latter case we do nothing. 
- */ -static void -netmap_pipe_krings_delete(struct netmap_adapter *na) +void +netmap_pipe_krings_delete_both(struct netmap_adapter *na, + struct netmap_adapter *ona) { - struct netmap_pipe_adapter *pna = - (struct netmap_pipe_adapter *)na; - struct netmap_adapter *sna, *ona; /* na of the other end */ + struct netmap_adapter *sna; enum txrx t; int i; - if (!pna->peer_ref) { - ND("%p: case 2, kept alive by peer", na); - return; - } - ona = &pna->peer->up; - /* case 1) above */ - ND("%p: case 1, deleting everything", na); + /* case 1) below */ + nm_prdis("%p: case 1, deleting everything", na); /* To avoid double-frees we zero-out all the buffers in the kernel part * of each ring. The reason is this: If the user is behaving correctly, * all buffers are found in exactly one slot in the userspace part of * some ring. If the user is not behaving correctly, we cannot release * buffers cleanly anyway. In the latter case, the allocator will * return to a clean state only when all its users will close. */ sna = na; cleanup: for_rx_tx(t) { for (i = 0; i < nma_get_nrings(sna, t); i++) { struct netmap_kring *kring = NMR(sna, t)[i]; struct netmap_ring *ring = kring->ring; uint32_t j, lim = kring->nkr_num_slots - 1; - ND("%s ring %p hwtail %u hwcur %u", + nm_prdis("%s ring %p hwtail %u hwcur %u", kring->name, ring, kring->nr_hwtail, kring->nr_hwcur); if (ring == NULL) continue; if (kring->tx == NR_RX) ring->slot[kring->pipe_tail].buf_idx = 0; for (j = nm_next(kring->pipe_tail, lim); j != kring->nr_hwcur; j = nm_next(j, lim)) { - ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx); + nm_prdis("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx); ring->slot[j].buf_idx = 0; } kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING); } } if (sna != ona && ona->tx_rings) { sna = ona; goto cleanup; } netmap_mem_rings_delete(na); netmap_krings_delete(na); /* also zeroes tx_rings etc. */ if (ona->tx_rings == NULL) { /* already deleted, we must be on an * cleanup-after-error path */ return; } netmap_mem_rings_delete(ona); netmap_krings_delete(ona); } +/* netmap_pipe_krings_delete. + * + * There are two cases: + * + * 1) state is + * + * usr1 --> e1 --> e2 + * + * and we are e1 (e2 is not registered, so krings_delete cannot be + * called on it); + * + * 2) state is + * + * usr1 --> e1 e2 <-- usr2 + * + * and we are either e1 or e2. + * + * In the former case we have to also delete the krings of e2; + * in the latter case we do nothing. 
+ */ +static void +netmap_pipe_krings_delete(struct netmap_adapter *na) +{ + struct netmap_pipe_adapter *pna = + (struct netmap_pipe_adapter *)na; + struct netmap_adapter *ona; /* na of the other end */ + if (!pna->peer_ref) { + nm_prdis("%p: case 2, kept alive by peer", na); + return; + } + ona = &pna->peer->up; + netmap_pipe_krings_delete_both(na, ona); +} + + static void netmap_pipe_dtor(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; - ND("%p %p", na, pna->parent_ifp); + nm_prdis("%p %p", na, pna->parent_ifp); if (pna->peer_ref) { - ND("%p: clean up peer", na); + nm_prdis("%p: clean up peer", na); pna->peer_ref = 0; netmap_adapter_put(&pna->peer->up); } if (pna->role == NM_PIPE_ROLE_MASTER) netmap_pipe_remove(pna->parent, pna); if (pna->parent_ifp) if_rele(pna->parent_ifp); netmap_adapter_put(pna->parent); pna->parent = NULL; } int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct netmap_adapter *pna; /* parent adapter */ struct netmap_pipe_adapter *mna, *sna, *reqna; struct ifnet *ifp = NULL; const char *pipe_id = NULL; int role = 0; int error, retries = 0; char *cbra; /* Try to parse the pipe syntax 'xx{yy' or 'xx}yy'. */ cbra = strrchr(hdr->nr_name, '{'); if (cbra != NULL) { role = NM_PIPE_ROLE_MASTER; } else { cbra = strrchr(hdr->nr_name, '}'); if (cbra != NULL) { role = NM_PIPE_ROLE_SLAVE; } else { - ND("not a pipe"); + nm_prdis("not a pipe"); return 0; } } pipe_id = cbra + 1; if (*pipe_id == '\0' || cbra == hdr->nr_name) { /* Bracket is the last character, so pipe name is missing; * or bracket is the first character, so base port name * is missing. */ return EINVAL; } if (req->nr_mode != NR_REG_ALL_NIC && req->nr_mode != NR_REG_ONE_NIC) { /* We only accept modes involving hardware rings. */ return EINVAL; } /* first, try to find the parent adapter */ for (;;) { char nr_name_orig[NETMAP_REQ_IFNAMSIZ]; int create_error; /* Temporarily remove the pipe suffix. */ strlcpy(nr_name_orig, hdr->nr_name, sizeof(nr_name_orig)); *cbra = '\0'; error = netmap_get_na(hdr, &pna, &ifp, nmd, create); /* Restore the pipe suffix. 
*/ strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name)); if (!error) break; if (error != ENXIO || retries++) { - ND("parent lookup failed: %d", error); + nm_prdis("parent lookup failed: %d", error); return error; } - ND("try to create a persistent vale port"); + nm_prdis("try to create a persistent vale port"); /* create a persistent vale port and try again */ *cbra = '\0'; NMG_UNLOCK(); create_error = netmap_vi_create(hdr, 1 /* autodelete */); NMG_LOCK(); strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name)); if (create_error && create_error != EEXIST) { if (create_error != EOPNOTSUPP) { - D("failed to create a persistent vale port: %d", create_error); + nm_prerr("failed to create a persistent vale port: %d", + create_error); } return error; } } if (NETMAP_OWNED_BY_KERN(pna)) { - ND("parent busy"); + nm_prdis("parent busy"); error = EBUSY; goto put_out; } /* next, lookup the pipe id in the parent list */ reqna = NULL; mna = netmap_pipe_find(pna, pipe_id); if (mna) { if (mna->role == role) { - ND("found %s directly at %d", pipe_id, mna->parent_slot); + nm_prdis("found %s directly at %d", pipe_id, mna->parent_slot); reqna = mna; } else { - ND("found %s indirectly at %d", pipe_id, mna->parent_slot); + nm_prdis("found %s indirectly at %d", pipe_id, mna->parent_slot); reqna = mna->peer; } /* the pipe we have found already holds a ref to the parent, * so we need to drop the one we got from netmap_get_na() */ netmap_unget_na(pna, ifp); goto found; } - ND("pipe %s not found, create %d", pipe_id, create); + nm_prdis("pipe %s not found, create %d", pipe_id, create); if (!create) { error = ENODEV; goto put_out; } /* we create both master and slave. * The endpoint we were asked for holds a reference to * the other one. */ mna = nm_os_malloc(sizeof(*mna)); if (mna == NULL) { error = ENOMEM; goto put_out; } snprintf(mna->up.name, sizeof(mna->up.name), "%s{%s", pna->name, pipe_id); mna->role = NM_PIPE_ROLE_MASTER; mna->parent = pna; mna->parent_ifp = ifp; mna->up.nm_txsync = netmap_pipe_txsync; mna->up.nm_rxsync = netmap_pipe_rxsync; mna->up.nm_register = netmap_pipe_reg; mna->up.nm_dtor = netmap_pipe_dtor; mna->up.nm_krings_create = netmap_pipe_krings_create; mna->up.nm_krings_delete = netmap_pipe_krings_delete; mna->up.nm_mem = netmap_mem_get(pna->nm_mem); mna->up.na_flags |= NAF_MEM_OWNER; mna->up.na_lut = pna->na_lut; mna->up.num_tx_rings = req->nr_tx_rings; nm_bound_var(&mna->up.num_tx_rings, 1, 1, NM_PIPE_MAXRINGS, NULL); mna->up.num_rx_rings = req->nr_rx_rings; nm_bound_var(&mna->up.num_rx_rings, 1, 1, NM_PIPE_MAXRINGS, NULL); mna->up.num_tx_desc = req->nr_tx_slots; nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 1, NM_PIPE_MAXSLOTS, NULL); mna->up.num_rx_desc = req->nr_rx_slots; nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 1, NM_PIPE_MAXSLOTS, NULL); error = netmap_attach_common(&mna->up); if (error) goto free_mna; /* register the master with the parent */ error = netmap_pipe_add(pna, mna); if (error) goto free_mna; /* create the slave */ sna = nm_os_malloc(sizeof(*mna)); if (sna == NULL) { error = ENOMEM; goto unregister_mna; } /* most fields are the same, copy from master and then fix */ *sna = *mna; sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem); /* swap the number of tx/rx rings and slots */ sna->up.num_tx_rings = mna->up.num_rx_rings; sna->up.num_tx_desc = mna->up.num_rx_desc; sna->up.num_rx_rings = mna->up.num_tx_rings; sna->up.num_rx_desc = mna->up.num_tx_desc; snprintf(sna->up.name, sizeof(sna->up.name), "%s}%s", pna->name, pipe_id); sna->role = NM_PIPE_ROLE_SLAVE; 
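/*
 * Illustrative sketch, not part of the driver: the producer/consumer
 * handshake used by netmap_pipe_txsync()/netmap_pipe_rxsync() above.
 * The producer copies slot descriptors into the peer ring at the same
 * index and publishes, through a single index (pipe_tail in the real
 * code), only slots that terminate a complete packet, i.e. whose last
 * fragment does not carry NS_MOREFRAG.  All names below (ex_*) are
 * hypothetical and exist only for this example.
 */
#include <stdint.h>

#define EX_MOREFRAG	0x1	/* stand-in for NS_MOREFRAG */

struct ex_slot {
	uint32_t buf_idx;
	uint16_t len;
	uint16_t flags;
};

struct ex_pipe_end {
	struct ex_slot *slot;	/* this end's slot array */
	uint32_t num_slots;
	uint32_t hwcur;		/* next slot this end will process */
	uint32_t pipe_tail;	/* written by the peer: first slot not yet
				 * made available to this end */
};

/* producer: move slots [hwcur, head) to the peer, expose complete packets */
static void
ex_pipe_produce(struct ex_pipe_end *tx, struct ex_pipe_end *rx, uint32_t head)
{
	uint32_t lim = tx->num_slots - 1;
	uint32_t k, nk = rx->pipe_tail;

	for (k = tx->hwcur; k != head; k = (k == lim) ? 0 : k + 1) {
		rx->slot[k] = tx->slot[k];	/* copy the slot descriptor */
		if (!(tx->slot[k].flags & EX_MOREFRAG))
			nk = (k == lim) ? 0 : k + 1;	/* packet boundary */
	}
	tx->hwcur = k;
	/* the real code issues a memory barrier (mb()) before publishing */
	rx->pipe_tail = nk;	/* no-op if no complete packet was copied */
}
/*
 * Publishing only complete packets means the consumer never observes a
 * half-copied NS_MOREFRAG chain; the consumer side then releases slots
 * by advancing its own hwcur and, in turn, the producer's pipe_tail.
 */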
error = netmap_attach_common(&sna->up); if (error) goto free_sna; /* join the two endpoints */ mna->peer = sna; sna->peer = mna; /* we already have a reference to the parent, but we * need another one for the other endpoint we created */ netmap_adapter_get(pna); /* likewise for the ifp, if any */ if (ifp) if_ref(ifp); if (role == NM_PIPE_ROLE_MASTER) { reqna = mna; mna->peer_ref = 1; netmap_adapter_get(&sna->up); } else { reqna = sna; sna->peer_ref = 1; netmap_adapter_get(&mna->up); } - ND("created master %p and slave %p", mna, sna); + nm_prdis("created master %p and slave %p", mna, sna); found: - ND("pipe %s %s at %p", pipe_id, + nm_prdis("pipe %s %s at %p", pipe_id, (reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna); *na = &reqna->up; netmap_adapter_get(*na); /* keep the reference to the parent. * It will be released by the req destructor */ return 0; free_sna: nm_os_free(sna); unregister_mna: netmap_pipe_remove(pna, mna); free_mna: nm_os_free(mna); put_out: netmap_unget_na(pna, ifp); return error; } #endif /* WITH_PIPES */ Index: stable/12/sys/dev/netmap/netmap_vale.c =================================================================== --- stable/12/sys/dev/netmap/netmap_vale.c (revision 344045) +++ stable/12/sys/dev/netmap/netmap_vale.c (revision 344046) @@ -1,1615 +1,1616 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__FreeBSD__) #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #include #ifdef WITH_VALE /* * system parameters (most of them in netmap_kern.h) * NM_BDG_NAME prefix for switch port names, default "vale" * NM_BDG_MAXPORTS number of ports * NM_BRIDGES max number of switches in the system. * XXX should become a sysctl or tunable * * Switch ports are named valeX:Y where X is the switch name and Y * is the port. If Y matches a physical interface name, the port is * connected to a physical device. * * Unlike physical interfaces, switch ports use their own memory region * for rings and buffers. * The virtual interfaces use per-queue lock instead of core lock. * In the tx loop, we aggregate traffic in batches to make all operations * faster. The batch size is bridge_batch. */ #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. */ #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */ #define NM_BRIDGE_RINGSIZE 1024 /* in the device */ #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */ /* actual size of the tables */ #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NETMAP_MAX_FRAGS) /* NM_FT_NULL terminates a list of slots in the ft */ #define NM_FT_NULL NM_BDG_BATCH_MAX /* * bridge_batch is set via sysctl to the max batch size to be * used in the bridge. The actual value may be larger as the * last packet in the block may overflow the size. */ static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */ SYSBEGIN(vars_vale); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0, "Max batch size to be used in the bridge"); SYSEND; static int netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *, struct netmap_mem_d *nmd, struct netmap_vp_adapter **); static int netmap_vale_vp_bdg_attach(const char *, struct netmap_adapter *, struct nm_bridge *); static int netmap_vale_bwrap_attach(const char *, struct netmap_adapter *); /* * For each output interface, nm_vale_q is used to construct a list. * bq_len is the number of output buffers (we can have coalescing * during the copy). */ struct nm_vale_q { uint16_t bq_head; uint16_t bq_tail; uint32_t bq_len; /* number of buffers */ }; /* Holds the default callbacks */ struct netmap_bdg_ops vale_bdg_ops = { .lookup = netmap_vale_learning, .config = NULL, .dtor = NULL, .vp_create = netmap_vale_vp_create, .bwrap_attach = netmap_vale_bwrap_attach, .name = NM_BDG_NAME, }; /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. We assume there is enough room * in the source and destination buffers. * * XXX only for multiples of 64 bytes, non overlapped. 
*/ static inline void pkt_copy(void *_src, void *_dst, int l) { uint64_t *src = _src; uint64_t *dst = _dst; if (unlikely(l >= 1024)) { memcpy(dst, src, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } /* * Free the forwarding tables for rings attached to switch ports. */ static void nm_free_bdgfwd(struct netmap_adapter *na) { int nrings, i; struct netmap_kring **kring; NMG_LOCK_ASSERT(); nrings = na->num_tx_rings; kring = na->tx_rings; for (i = 0; i < nrings; i++) { if (kring[i]->nkr_ft) { nm_os_free(kring[i]->nkr_ft); kring[i]->nkr_ft = NULL; /* protect from freeing twice */ } } } /* * Allocate the forwarding tables for the rings attached to the bridge ports. */ static int nm_alloc_bdgfwd(struct netmap_adapter *na) { int nrings, l, i, num_dstq; struct netmap_kring **kring; NMG_LOCK_ASSERT(); /* all port:rings + broadcast */ num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1; l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX; l += sizeof(struct nm_vale_q) * num_dstq; l += sizeof(uint16_t) * NM_BDG_BATCH_MAX; nrings = netmap_real_rings(na, NR_TX); kring = na->tx_rings; for (i = 0; i < nrings; i++) { struct nm_bdg_fwd *ft; struct nm_vale_q *dstq; int j; ft = nm_os_malloc(l); if (!ft) { nm_free_bdgfwd(na); return ENOMEM; } dstq = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX); for (j = 0; j < num_dstq; j++) { dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL; dstq[j].bq_len = 0; } kring[i]->nkr_ft = ft; } return 0; } /* Allows external modules to create bridges in exclusive mode, * returns an authentication token that the external module will need * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(), * and nm_bdg_update_private_data() operations. * Successfully executed if ret != NULL and *return_status == 0. */ void * netmap_vale_create(const char *bdg_name, int *return_status) { struct nm_bridge *b = NULL; void *ret = NULL; NMG_LOCK(); b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL); if (b) { *return_status = EEXIST; goto unlock_bdg_create; } b = nm_find_bridge(bdg_name, 1 /* create */, &vale_bdg_ops); if (!b) { *return_status = ENOMEM; goto unlock_bdg_create; } b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE; ret = nm_bdg_get_auth_token(b); *return_status = 0; unlock_bdg_create: NMG_UNLOCK(); return ret; } /* Allows external modules to destroy a bridge created through * netmap_bdg_create(), the bridge must be empty. */ int netmap_vale_destroy(const char *bdg_name, void *auth_token) { struct nm_bridge *b = NULL; int ret = 0; NMG_LOCK(); b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL); if (!b) { ret = ENXIO; goto unlock_bdg_free; } if (!nm_bdg_valid_auth_token(b, auth_token)) { ret = EACCES; goto unlock_bdg_free; } if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) { ret = EINVAL; goto unlock_bdg_free; } b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE); ret = netmap_bdg_free(b); if (ret) { b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE; } unlock_bdg_free: NMG_UNLOCK(); return ret; } /* Process NETMAP_REQ_VALE_LIST. 
*/ int netmap_vale_list(struct nmreq_header *hdr) { struct nmreq_vale_list *req = (struct nmreq_vale_list *)(uintptr_t)hdr->nr_body; int namelen = strlen(hdr->nr_name); struct nm_bridge *b, *bridges; struct netmap_vp_adapter *vpna; int error = 0, i, j; u_int num_bridges; netmap_bns_getbridges(&bridges, &num_bridges); /* this is used to enumerate bridges and ports */ if (namelen) { /* look up indexes of bridge and port */ if (strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME))) { return EINVAL; } NMG_LOCK(); b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL); if (!b) { NMG_UNLOCK(); return ENOENT; } req->nr_bridge_idx = b - bridges; /* bridge index */ req->nr_port_idx = NM_BDG_NOPORT; for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; vpna = b->bdg_ports[i]; if (vpna == NULL) { nm_prerr("This should not happen"); continue; } /* the former and the latter identify a * virtual port and a NIC, respectively */ if (!strcmp(vpna->up.name, hdr->nr_name)) { req->nr_port_idx = i; /* port index */ break; } } NMG_UNLOCK(); } else { /* return the first non-empty entry starting from * bridge nr_arg1 and port nr_arg2. * * Users can detect the end of the same bridge by * seeing the new and old value of nr_arg1, and can * detect the end of all the bridge by error != 0 */ i = req->nr_bridge_idx; j = req->nr_port_idx; NMG_LOCK(); for (error = ENOENT; i < NM_BRIDGES; i++) { b = bridges + i; for ( ; j < NM_BDG_MAXPORTS; j++) { if (b->bdg_ports[j] == NULL) continue; vpna = b->bdg_ports[j]; /* write back the VALE switch name */ strlcpy(hdr->nr_name, vpna->up.name, sizeof(hdr->nr_name)); error = 0; goto out; } j = 0; /* following bridges scan from 0 */ } out: req->nr_bridge_idx = i; req->nr_port_idx = j; NMG_UNLOCK(); } return error; } /* Process NETMAP_REQ_VALE_ATTACH. */ int netmap_vale_attach(struct nmreq_header *hdr, void *auth_token) { struct nmreq_vale_attach *req = (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body; struct netmap_vp_adapter * vpna; struct netmap_adapter *na = NULL; struct netmap_mem_d *nmd = NULL; struct nm_bridge *b = NULL; int error; NMG_LOCK(); /* permission check for modified bridges */ b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL); if (b && !nm_bdg_valid_auth_token(b, auth_token)) { error = EACCES; goto unlock_exit; } if (req->reg.nr_mem_id) { nmd = netmap_mem_find(req->reg.nr_mem_id); if (nmd == NULL) { error = EINVAL; goto unlock_exit; } } /* check for existing one */ error = netmap_get_vale_na(hdr, &na, nmd, 0); if (na) { error = EBUSY; goto unref_exit; } error = netmap_get_vale_na(hdr, &na, nmd, 1 /* create if not exists */); if (error) { /* no device */ goto unlock_exit; } if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } if (NETMAP_OWNED_BY_ANY(na)) { error = EBUSY; goto unref_exit; } if (na->nm_bdg_ctl) { /* nop for VALE ports. The bwrap needs to put the hwna * in netmap mode (see netmap_bwrap_bdg_ctl) */ error = na->nm_bdg_ctl(hdr, na); if (error) goto unref_exit; - ND("registered %s to netmap-mode", na->name); + nm_prdis("registered %s to netmap-mode", na->name); } vpna = (struct netmap_vp_adapter *)na; req->port_index = vpna->bdg_port; if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); return 0; unref_exit: netmap_adapter_put(na); unlock_exit: if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); return error; } /* Process NETMAP_REQ_VALE_DETACH. 
*/ int netmap_vale_detach(struct nmreq_header *hdr, void *auth_token) { struct nmreq_vale_detach *nmreq_det = (void *)(uintptr_t)hdr->nr_body; struct netmap_vp_adapter *vpna; struct netmap_adapter *na; struct nm_bridge *b = NULL; int error; NMG_LOCK(); /* permission check for modified bridges */ b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL); if (b && !nm_bdg_valid_auth_token(b, auth_token)) { error = EACCES; goto unlock_exit; } error = netmap_get_vale_na(hdr, &na, NULL, 0 /* don't create */); if (error) { /* no device, or another bridge or user owns the device */ goto unlock_exit; } if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } else if (nm_is_bwrap(na) && ((struct netmap_bwrap_adapter *)na)->na_polling_state) { /* Don't detach a NIC with polling */ error = EBUSY; goto unref_exit; } vpna = (struct netmap_vp_adapter *)na; if (na->na_vp != vpna) { /* trying to detach first attach of VALE persistent port attached * to 2 bridges */ error = EBUSY; goto unref_exit; } nmreq_det->port_index = vpna->bdg_port; if (na->nm_bdg_ctl) { /* remove the port from bridge. The bwrap * also needs to put the hwna in normal mode */ error = na->nm_bdg_ctl(hdr, na); } unref_exit: netmap_adapter_put(na); unlock_exit: NMG_UNLOCK(); return error; } /* nm_dtor callback for ephemeral VALE ports */ static void netmap_vale_vp_dtor(struct netmap_adapter *na) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na; struct nm_bridge *b = vpna->na_bdg; - ND("%s has %d references", na->name, na->na_refcount); + nm_prdis("%s has %d references", na->name, na->na_refcount); if (b) { netmap_bdg_detach_common(b, vpna->bdg_port, -1); } if (na->ifp != NULL && !nm_iszombie(na)) { NM_DETACH_NA(na->ifp); if (vpna->autodelete) { - ND("releasing %s", na->ifp->if_xname); + nm_prdis("releasing %s", na->ifp->if_xname); NMG_UNLOCK(); nm_os_vi_detach(na->ifp); NMG_LOCK(); } } } /* nm_krings_create callback for VALE ports. * Calls the standard netmap_krings_create, then adds leases on rx * rings and bdgfwd on tx rings. */ static int netmap_vale_vp_krings_create(struct netmap_adapter *na) { u_int tailroom; int error, i; uint32_t *leases; u_int nrx = netmap_real_rings(na, NR_RX); /* * Leases are attached to RX rings on vale ports */ tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx; error = netmap_krings_create(na, tailroom); if (error) return error; leases = na->tailroom; for (i = 0; i < nrx; i++) { /* Receive rings */ na->rx_rings[i]->nkr_leases = leases; leases += na->num_rx_desc; } error = nm_alloc_bdgfwd(na); if (error) { netmap_krings_delete(na); return error; } return 0; } /* nm_krings_delete callback for VALE ports. */ static void netmap_vale_vp_krings_delete(struct netmap_adapter *na) { nm_free_bdgfwd(na); netmap_krings_delete(na); } static int nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na, u_int ring_nr); /* * main dispatch routine for the bridge. * Grab packets from a kring, move them into the ft structure * associated to the tx (input) port. Max one instance per port, * filtered on input (ioctl, poll or XXX). * Returns the next position in the ring. */ static int nm_vale_preflush(struct netmap_kring *kring, u_int end) { struct netmap_vp_adapter *na = (struct netmap_vp_adapter*)kring->na; struct netmap_ring *ring = kring->ring; struct nm_bdg_fwd *ft; u_int ring_nr = kring->ring_id; u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1; u_int ft_i = 0; /* start from 0 */ u_int frags = 1; /* how many frags ? 
*/ struct nm_bridge *b = na->na_bdg; /* To protect against modifications to the bridge we acquire a * shared lock, waiting if we can sleep (if the source port is * attached to a user process) or with a trylock otherwise (NICs). */ - ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j); + nm_prdis("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j); if (na->up.na_flags & NAF_BDG_MAYSLEEP) BDG_RLOCK(b); else if (!BDG_RTRYLOCK(b)) return j; - ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j); + nm_prdis(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j); ft = kring->nkr_ft; for (; likely(j != end); j = nm_next(j, lim)) { struct netmap_slot *slot = &ring->slot[j]; char *buf; ft[ft_i].ft_len = slot->len; ft[ft_i].ft_flags = slot->flags; ft[ft_i].ft_offset = 0; - ND("flags is 0x%x", slot->flags); + nm_prdis("flags is 0x%x", slot->flags); /* we do not use the buf changed flag, but we still need to reset it */ slot->flags &= ~NS_BUF_CHANGED; /* this slot goes into a list so initialize the link field */ ft[ft_i].ft_next = NM_FT_NULL; buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ? (void *)(uintptr_t)slot->ptr : NMB(&na->up, slot); if (unlikely(buf == NULL)) { nm_prlim(5, "NULL %s buffer pointer from %s slot %d len %d", (slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT", kring->name, j, ft[ft_i].ft_len); buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up); ft[ft_i].ft_len = 0; ft[ft_i].ft_flags = 0; } __builtin_prefetch(buf); ++ft_i; if (slot->flags & NS_MOREFRAG) { frags++; continue; } if (unlikely(netmap_verbose && frags > 1)) - RD(5, "%d frags at %d", frags, ft_i - frags); + nm_prlim(5, "%d frags at %d", frags, ft_i - frags); ft[ft_i - frags].ft_frags = frags; frags = 1; if (unlikely((int)ft_i >= bridge_batch)) ft_i = nm_vale_flush(ft, ft_i, na, ring_nr); } if (frags > 1) { /* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we * have to fix frags count. */ frags--; ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG; ft[ft_i - frags].ft_frags = frags; nm_prlim(5, "Truncate incomplete fragment at %d (%d frags)", ft_i, frags); } if (ft_i) ft_i = nm_vale_flush(ft, ft_i, na, ring_nr); BDG_RUNLOCK(b); return j; } /* ----- FreeBSD if_bridge hash function ------- */ /* * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). * * http://www.burtleburtle.net/bob/hash/spooky.html */ #define mix(a, b, c) \ do { \ a -= b; a -= c; a ^= (c >> 13); \ b -= c; b -= a; b ^= (a << 8); \ c -= a; c -= b; c ^= (b >> 13); \ a -= b; a -= c; a ^= (c >> 12); \ b -= c; b -= a; b ^= (a << 16); \ c -= a; c -= b; c ^= (b >> 5); \ a -= b; a -= c; a ^= (c >> 3); \ b -= c; b -= a; b ^= (a << 10); \ c -= a; c -= b; c ^= (b >> 15); \ } while (/*CONSTCOND*/0) static __inline uint32_t nm_vale_rthash(const uint8_t *addr) { uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key b += addr[5] << 8; b += addr[4]; a += addr[3] << 24; a += addr[2] << 16; a += addr[1] << 8; a += addr[0]; mix(a, b, c); #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1) return (c & BRIDGE_RTHASH_MASK); } #undef mix /* * Lookup function for a learning bridge. 
* Update the hash table with the source address, * and then returns the destination port index, and the * ring in *dst_ring (at the moment, always use ring 0) */ uint32_t netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, struct netmap_vp_adapter *na, void *private_data) { uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset; u_int buf_len = ft->ft_len - ft->ft_offset; struct nm_hash_ent *ht = private_data; uint32_t sh, dh; u_int dst, mysrc = na->bdg_port; uint64_t smac, dmac; uint8_t indbuf[12]; if (buf_len < 14) { return NM_BDG_NOPORT; } if (ft->ft_flags & NS_INDIRECT) { if (copyin(buf, indbuf, sizeof(indbuf))) { return NM_BDG_NOPORT; } buf = indbuf; } dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff; smac = le64toh(*(uint64_t *)(buf + 4)); smac >>= 16; /* * The hash is somewhat expensive, there might be some * worthwhile optimizations here. */ if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */ uint8_t *s = buf+6; sh = nm_vale_rthash(s); /* hash of source */ /* update source port forwarding entry */ na->last_smac = ht[sh].mac = smac; /* XXX expire ? */ ht[sh].ports = mysrc; if (netmap_debug & NM_DEBUG_VALE) nm_prinf("src %02x:%02x:%02x:%02x:%02x:%02x on port %d", s[0], s[1], s[2], s[3], s[4], s[5], mysrc); } dst = NM_BDG_BROADCAST; if ((buf[0] & 1) == 0) { /* unicast */ dh = nm_vale_rthash(buf); /* hash of dst */ if (ht[dh].mac == dmac) { /* found dst */ dst = ht[dh].ports; } } return dst; } /* * Available space in the ring. Only used in VALE code * and only with is_rx = 1 */ static inline uint32_t nm_kr_space(struct netmap_kring *k, int is_rx) { int space; if (is_rx) { int busy = k->nkr_hwlease - k->nr_hwcur; if (busy < 0) busy += k->nkr_num_slots; space = k->nkr_num_slots - 1 - busy; } else { /* XXX never used in this branch */ space = k->nr_hwtail - k->nkr_hwlease; if (space < 0) space += k->nkr_num_slots; } #if 0 // sanity check if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_tail >= k->nkr_num_slots || busy < 0 || busy >= k->nkr_num_slots) { - D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease, - k->nkr_lease_idx, k->nkr_num_slots); + nm_prerr("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", + k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease, + k->nkr_lease_idx, k->nkr_num_slots); } #endif return space; } /* make a lease on the kring for N positions. 
return the * lease index * XXX only used in VALE code and with is_rx = 1 */ static inline uint32_t nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx) { uint32_t lim = k->nkr_num_slots - 1; uint32_t lease_idx = k->nkr_lease_idx; k->nkr_leases[lease_idx] = NR_NOSLOT; k->nkr_lease_idx = nm_next(lease_idx, lim); #ifdef CONFIG_NETMAP_DEBUG if (n > nm_kr_space(k, is_rx)) { nm_prerr("invalid request for %d slots", n); panic("x"); } #endif /* CONFIG NETMAP_DEBUG */ /* XXX verify that there are n slots */ k->nkr_hwlease += n; if (k->nkr_hwlease > lim) k->nkr_hwlease -= lim + 1; #ifdef CONFIG_NETMAP_DEBUG if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_hwtail >= k->nkr_num_slots || k->nkr_lease_idx >= k->nkr_num_slots) { nm_prerr("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d", k->na->name, k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } #endif /* CONFIG_NETMAP_DEBUG */ return lease_idx; } /* * * This flush routine supports only unicast and broadcast but a large * number of ports, and lets us replace the learn and dispatch functions. */ int nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na, u_int ring_nr) { struct nm_vale_q *dst_ents, *brddst; uint16_t num_dsts = 0, *dsts; struct nm_bridge *b = na->na_bdg; u_int i, me = na->bdg_port; /* * The work area (pointed by ft) is followed by an array of * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS * queues per port plus one for the broadcast traffic. * Then we have an array of destination indexes. */ dst_ents = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX); dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1); /* first pass: find a destination for each packet in the batch */ for (i = 0; likely(i < n); i += ft[i].ft_frags) { uint8_t dst_ring = ring_nr; /* default, same ring as origin */ uint16_t dst_port, d_i; struct nm_vale_q *d; struct nm_bdg_fwd *start_ft = NULL; - ND("slot %d frags %d", i, ft[i].ft_frags); + nm_prdis("slot %d frags %d", i, ft[i].ft_frags); if (na->up.virt_hdr_len < ft[i].ft_len) { ft[i].ft_offset = na->up.virt_hdr_len; start_ft = &ft[i]; } else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) { ft[i].ft_offset = ft[i].ft_len; start_ft = &ft[i+1]; } else { /* Drop the packet if the virtio-net header is not into the first * fragment nor at the very beginning of the second. */ continue; } dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data); if (netmap_verbose > 255) - RD(5, "slot %d port %d -> %d", i, me, dst_port); + nm_prlim(5, "slot %d port %d -> %d", i, me, dst_port); if (dst_port >= NM_BDG_NOPORT) continue; /* this packet is identified to be dropped */ else if (dst_port == NM_BDG_BROADCAST) dst_ring = 0; /* broadcasts always go to ring 0 */ else if (unlikely(dst_port == me || !b->bdg_ports[dst_port])) continue; /* get a position in the scratch pad */ d_i = dst_port * NM_BDG_MAXRINGS + dst_ring; d = dst_ents + d_i; /* append the first fragment to the list */ if (d->bq_head == NM_FT_NULL) { /* new destination */ d->bq_head = d->bq_tail = i; /* remember this position to be scanned later */ if (dst_port != NM_BDG_BROADCAST) dsts[num_dsts++] = d_i; } else { ft[d->bq_tail].ft_next = i; d->bq_tail = i; } d->bq_len += ft[i].ft_frags; } /* * Broadcast traffic goes to ring 0 on all destinations. * So we need to add these rings to the list of ports to scan. * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is * expensive. 
We should keep a compact list of active destinations * so we could shorten this loop. */ brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS; if (brddst->bq_head != NM_FT_NULL) { u_int j; for (j = 0; likely(j < b->bdg_active_ports); j++) { uint16_t d_i; i = b->bdg_port_index[j]; if (unlikely(i == me)) continue; d_i = i * NM_BDG_MAXRINGS; if (dst_ents[d_i].bq_head == NM_FT_NULL) dsts[num_dsts++] = d_i; } } - ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts); + nm_prdis(5, "pass 1 done %d pkts %d dsts", n, num_dsts); /* second pass: scan destinations */ for (i = 0; i < num_dsts; i++) { struct netmap_vp_adapter *dst_na; struct netmap_kring *kring; struct netmap_ring *ring; u_int dst_nr, lim, j, d_i, next, brd_next; u_int needed, howmany; int retry = netmap_txsync_retry; struct nm_vale_q *d; uint32_t my_start = 0, lease_idx = 0; int nrings; int virt_hdr_mismatch = 0; d_i = dsts[i]; - ND("second pass %d port %d", i, d_i); + nm_prdis("second pass %d port %d", i, d_i); d = dst_ents + d_i; // XXX fix the division dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS]; /* protect from the lookup function returning an inactive * destination port */ if (unlikely(dst_na == NULL)) goto cleanup; if (dst_na->up.na_flags & NAF_SW_ONLY) goto cleanup; /* * The interface may be in !netmap mode in two cases: * - when na is attached but not activated yet; * - when na is being deactivated but is still attached. */ if (unlikely(!nm_netmap_on(&dst_na->up))) { - ND("not in netmap mode!"); + nm_prdis("not in netmap mode!"); goto cleanup; } /* there is at least one either unicast or broadcast packet */ brd_next = brddst->bq_head; next = d->bq_head; /* we need to reserve this many slots. If fewer are * available, some packets will be dropped. * Packets may have multiple fragments, so there is a chance * that we may not use all of the slots we have claimed, and we * will need to handle the leftover ones when we regain the lock. */ needed = d->bq_len + brddst->bq_len; if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) { if (netmap_verbose) { - RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len, + nm_prlim(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len, dst_na->up.virt_hdr_len); } /* There is a virtio-net header/offloadings mismatch between * source and destination. The slower mismatch datapath will * be used to cope with all the mismatches. */ virt_hdr_mismatch = 1; if (dst_na->mfs < na->mfs) { /* We may need to do segmentation offloadings, and so * we may need a number of destination slots greater * than the number of input slots ('needed'). * We look for the smallest integer 'x' which satisfies: * needed * na->mfs + x * H <= x * dst_na->mfs * where 'H' is the length of the longest header that may * be replicated in the segmentation process (e.g. for * TCPv4 we must account for ethernet header, IP header * and TCPv4 header). */ KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0")); needed = (needed * na->mfs) / (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1; - ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed); + nm_prdis(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed); } } - ND(5, "pass 2 dst %d is %x %s", + nm_prdis(5, "pass 2 dst %d is %x %s", i, d_i, is_vp ?
"virtual" : "nic/host"); dst_nr = d_i & (NM_BDG_MAXRINGS-1); nrings = dst_na->up.num_rx_rings; if (dst_nr >= nrings) dst_nr = dst_nr % nrings; kring = dst_na->up.rx_rings[dst_nr]; ring = kring->ring; /* the destination ring may have not been opened for RX */ if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON)) goto cleanup; lim = kring->nkr_num_slots - 1; retry: if (dst_na->retry && retry) { /* try to get some free slot from the previous run */ kring->nm_notify(kring, NAF_FORCE_RECLAIM); /* actually useful only for bwraps, since there * the notify will trigger a txsync on the hwna. VALE ports * have dst_na->retry == 0 */ } /* reserve the buffers in the queue and an entry * to report completion, and drop lock. * XXX this might become a helper function. */ mtx_lock(&kring->q_lock); if (kring->nkr_stopped) { mtx_unlock(&kring->q_lock); goto cleanup; } my_start = j = kring->nkr_hwlease; howmany = nm_kr_space(kring, 1); if (needed < howmany) howmany = needed; lease_idx = nm_kr_lease(kring, howmany, 1); mtx_unlock(&kring->q_lock); /* only retry if we need more than available slots */ if (retry && needed <= howmany) retry = 0; /* copy to the destination queue */ while (howmany > 0) { struct netmap_slot *slot; struct nm_bdg_fwd *ft_p, *ft_end; u_int cnt; /* find the queue from which we pick next packet. * NM_FT_NULL is always higher than valid indexes * so we never dereference it if the other list * has packets (and if both are empty we never * get here). */ if (next < brd_next) { ft_p = ft + next; next = ft_p->ft_next; } else { /* insert broadcast */ ft_p = ft + brd_next; brd_next = ft_p->ft_next; } cnt = ft_p->ft_frags; // cnt > 0 if (unlikely(cnt > howmany)) break; /* no more space */ if (netmap_verbose && cnt > 1) - RD(5, "rx %d frags to %d", cnt, j); + nm_prlim(5, "rx %d frags to %d", cnt, j); ft_end = ft_p + cnt; if (unlikely(virt_hdr_mismatch)) { bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany); } else { howmany -= cnt; do { char *dst, *src = ft_p->ft_buf; size_t copy_len = ft_p->ft_len, dst_len = copy_len; slot = &ring->slot[j]; dst = NMB(&dst_na->up, slot); - ND("send [%d] %d(%d) bytes at %s:%d", + nm_prdis("send [%d] %d(%d) bytes at %s:%d", i, (int)copy_len, (int)dst_len, NM_IFPNAME(dst_ifp), j); /* round to a multiple of 64 */ copy_len = (copy_len + 63) & ~63; if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) || copy_len > NETMAP_BUF_SIZE(&na->up))) { - RD(5, "invalid len %d, down to 64", (int)copy_len); + nm_prlim(5, "invalid len %d, down to 64", (int)copy_len); copy_len = dst_len = 64; // XXX } if (ft_p->ft_flags & NS_INDIRECT) { if (copyin(src, dst, copy_len)) { // invalid user pointer, pretend len is 0 dst_len = 0; } } else { //memcpy(dst, src, copy_len); pkt_copy(src, dst, (int)copy_len); } slot->len = dst_len; slot->flags = (cnt << 8)| NS_MOREFRAG; j = nm_next(j, lim); needed--; ft_p++; } while (ft_p != ft_end); slot->flags = (cnt << 8); /* clear flag on last entry */ } /* are we done ? */ if (next == NM_FT_NULL && brd_next == NM_FT_NULL) break; } { /* current position */ uint32_t *p = kring->nkr_leases; /* shorthand */ uint32_t update_pos; int still_locked = 1; mtx_lock(&kring->q_lock); if (unlikely(howmany > 0)) { /* not used all bufs. If i am the last one * i can recover the slots, otherwise must * fill them with 0 to mark empty packets. 
*/ - ND("leftover %d bufs", howmany); + nm_prdis("leftover %d bufs", howmany); if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) { /* yes i am the last one */ - ND("roll back nkr_hwlease to %d", j); + nm_prdis("roll back nkr_hwlease to %d", j); kring->nkr_hwlease = j; } else { while (howmany-- > 0) { ring->slot[j].len = 0; ring->slot[j].flags = 0; j = nm_next(j, lim); } } } p[lease_idx] = j; /* report I am done */ update_pos = kring->nr_hwtail; if (my_start == update_pos) { /* all slots before my_start have been reported, * so scan subsequent leases to see if other ranges * have been completed, and to a selwakeup or txsync. */ while (lease_idx != kring->nkr_lease_idx && p[lease_idx] != NR_NOSLOT) { j = p[lease_idx]; p[lease_idx] = NR_NOSLOT; lease_idx = nm_next(lease_idx, lim); } /* j is the new 'write' position. j != my_start * means there are new buffers to report */ if (likely(j != my_start)) { kring->nr_hwtail = j; still_locked = 0; mtx_unlock(&kring->q_lock); kring->nm_notify(kring, 0); /* this is netmap_notify for VALE ports and * netmap_bwrap_notify for bwrap. The latter will * trigger a txsync on the underlying hwna */ if (dst_na->retry && retry--) { /* XXX this is going to call nm_notify again. * Only useful for bwrap in virtual machines */ goto retry; } } } if (still_locked) mtx_unlock(&kring->q_lock); } cleanup: d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */ d->bq_len = 0; } brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */ brddst->bq_len = 0; return 0; } /* nm_txsync callback for VALE ports */ static int netmap_vale_vp_txsync(struct netmap_kring *kring, int flags) { struct netmap_vp_adapter *na = (struct netmap_vp_adapter *)kring->na; u_int done; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; if (bridge_batch <= 0) { /* testing only */ done = head; // used all goto done; } if (!na->na_bdg) { done = head; goto done; } if (bridge_batch > NM_BDG_BATCH) bridge_batch = NM_BDG_BATCH; done = nm_vale_preflush(kring, head); done: if (done != head) nm_prerr("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail); /* * packets between 'done' and 'cur' are left unsent. */ kring->nr_hwcur = done; kring->nr_hwtail = nm_prev(done, lim); if (netmap_debug & NM_DEBUG_TXSYNC) nm_prinf("%s ring %d flags %d", na->up.name, kring->ring_id, flags); return 0; } /* create a netmap_vp_adapter that describes a VALE port. * Only persistent VALE ports have a non-null ifp. */ static int netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct netmap_vp_adapter *vpna; struct netmap_adapter *na; int error = 0; u_int npipes = 0; u_int extrabufs = 0; if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { return EINVAL; } vpna = nm_os_malloc(sizeof(*vpna)); if (vpna == NULL) return ENOMEM; na = &vpna->up; na->ifp = ifp; strlcpy(na->name, hdr->nr_name, sizeof(na->name)); /* bound checking */ na->num_tx_rings = req->nr_tx_rings; nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); req->nr_tx_rings = na->num_tx_rings; /* write back */ na->num_rx_rings = req->nr_rx_rings; nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); req->nr_rx_rings = na->num_rx_rings; /* write back */ nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); na->num_tx_desc = req->nr_tx_slots; nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); /* validate number of pipes. 
We want at least 1, * but probably can do with some more. * So let's use 2 as default (when 0 is supplied) */ nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL); /* validate extra bufs */ extrabufs = req->nr_extra_bufs; nm_bound_var(&extrabufs, 0, 0, 128*NM_BDG_MAXSLOTS, NULL); req->nr_extra_bufs = extrabufs; /* write back */ na->num_rx_desc = req->nr_rx_slots; /* Set the mfs to a default value, as it is needed on the VALE * mismatch datapath. XXX We should set it according to the MTU * known to the kernel. */ vpna->mfs = NM_BDG_MFS_DEFAULT; vpna->last_smac = ~0llu; /*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero?? vpna->mfs = netmap_buf_size; */ if (netmap_verbose) nm_prinf("max frame size %u", vpna->mfs); na->na_flags |= NAF_BDG_MAYSLEEP; /* persistent VALE ports look like hw devices * with a native netmap adapter */ if (ifp) na->na_flags |= NAF_NATIVE; na->nm_txsync = netmap_vale_vp_txsync; na->nm_rxsync = netmap_vp_rxsync; /* use the one provided by bdg */ na->nm_register = netmap_vp_reg; /* use the one provided by bdg */ na->nm_krings_create = netmap_vale_vp_krings_create; na->nm_krings_delete = netmap_vale_vp_krings_delete; na->nm_dtor = netmap_vale_vp_dtor; - ND("nr_mem_id %d", req->nr_mem_id); + nm_prdis("nr_mem_id %d", req->nr_mem_id); na->nm_mem = nmd ? netmap_mem_get(nmd): netmap_mem_private_new( na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc, req->nr_extra_bufs, npipes, &error); if (na->nm_mem == NULL) goto err; na->nm_bdg_attach = netmap_vale_vp_bdg_attach; /* other nmd fields are set in the common routine */ error = netmap_attach_common(na); if (error) goto err; *ret = vpna; return 0; err: if (na->nm_mem != NULL) netmap_mem_put(na->nm_mem); nm_os_free(vpna); return error; } /* nm_bdg_attach callback for VALE ports * The na_vp port is this same netmap_adapter. There is no host port. */ static int netmap_vale_vp_bdg_attach(const char *name, struct netmap_adapter *na, struct nm_bridge *b) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; if ((b->bdg_flags & NM_BDG_NEED_BWRAP) || vpna->na_bdg) { return NM_NEED_BWRAP; } na->na_vp = vpna; strlcpy(na->name, name, sizeof(na->name)); na->na_hostvp = NULL; return 0; } static int netmap_vale_bwrap_krings_create(struct netmap_adapter *na) { int error; /* impersonate a netmap_vp_adapter */ error = netmap_vale_vp_krings_create(na); if (error) return error; error = netmap_bwrap_krings_create_common(na); if (error) { netmap_vale_vp_krings_delete(na); } return error; } static void netmap_vale_bwrap_krings_delete(struct netmap_adapter *na) { netmap_bwrap_krings_delete_common(na); netmap_vale_vp_krings_delete(na); } static int netmap_vale_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna) { struct netmap_bwrap_adapter *bna; struct netmap_adapter *na = NULL; struct netmap_adapter *hostna = NULL; int error; bna = nm_os_malloc(sizeof(*bna)); if (bna == NULL) { return ENOMEM; } na = &bna->up.up; strlcpy(na->name, nr_name, sizeof(na->name)); na->nm_register = netmap_bwrap_reg; na->nm_txsync = netmap_vale_vp_txsync; // na->nm_rxsync = netmap_bwrap_rxsync; na->nm_krings_create = netmap_vale_bwrap_krings_create; na->nm_krings_delete = netmap_vale_bwrap_krings_delete; na->nm_notify = netmap_bwrap_notify; bna->up.retry = 1; /* XXX maybe this should depend on the hwna */ /* Set the mfs, needed on the VALE mismatch datapath. 
*/ bna->up.mfs = NM_BDG_MFS_DEFAULT; if (hwna->na_flags & NAF_HOST_RINGS) { hostna = &bna->host.up; hostna->nm_notify = netmap_bwrap_notify; bna->host.mfs = NM_BDG_MFS_DEFAULT; } error = netmap_bwrap_attach_common(na, hwna); if (error) { nm_os_free(bna); } return error; } int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops); } /* creates a persistent VALE port */ int nm_vi_create(struct nmreq_header *hdr) { struct nmreq_vale_newif *req = (struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body; int error = 0; /* Build a nmreq_register out of the nmreq_vale_newif, * so that we can call netmap_get_bdg_na(). */ struct nmreq_register regreq; bzero(&regreq, sizeof(regreq)); regreq.nr_tx_slots = req->nr_tx_slots; regreq.nr_rx_slots = req->nr_rx_slots; regreq.nr_tx_rings = req->nr_tx_rings; regreq.nr_rx_rings = req->nr_rx_rings; regreq.nr_mem_id = req->nr_mem_id; hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)&regreq; error = netmap_vi_create(hdr, 0 /* no autodelete */); hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF; hdr->nr_body = (uintptr_t)req; /* Write back to the original struct. */ req->nr_tx_slots = regreq.nr_tx_slots; req->nr_rx_slots = regreq.nr_rx_slots; req->nr_tx_rings = regreq.nr_tx_rings; req->nr_rx_rings = regreq.nr_rx_rings; req->nr_mem_id = regreq.nr_mem_id; return error; } /* remove a persistent VALE port from the system */ int nm_vi_destroy(const char *name) { struct ifnet *ifp; struct netmap_vp_adapter *vpna; int error; ifp = ifunit_ref(name); if (!ifp) return ENXIO; NMG_LOCK(); /* make sure this is actually a VALE port */ if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) { error = EINVAL; goto err; } vpna = (struct netmap_vp_adapter *)NA(ifp); /* we can only destroy ports that were created via NETMAP_BDG_NEWIF */ if (vpna->autodelete) { error = EINVAL; goto err; } /* also make sure that nobody is using the interface */ if (NETMAP_OWNED_BY_ANY(&vpna->up) || vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) { error = EBUSY; goto err; } NMG_UNLOCK(); if (netmap_verbose) nm_prinf("destroying a persistent vale interface %s", ifp->if_xname); /* Linux requires all the references are released * before unregister */ netmap_detach(ifp); if_rele(ifp); nm_os_vi_detach(ifp); return 0; err: NMG_UNLOCK(); if_rele(ifp); return error; } static int nm_update_info(struct nmreq_register *req, struct netmap_adapter *na) { req->nr_rx_rings = na->num_rx_rings; req->nr_tx_rings = na->num_tx_rings; req->nr_rx_slots = na->num_rx_desc; req->nr_tx_slots = na->num_tx_desc; return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL, &req->nr_mem_id); } /* * Create a virtual interface registered to the system. * The interface will be attached to a bridge later.
*/ int netmap_vi_create(struct nmreq_header *hdr, int autodelete) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct ifnet *ifp; struct netmap_vp_adapter *vpna; struct netmap_mem_d *nmd = NULL; int error; if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { return EINVAL; } /* don't include VALE prefix */ if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME))) return EINVAL; if (strlen(hdr->nr_name) >= IFNAMSIZ) { return EINVAL; } ifp = ifunit_ref(hdr->nr_name); if (ifp) { /* already exist, cannot create new one */ error = EEXIST; NMG_LOCK(); if (NM_NA_VALID(ifp)) { int update_err = nm_update_info(req, NA(ifp)); if (update_err) error = update_err; } NMG_UNLOCK(); if_rele(ifp); return error; } error = nm_os_vi_persist(hdr->nr_name, &ifp); if (error) return error; NMG_LOCK(); if (req->nr_mem_id) { nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) { error = EINVAL; goto err_1; } } /* netmap_vp_create creates a struct netmap_vp_adapter */ error = netmap_vale_vp_create(hdr, ifp, nmd, &vpna); if (error) { if (netmap_debug & NM_DEBUG_VALE) nm_prerr("error %d", error); goto err_1; } /* persist-specific routines */ vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl; if (!autodelete) { netmap_adapter_get(&vpna->up); } else { vpna->autodelete = 1; } NM_ATTACH_NA(ifp, &vpna->up); /* return the updated info */ error = nm_update_info(req, &vpna->up); if (error) { goto err_2; } - ND("returning nr_mem_id %d", req->nr_mem_id); + nm_prdis("returning nr_mem_id %d", req->nr_mem_id); if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); - ND("created %s", ifp->if_xname); + nm_prdis("created %s", ifp->if_xname); return 0; err_2: netmap_detach(ifp); err_1: if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); nm_os_vi_detach(ifp); return error; } #endif /* WITH_VALE */ Index: stable/12 =================================================================== --- stable/12 (revision 344045) +++ stable/12 (revision 344046) Property changes on: stable/12 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r343772,343867
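
The exclusive-bridge entry points above, netmap_vale_create() and netmap_vale_destroy(), are intended for external kernel modules. A minimal sketch of one plausible calling sequence follows; the bridge name "valeX:", the example_* identifiers and the file-scope token variable are invented for illustration and assume the declarations in this file are visible.

/* Hedged sketch: claim and later release an exclusive VALE switch. */
static void *example_auth_token;

static int
example_claim_bridge(void)
{
	int status = 0;

	/* A non-NULL return value is the authentication token to be
	 * passed to the attach/detach/regops calls; on failure the
	 * error code (EEXIST, ENOMEM, ...) is left in 'status'. */
	example_auth_token = netmap_vale_create("valeX:", &status);
	if (example_auth_token == NULL)
		return (status);
	return (0);
}

static int
example_release_bridge(void)
{
	/* The bridge must be empty; the token proves ownership. */
	return (netmap_vale_destroy("valeX:", example_auth_token));
}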
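
The forwarding decision made by nm_vale_rthash() and netmap_vale_learning() boils down to: learn the source MAC into a hash table keyed by MAC, then forward to the port recorded for the destination MAC, or flood. The userspace sketch below mirrors that flow only; the ex_* names, the table size and the FNV-style toy hash are stand-ins for the kernel's NM_BDG_HASH table and Jenkins-style hash, not the real implementation.

#include <stdint.h>

#define EX_HASH_SIZE	1024		/* power of two, like NM_BDG_HASH */
#define EX_BROADCAST	0xffffffffu	/* stand-in for NM_BDG_BROADCAST */

struct ex_hash_ent {
	uint64_t mac;			/* 48-bit MAC in the low bits */
	uint32_t port;
};

static struct ex_hash_ent ex_table[EX_HASH_SIZE];

/* Toy hash standing in for nm_vale_rthash(). */
static uint32_t
ex_hash(const uint8_t *mac)
{
	uint32_t h = 2166136261u;
	int i;

	for (i = 0; i < 6; i++)
		h = (h ^ mac[i]) * 16777619u;
	return (h & (EX_HASH_SIZE - 1));
}

/* Learn the source address, then pick a destination port. */
static uint32_t
ex_lookup(const uint8_t *frame, uint32_t my_port)
{
	uint64_t smac = 0, dmac = 0;
	uint32_t sh, dh;
	int i;

	for (i = 0; i < 6; i++) {
		dmac = (dmac << 8) | frame[i];		/* bytes 0..5  */
		smac = (smac << 8) | frame[6 + i];	/* bytes 6..11 */
	}
	if ((frame[6] & 1) == 0) {	/* unicast source: learn it */
		sh = ex_hash(frame + 6);
		ex_table[sh].mac = smac;
		ex_table[sh].port = my_port;
	}
	if (frame[0] & 1)		/* multicast/broadcast destination */
		return (EX_BROADCAST);
	dh = ex_hash(frame);
	if (ex_table[dh].mac == dmac)	/* known unicast destination */
		return (ex_table[dh].port);
	return (EX_BROADCAST);		/* unknown unicast: flood */
}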
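
nm_kr_space(kring, 1) treats the slots between nr_hwcur and nkr_hwlease as busy (already leased) and always keeps one slot empty, which is standard circular-buffer accounting. A small sketch of the same arithmetic, with illustrative parameter names rather than the kring fields themselves:

static inline unsigned int
example_rx_space(unsigned int hwcur, unsigned int hwlease,
    unsigned int num_slots)
{
	/* busy = leased-but-not-yet-completed slots, modulo ring size */
	int busy = (int)hwlease - (int)hwcur;

	if (busy < 0)
		busy += (int)num_slots;
	/* one slot is always left empty */
	return (num_slots - 1 - (unsigned int)busy);
}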
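
On the virtio-net mismatch path, nm_vale_flush() over-estimates the destination slots as needed * na->mfs / (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1, i.e. the smallest x with needed * na->mfs + x * H <= x * dst_na->mfs, rounded up. The helper below only restates that formula; the function name and the 'hdr' parameter (playing the role of WORST_CASE_GSO_HEADER) are invented for the example.

static inline unsigned int
example_gso_slot_estimate(unsigned int needed, unsigned int src_mfs,
    unsigned int dst_mfs, unsigned int hdr)
{
	/* smallest x with needed*src_mfs + x*hdr <= x*dst_mfs, with
	 * the same +1 rounding used by the kernel code */
	return ((needed * src_mfs) / (dst_mfs - hdr) + 1);
}

For instance, with needed = 1, src_mfs = 65535, dst_mfs = 1514 and a hypothetical 66-byte worst-case header this gives 65535 / 1448 + 1 = 46 destination slots.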