Index: head/sys/dev/netmap/if_vtnet_netmap.h =================================================================== --- head/sys/dev/netmap/if_vtnet_netmap.h (revision 307571) +++ head/sys/dev/netmap/if_vtnet_netmap.h (revision 307572) @@ -1,428 +1,428 @@ /* * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ */ #include #include #include #include /* vtophys ? */ #include #define SOFTC_T vtnet_softc /* Free all the unused buffer in all the RX virtqueues. * This function is called when entering and exiting netmap mode. 
* - buffers queued by the virtio driver return skbuf/mbuf pointer * and need to be freed; * - buffers queued by netmap return the txq/rxq, and do not need work */ static void vtnet_netmap_free_bufs(struct SOFTC_T* sc) { int i, nmb = 0, n = 0, last; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; struct virtqueue *vq; struct mbuf *m; struct vtnet_txq *txq = &sc->vtnet_txqs[i]; struct vtnet_tx_header *txhdr; last = 0; vq = rxq->vtnrx_vq; while ((m = virtqueue_drain(vq, &last)) != NULL) { n++; if (m != (void *)rxq) m_freem(m); else nmb++; } last = 0; vq = txq->vtntx_vq; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { n++; if (txhdr != (void *)txq) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } else nmb++; } } D("freed %d mbufs, %d netmap bufs on %d queues", n - nmb, nmb, i); } /* Register and unregister. */ static int vtnet_netmap_reg(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; struct SOFTC_T *sc = ifp->if_softc; VTNET_CORE_LOCK(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } /* drain queues so netmap and native drivers * do not interfere with each other */ vtnet_netmap_free_bufs(sc); vtnet_init_locked(sc); /* also enable intr */ VTNET_CORE_UNLOCK(sc); return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); } /* Reconcile kernel and user view of the transmit ring. 
*/ static int vtnet_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr]; struct virtqueue *vq = txq->vtntx_vq; /* * First part: process new packets to send. */ rmb(); - + nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ struct sglist *sg = txq->vtntx_sg; nic_i = netmap_idx_k2n(kring, nm_i); for (n = 0; nm_i != head; n++) { /* we use an empty header here */ static struct virtio_net_hdr_mrg_rxbuf hdr; struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err; NM_CHECK_ADDR_LEN(na, addr, len); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); /* Initialize the scatterlist, expose it to the hypervisor, * and kick the hypervisor (if necessary). */ sglist_reset(sg); // cheap // if vtnet_hdr_size > 0 ... err = sglist_append(sg, &hdr, sc->vtnet_hdr_size); // XXX later, support multi segment err = sglist_append_phys(sg, paddr, len); /* use na as the cookie */ err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0); if (unlikely(err < 0)) { D("virtqueue_enqueue failed"); break; } nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } /* Update hwcur depending on where we stopped. */ kring->nr_hwcur = nm_i; /* note we migth break early */ /* No more free TX slots? Ask the hypervisor for notifications, * possibly only when a considerable amount of work has been * done. */ ND(3,"sent %d packets, hwcur %d", n, nm_i); virtqueue_disable_intr(vq); virtqueue_notify(vq); } else { if (ring->head != ring->tail) ND(5, "pure notify ? 
head %d tail %d nused %d %d", ring->head, ring->tail, virtqueue_nused(vq), (virtqueue_dump(vq), 1)); virtqueue_notify(vq); virtqueue_enable_intr(vq); // like postpone with 0 } - + /* Free used slots. We only consider our own used buffers, recognized * by the token we passed to virtqueue_add_outbuf. */ n = 0; for (;;) { struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL); if (txhdr == NULL) break; if (likely(txhdr == (void *)txq)) { n++; if (virtqueue_nused(vq) < 32) { // XXX slow release break; } } else { /* leftover from previous transmission */ m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } } if (n) { kring->nr_hwtail += n; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; } if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0*/) { ND(3, "disable intr, hwcur %d", nm_i); virtqueue_disable_intr(vq); } else { ND(3, "enable intr, hwcur %d", nm_i); virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT); } return 0; } static int vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int const lim = kring->nkr_num_slots - 1; u_int n; /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; /* use a local sglist, default might be short */ struct sglist_seg ss[2]; struct sglist sg = { ss, 0, 0, 2 }; for (n = 0; nm_i != head; n++) { static struct virtio_net_hdr_mrg_rxbuf hdr; struct netmap_slot *slot = &ring->slot[nm_i]; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err = 0; if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */ if (netmap_ring_reinit(kring)) return -1; } slot->flags &= ~NS_BUF_CHANGED; sglist_reset(&sg); // cheap err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size); err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na)); /* writable for the host */ err = 
virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg); if (err < 0) { D("virtqueue_enqueue failed"); break; } nm_i = nm_next(nm_i, lim); } return nm_i; } /* Reconcile kernel and user view of the receive ring. */ static int vtnet_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ // u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; /* XXX netif_carrier_ok ? */ if (head > lim) return netmap_ring_reinit(kring); rmb(); /* * First part: import newly received packets. * Only accept our * own buffers (matching the token). We should only get * matching buffers, because of vtnet_netmap_free_rx_unused_bufs() * and vtnet_netmap_init_buffers(). */ if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; struct netmap_adapter *token; nm_i = kring->nr_hwtail; n = 0; for (;;) { int len; token = virtqueue_dequeue(vq, &len); if (token == NULL) break; if (likely(token == (void *)rxq)) { ring->slot[nm_i].len = len; ring->slot[nm_i].flags = slot_flags; nm_i = nm_next(nm_i, lim); n++; } else { D("This should not happen"); } } kring->nr_hwtail = nm_i; kring->nr_kflags &= ~NKR_PENDINTR; } ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur, kring->nr_hwcur, kring->nr_hwtail); /* * Second part: skip past packets that userspace has released. 
*/ nm_i = kring->nr_hwcur; /* netmap ring index */ if (nm_i != head) { int err = vtnet_refill_rxq(kring, nm_i, head); if (err < 0) return 1; kring->nr_hwcur = err; virtqueue_notify(vq); /* After draining the queue may need an intr from the hypervisor */ vtnet_rxq_enable_intr(rxq); } ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur, ring->tail, kring->nr_hwcur, kring->nr_hwtail); return 0; } /* Make RX virtqueues buffers pointing to netmap buffers. */ static int vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc) { struct ifnet *ifp = sc->vtnet_ifp; struct netmap_adapter* na = NA(ifp); unsigned int r; if (!nm_native_on(na)) return 0; for (r = 0; r < na->num_rx_rings; r++) { struct netmap_kring *kring = &na->rx_rings[r]; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r]; struct virtqueue *vq = rxq->vtnrx_vq; struct netmap_slot* slot; int err = 0; slot = netmap_reset(na, NR_RX, r, 0); if (!slot) { D("strange, null netmap ring %d", r); return 0; } /* Add up to na>-num_rx_desc-1 buffers to this RX virtqueue. * It's important to leave one virtqueue slot free, otherwise * we can run into ring->cur/ring->tail wraparounds. */ err = vtnet_refill_rxq(kring, 0, na->num_rx_desc-1); if (err < 0) return 0; virtqueue_notify(vq); } return 1; } /* Update the virtio-net device configurations. Number of queues can * change dinamically, by 'ethtool --set-channels $IFNAME combined $N'. * This is actually the only way virtio-net can currently enable * the multiqueue mode. 
* XXX note that we seem to lose packets if the netmap ring has more * slots than the queue */ static int vtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd, u_int *rxr, u_int *rxd) { struct ifnet *ifp = na->ifp; struct SOFTC_T *sc = ifp->if_softc; *txr = *rxr = sc->vtnet_max_vq_pairs; *rxd = 512; // sc->vtnet_rx_nmbufs; *txd = *rxd; // XXX D("vtnet config txq=%d, txd=%d rxq=%d, rxd=%d", *txr, *txd, *rxr, *rxd); return 0; } static void vtnet_netmap_attach(struct SOFTC_T *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->vtnet_ifp; na.num_tx_desc = 1024;// sc->vtnet_rx_nmbufs; na.num_rx_desc = 1024; // sc->vtnet_rx_nmbufs; na.nm_register = vtnet_netmap_reg; na.nm_txsync = vtnet_netmap_txsync; na.nm_rxsync = vtnet_netmap_rxsync; na.nm_config = vtnet_netmap_config; na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs; D("max rings %d", sc->vtnet_max_vq_pairs); netmap_attach(&na); D("virtio attached txq=%d, txd=%d rxq=%d, rxd=%d", na.num_tx_rings, na.num_tx_desc, na.num_tx_rings, na.num_rx_desc); } /* end of file */ Index: head/sys/dev/netmap/netmap_monitor.c =================================================================== --- head/sys/dev/netmap/netmap_monitor.c (revision 307571) +++ head/sys/dev/netmap/netmap_monitor.c (revision 307572) @@ -1,886 +1,886 @@ /* * Copyright (C) 2014-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Monitors * * netmap monitors can be used to do monitoring of network traffic * on another adapter, when the latter adapter is working in netmap mode. * * Monitors offer to userspace the same interface as any other netmap port, * with as many pairs of netmap rings as the monitored adapter. * However, only the rx rings are actually used. Each monitor rx ring receives * the traffic transiting on both the tx and rx corresponding rings in the * monitored adapter. During registration, the user can choose if she wants * to intercept tx only, rx only, or both tx and rx traffic. * * If the monitor is not able to cope with the stream of frames, excess traffic * will be dropped. * * If the monitored adapter leaves netmap mode, the monitor has to be restarted. * * Monitors can be either zero-copy or copy-based. * * Copy monitors see the frames before they are consumed: * * - For tx traffic, this is when the application sends them, before they are * passed down to the adapter. 
* * - For rx traffic, this is when they are received by the adapter, before * they are sent up to the application, if any (note that, if no * application is reading from a monitored ring, the ring will eventually * fill up and traffic will stop). * * Zero-copy monitors only see the frames after they have been consumed: * * - For tx traffic, this is after the slots containing the frames have been * marked as free. Note that this may happen at a considerably delay after * frame transmission, since freeing of slots is often done lazily. * * - For rx traffic, this is after the consumer on the monitored adapter * has released them. In most cases, the consumer is a userspace * application which may have modified the frame contents. * * Several copy monitors may be active on any ring. Zero-copy monitors, * instead, need exclusive access to each of the monitored rings. This may * change in the future, if we implement zero-copy monitor chaining. * */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_MONITOR #define NM_MONITOR_MAXSLOTS 4096 /* ******************************************************************** * functions common to both kind of monitors ******************************************************************** */ /* nm_sync callback for the monitor's own tx rings. 
* This makes no sense and always returns error */ static int netmap_monitor_txsync(struct netmap_kring *kring, int flags) { RD(1, "%s %x", kring->name, flags); return EIO; } /* nm_sync callback for the monitor's own rx rings. * Note that the lock in netmap_zmon_parent_sync only protects * writers among themselves. Synchronization between writers * (i.e., netmap_zmon_parent_txsync and netmap_zmon_parent_rxsync) * and readers (i.e., netmap_zmon_rxsync) relies on memory barriers. */ static int netmap_monitor_rxsync(struct netmap_kring *kring, int flags) { ND("%s %x", kring->name, flags); kring->nr_hwcur = kring->rcur; mb(); return 0; } /* nm_krings_create callbacks for monitors. */ static int netmap_monitor_krings_create(struct netmap_adapter *na) { int error = netmap_krings_create(na, 0); if (error) return error; /* override the host rings callbacks */ na->tx_rings[na->num_tx_rings].nm_sync = netmap_monitor_txsync; na->rx_rings[na->num_rx_rings].nm_sync = netmap_monitor_rxsync; return 0; } /* nm_krings_delete callback for monitors */ static void netmap_monitor_krings_delete(struct netmap_adapter *na) { netmap_krings_delete(na); } static u_int nm_txrx2flag(enum txrx t) { return (t == NR_RX ? 
NR_MONITOR_RX : NR_MONITOR_TX); } /* allocate the monitors array in the monitored kring */ static int nm_monitor_alloc(struct netmap_kring *kring, u_int n) { size_t len; struct netmap_kring **nm; if (n <= kring->max_monitors) /* we already have more entries that requested */ return 0; - + len = sizeof(struct netmap_kring *) * n; #ifndef _WIN32 nm = realloc(kring->monitors, len, M_DEVBUF, M_NOWAIT | M_ZERO); #else nm = realloc(kring->monitors, len, sizeof(struct netmap_kring *)*kring->max_monitors); #endif if (nm == NULL) return ENOMEM; kring->monitors = nm; kring->max_monitors = n; return 0; } /* deallocate the parent array in the parent adapter */ static void nm_monitor_dealloc(struct netmap_kring *kring) { if (kring->monitors) { if (kring->n_monitors > 0) { D("freeing not empty monitor array for %s (%d dangling monitors)!", kring->name, kring->n_monitors); } free(kring->monitors, M_DEVBUF); kring->monitors = NULL; kring->max_monitors = 0; kring->n_monitors = 0; } } /* * monitors work by replacing the nm_sync() and possibly the * nm_notify() callbacks in the monitored rings. */ static int netmap_zmon_parent_txsync(struct netmap_kring *, int); static int netmap_zmon_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_txsync(struct netmap_kring *, int); static int netmap_monitor_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_notify(struct netmap_kring *, int); /* add the monitor mkring to the list of monitors of kring. 
* If this is the first monitor, intercept the callbacks */ static int netmap_monitor_add(struct netmap_kring *mkring, struct netmap_kring *kring, int zcopy) { int error = NM_IRQ_COMPLETED; /* sinchronize with concurrently running nm_sync()s */ nm_kr_stop(kring, NM_KR_LOCKED); /* make sure the monitor array exists and is big enough */ error = nm_monitor_alloc(kring, kring->n_monitors + 1); if (error) goto out; kring->monitors[kring->n_monitors] = mkring; mkring->mon_pos = kring->n_monitors; kring->n_monitors++; if (kring->n_monitors == 1) { /* this is the first monitor, intercept callbacks */ ND("%s: intercept callbacks on %s", mkring->name, kring->name); kring->mon_sync = kring->nm_sync; /* zcopy monitors do not override nm_notify(), but * we save the original one regardless, so that * netmap_monitor_del() does not need to know the * monitor type */ kring->mon_notify = kring->nm_notify; if (kring->tx == NR_TX) { kring->nm_sync = (zcopy ? netmap_zmon_parent_txsync : netmap_monitor_parent_txsync); } else { kring->nm_sync = (zcopy ? netmap_zmon_parent_rxsync : netmap_monitor_parent_rxsync); if (!zcopy) { /* also intercept notify */ kring->nm_notify = netmap_monitor_parent_notify; kring->mon_tail = kring->nr_hwtail; } } } out: nm_kr_start(kring); return error; } /* remove the monitor mkring from the list of monitors of kring. 
* If this is the last monitor, restore the original callbacks */ static void netmap_monitor_del(struct netmap_kring *mkring, struct netmap_kring *kring) { /* sinchronize with concurrently running nm_sync()s */ nm_kr_stop(kring, NM_KR_LOCKED); kring->n_monitors--; if (mkring->mon_pos != kring->n_monitors) { kring->monitors[mkring->mon_pos] = kring->monitors[kring->n_monitors]; kring->monitors[mkring->mon_pos]->mon_pos = mkring->mon_pos; } kring->monitors[kring->n_monitors] = NULL; if (kring->n_monitors == 0) { /* this was the last monitor, restore callbacks and delete monitor array */ ND("%s: restoring sync on %s: %p", mkring->name, kring->name, kring->mon_sync); kring->nm_sync = kring->mon_sync; kring->mon_sync = NULL; if (kring->tx == NR_RX) { - ND("%s: restoring notify on %s: %p", + ND("%s: restoring notify on %s: %p", mkring->name, kring->name, kring->mon_notify); kring->nm_notify = kring->mon_notify; kring->mon_notify = NULL; } nm_monitor_dealloc(kring); } nm_kr_start(kring); } /* This is called when the monitored adapter leaves netmap mode * (see netmap_do_unregif). * We need to notify the monitors that the monitored rings are gone. * We do this by setting their mna->priv.np_na to NULL. * Note that the rings are already stopped when this happens, so * no monitor ring callback can be active. */ void netmap_monitor_stop(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { struct netmap_kring *kring = &NMR(na, t)[i]; u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)mkring->na; /* forget about this adapter */ netmap_adapter_put(mna->priv.np_na); mna->priv.np_na = NULL; } } } } /* common functions for the nm_register() callbacks of both kind of * monitors. 
*/ static int netmap_monitor_reg_common(struct netmap_adapter *na, int onoff, int zmon) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; struct netmap_kring *kring, *mkring; int i; enum txrx t; ND("%p: onoff %d", na, onoff); if (onoff) { if (pna == NULL) { /* parent left netmap mode, fatal */ D("%s: internal error", na->name); return ENXIO; } for_rx_tx(t) { if (mna->flags & nm_txrx2flag(t)) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = &NMR(pna, t)[i]; mkring = &na->rx_rings[i]; if (nm_kring_pending_on(mkring)) { netmap_monitor_add(mkring, kring, zmon); mkring->nr_mode = NKR_NETMAP_ON; } } } } na->na_flags |= NAF_NETMAP_ON; } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; for_rx_tx(t) { if (mna->flags & nm_txrx2flag(t)) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { mkring = &na->rx_rings[i]; if (nm_kring_pending_off(mkring)) { mkring->nr_mode = NKR_NETMAP_OFF; /* we cannot access the parent krings if the parent * has left netmap mode. 
This is signaled by a NULL * pna pointer */ if (pna) { kring = &NMR(pna, t)[i]; netmap_monitor_del(mkring, kring); } } } } } } return 0; } /* **************************************************************** - * functions specific for zero-copy monitors + * functions specific for zero-copy monitors **************************************************************** */ /* * Common function for both zero-copy tx and rx nm_sync() * callbacks */ static int netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx) { struct netmap_kring *mkring = kring->monitors[0]; struct netmap_ring *ring = kring->ring, *mring; int error = 0; int rel_slots, free_slots, busy, sent = 0; u_int beg, end, i; u_int lim = kring->nkr_num_slots - 1, mlim; // = mkring->nkr_num_slots - 1; if (mkring == NULL) { RD(5, "NULL monitor on %s", kring->name); return 0; } mring = mkring->ring; mlim = mkring->nkr_num_slots - 1; /* get the relased slots (rel_slots) */ if (tx == NR_TX) { beg = kring->nr_hwtail; error = kring->mon_sync(kring, flags); if (error) return error; end = kring->nr_hwtail; } else { /* NR_RX */ beg = kring->nr_hwcur; end = kring->rhead; } rel_slots = end - beg; if (rel_slots < 0) rel_slots += kring->nkr_num_slots; if (!rel_slots) { /* no released slots, but we still need * to call rxsync if this is a rx ring */ goto out_rxsync; } /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* swap min(free_slots, rel_slots) slots */ if (free_slots < rel_slots) { beg += (rel_slots - free_slots); if (beg >= kring->nkr_num_slots) beg -= kring->nkr_num_slots; rel_slots = free_slots; } sent = rel_slots; for ( ; rel_slots; rel_slots--) { struct netmap_slot *s = &ring->slot[beg]; 
struct netmap_slot *ms = &mring->slot[i]; uint32_t tmp; tmp = ms->buf_idx; ms->buf_idx = s->buf_idx; s->buf_idx = tmp; ND(5, "beg %d buf_idx %d", beg, tmp); tmp = ms->len; ms->len = s->len; s->len = tmp; s->flags |= NS_BUF_CHANGED; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } out_rxsync: if (tx == NR_RX) error = kring->mon_sync(kring, flags); return error; } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_zmon_parent_txsync(struct netmap_kring *kring, int flags) { ND("%s %x", kring->name, flags); return netmap_zmon_parent_sync(kring, flags, NR_TX); } /* callback used to replace the nm_sync callback in the monitored rx rings */ static int netmap_zmon_parent_rxsync(struct netmap_kring *kring, int flags) { ND("%s %x", kring->name, flags); return netmap_zmon_parent_sync(kring, flags, NR_RX); } static int netmap_zmon_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 1 /* zcopy */); } /* nm_dtor callback for monitors */ static void netmap_zmon_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } /* **************************************************************** - * functions specific for copy monitors + * functions specific for copy monitors **************************************************************** */ static void netmap_monitor_parent_sync(struct netmap_kring *kring, u_int first_new, int new_slots) { u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; u_int i, mlim, beg; int free_slots, busy, sent = 0, m; u_int lim = kring->nkr_num_slots - 1; struct netmap_ring *ring = kring->ring, *mring = mkring->ring; u_int max_len 
= NETMAP_BUF_SIZE(mkring->na); mlim = mkring->nkr_num_slots - 1; /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* copy min(free_slots, new_slots) slots */ m = new_slots; beg = first_new; if (free_slots < m) { beg += (m - free_slots); if (beg >= kring->nkr_num_slots) beg -= kring->nkr_num_slots; m = free_slots; } for ( ; m; m--) { struct netmap_slot *s = &ring->slot[beg]; struct netmap_slot *ms = &mring->slot[i]; u_int copy_len = s->len; char *src = NMB(kring->na, s), *dst = NMB(mkring->na, ms); if (unlikely(copy_len > max_len)) { RD(5, "%s->%s: truncating %d to %d", kring->name, mkring->name, copy_len, max_len); copy_len = max_len; } memcpy(dst, src, copy_len); ms->len = copy_len; sent++; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } } } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_monitor_parent_txsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots; /* get the new slots */ first_new = kring->nr_hwcur; new_slots = kring->rhead - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); return kring->mon_sync(kring, flags); } /* callback used to replace the nm_sync callback in the monitored rx rings */ static int netmap_monitor_parent_rxsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots, error; /* get the new slots */ error = kring->mon_sync(kring, flags); if (error) return error; first_new = kring->mon_tail; new_slots = 
kring->nr_hwtail - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); kring->mon_tail = kring->nr_hwtail; return 0; } /* callback used to replace the nm_notify() callback in the monitored rx rings */ static int netmap_monitor_parent_notify(struct netmap_kring *kring, int flags) { int (*notify)(struct netmap_kring*, int); ND(5, "%s %x", kring->name, flags); /* ?xsync callbacks have tryget called by their callers * (NIOCREGIF and poll()), but here we have to call it * by ourself */ if (nm_kr_tryget(kring, 0, NULL)) { /* in all cases, just skip the sync */ return NM_IRQ_COMPLETED; } if (kring->n_monitors > 0) { netmap_monitor_parent_rxsync(kring, NAF_FORCE_READ); notify = kring->mon_notify; } else { /* we are no longer monitoring this ring, so both * mon_sync and mon_notify are NULL */ notify = kring->nm_notify; } nm_kr_put(kring); return notify(kring, flags); } static int netmap_monitor_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 0 /* no zcopy */); } static void netmap_monitor_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } /* check if nmr is a request for a monitor adapter that we can satisfy */ int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create) { struct nmreq pnmr; struct netmap_adapter *pna; /* parent adapter */ struct netmap_monitor_adapter *mna; struct ifnet *ifp = NULL; int i, error; enum txrx t; int zcopy = (nmr->nr_flags & NR_ZCOPY_MON); char monsuff[10] = ""; if ((nmr->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) { if (nmr->nr_flags & NR_ZCOPY_MON) { - /* the flag makes no sense unless you are + /* the flag makes no sense unless you are * creating a monitor */ return EINVAL; } ND("not a monitor"); return 0; } /* this is a 
request for a monitor adapter */ ND("flags %x", nmr->nr_flags); mna = malloc(sizeof(*mna), M_DEVBUF, M_NOWAIT | M_ZERO); if (mna == NULL) { D("memory error"); return ENOMEM; } /* first, try to find the adapter that we want to monitor * We use the same nmr, after we have turned off the monitor flags. * In this way we can potentially monitor everything netmap understands, * except other monitors. */ memcpy(&pnmr, nmr, sizeof(pnmr)); pnmr.nr_flags &= ~(NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON); error = netmap_get_na(&pnmr, &pna, &ifp, create); if (error) { D("parent lookup failed: %d", error); free(mna, M_DEVBUF); return error; } ND("found parent: %s", pna->name); if (!nm_netmap_on(pna)) { /* parent not in netmap mode */ /* XXX we can wait for the parent to enter netmap mode, * by intercepting its nm_register callback (2014-03-16) */ D("%s not in netmap mode", pna->name); error = EINVAL; goto put_out; } /* grab all the rings we need in the parent */ mna->priv.np_na = pna; error = netmap_interp_ringid(&mna->priv, nmr->nr_ringid, nmr->nr_flags); if (error) { D("ringid error"); goto put_out; } if (mna->priv.np_qlast[NR_TX] - mna->priv.np_qfirst[NR_TX] == 1) { snprintf(monsuff, 10, "-%d", mna->priv.np_qfirst[NR_TX]); } snprintf(mna->up.name, sizeof(mna->up.name), "%s%s/%s%s%s", pna->name, monsuff, zcopy ? "z" : "", (nmr->nr_flags & NR_MONITOR_RX) ? "r" : "", (nmr->nr_flags & NR_MONITOR_TX) ? "t" : ""); if (zcopy) { /* zero copy monitors need exclusive access to the monitored rings */ for_rx_tx(t) { if (! 
(nmr->nr_flags & nm_txrx2flag(t))) continue; for (i = mna->priv.np_qfirst[t]; i < mna->priv.np_qlast[t]; i++) { struct netmap_kring *kring = &NMR(pna, t)[i]; if (kring->n_monitors > 0) { error = EBUSY; D("ring %s already monitored by %s", kring->name, kring->monitors[0]->name); goto put_out; } } } mna->up.nm_register = netmap_zmon_reg; mna->up.nm_dtor = netmap_zmon_dtor; /* to have zero copy, we need to use the same memory allocator * as the monitored port */ mna->up.nm_mem = pna->nm_mem; mna->up.na_lut = pna->na_lut; } else { /* normal monitors are incompatible with zero copy ones */ for_rx_tx(t) { if (! (nmr->nr_flags & nm_txrx2flag(t))) continue; for (i = mna->priv.np_qfirst[t]; i < mna->priv.np_qlast[t]; i++) { struct netmap_kring *kring = &NMR(pna, t)[i]; if (kring->n_monitors > 0 && kring->monitors[0]->na->nm_register == netmap_zmon_reg) { error = EBUSY; D("ring busy"); goto put_out; } } } mna->up.nm_rxsync = netmap_monitor_rxsync; mna->up.nm_register = netmap_monitor_reg; mna->up.nm_dtor = netmap_monitor_dtor; } /* the monitor supports the host rings iff the parent does */ mna->up.na_flags = (pna->na_flags & NAF_HOST_RINGS); /* a do-nothing txsync: monitors cannot be used to inject packets */ mna->up.nm_txsync = netmap_monitor_txsync; mna->up.nm_rxsync = netmap_monitor_rxsync; mna->up.nm_krings_create = netmap_monitor_krings_create; mna->up.nm_krings_delete = netmap_monitor_krings_delete; mna->up.num_tx_rings = 1; // XXX we don't need it, but field can't be zero /* we set the number of our rx_rings to be max(num_rx_rings, num_rx_rings) * in the parent */ mna->up.num_rx_rings = pna->num_rx_rings; if (pna->num_tx_rings > pna->num_rx_rings) mna->up.num_rx_rings = pna->num_tx_rings; /* by default, the number of slots is the same as in * the parent rings, but the user may ask for a different * number */ mna->up.num_tx_desc = nmr->nr_tx_slots; nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 1, NM_MONITOR_MAXSLOTS, NULL); mna->up.num_rx_desc = 
nmr->nr_rx_slots; nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 1, NM_MONITOR_MAXSLOTS, NULL); error = netmap_attach_common(&mna->up); if (error) { D("attach_common error"); goto put_out; } /* remember the traffic directions we have to monitor */ mna->flags = (nmr->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)); *na = &mna->up; netmap_adapter_get(*na); /* keep the reference to the parent */ ND("monitor ok"); /* drop the reference to the ifp, if any */ if (ifp) if_rele(ifp); return 0; put_out: netmap_unget_na(pna, ifp); free(mna, M_DEVBUF); return error; } #endif /* WITH_MONITOR */ Index: head/sys/dev/netmap/netmap_pipe.c =================================================================== --- head/sys/dev/netmap/netmap_pipe.c (revision 307571) +++ head/sys/dev/netmap/netmap_pipe.c (revision 307572) @@ -1,689 +1,689 @@ /* * Copyright (C) 2014-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_PIPES #define NM_PIPE_MAXSLOTS 4096 static int netmap_default_pipes = 0; /* ignored, kept for compatibility */ SYSBEGIN(vars_pipes); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW, &netmap_default_pipes, 0 , ""); SYSEND; /* allocate the pipe array in the parent adapter */ static int nm_pipe_alloc(struct netmap_adapter *na, u_int npipes) { size_t len; struct netmap_pipe_adapter **npa; if (npipes <= na->na_max_pipes) /* we already have more entries that requested */ return 0; - + if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES) return EINVAL; len = sizeof(struct netmap_pipe_adapter *) * npipes; #ifndef _WIN32 npa = realloc(na->na_pipes, len, M_DEVBUF, M_NOWAIT | M_ZERO); #else npa = realloc(na->na_pipes, len, sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes); #endif if (npa == NULL) return ENOMEM; na->na_pipes = 
npa;
	na->na_max_pipes = npipes;
	return 0;
}

/* Deallocate the pipe array in the parent adapter.
 * A non-empty array at this point means some pipes are still
 * dangling; we report this but cannot recover here.
 */
void
netmap_pipe_dealloc(struct netmap_adapter *na)
{
	if (na->na_pipes) {
		if (na->na_next_pipe > 0) {
			D("freeing not empty pipe array for %s (%d dangling pipes)!",
			    na->name, na->na_next_pipe);
		}
		free(na->na_pipes, M_DEVBUF);
		na->na_pipes = NULL;
		na->na_max_pipes = 0;
		na->na_next_pipe = 0;
	}
}

/* Find a pipe endpoint with the given id among the parent's pipes.
 * Linear scan over the first na_next_pipe entries; returns NULL
 * if no endpoint with that id is registered.
 */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, u_int pipe_id)
{
	int i;
	struct netmap_pipe_adapter *na;

	for (i = 0; i < parent->na_next_pipe; i++) {
		na = parent->na_pipes[i];
		if (na->id == pipe_id) {
			return na;
		}
	}
	return NULL;
}

/* Add a new pipe endpoint to the parent array, growing the array
 * (doubling, starting from 2 entries) if it is full.
 * Returns 0 on success, or the error from nm_pipe_alloc().
 */
static int
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		u_int npipes = parent->na_max_pipes ? 2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
		if (error)
			return error;
	}

	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
	return 0;
}

/* Remove the given pipe endpoint from the parent array.
 * O(1) unordered removal: the last active entry is moved into
 * the freed slot and its parent_slot index is updated.
 */
static void
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	u_int n;
	n = --parent->na_next_pipe;
	if (n != na->parent_slot) {
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	}
	parent->na_pipes[n] = NULL;
}

/* Move new slots from the tx ring of this pipe endpoint to the rx
 * ring of its peer, swapping the netmap_slot descriptors (zero copy).
 */
static int
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
{
	struct netmap_kring *rxkring = txkring->pipe;
	u_int limit; /* slots to transfer */
	u_int j, k, lim_tx = txkring->nkr_num_slots - 1,
		lim_rx = rxkring->nkr_num_slots - 1;
	int m, busy;

	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(2, "before: hwcur %d hwtail %d cur %d head %d tail %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
txkring->rcur, txkring->rhead, txkring->rtail);

	j = rxkring->nr_hwtail; /* RX */
	k = txkring->nr_hwcur; /* TX */
	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
	if (m < 0)
		m += txkring->nkr_num_slots;
	limit = m;
	m = lim_rx; /* max avail space on destination */
	busy = j - rxkring->nr_hwcur; /* busy slots */
	if (busy < 0)
		busy += rxkring->nkr_num_slots;
	m -= busy; /* subtract busy slots */
	ND(2, "m %d limit %d", m, limit);
	if (m < limit)
		limit = m;

	if (limit == 0) {
		/* either the rxring is full, or nothing to send */
		return 0;
	}

	while (limit-- > 0) {
		struct netmap_slot *rs = &rxkring->ring->slot[j];
		struct netmap_slot *ts = &txkring->ring->slot[k];
		struct netmap_slot tmp;

		/* swap the slots */
		tmp = *rs;
		*rs = *ts;
		*ts = tmp;

		/* report the buffer change */
		ts->flags |= NS_BUF_CHANGED;
		rs->flags |= NS_BUF_CHANGED;

		j = nm_next(j, lim_rx);
		k = nm_next(k, lim_tx);
	}

	mb(); /* make sure the slots are updated before publishing them */
	rxkring->nr_hwtail = j;
	txkring->nr_hwcur = k;
	txkring->nr_hwtail = nm_prev(k, lim_tx);

	ND(2, "after: hwcur %d hwtail %d cur %d head %d tail %d j %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, j);

	mb(); /* make sure rxkring->nr_hwtail is updated before notifying */
	rxkring->nm_notify(rxkring, 0);

	return 0;
}

/* Reconcile the rx ring of a pipe endpoint: advance nr_hwcur past
 * the slots the user has released and, if any were released, notify
 * the transmitting peer so it can reuse them.
 */
static int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
	struct netmap_kring *txkring = rxkring->pipe;
	uint32_t oldhwcur = rxkring->nr_hwcur;

	ND("%s %x <- %s", rxkring->name, flags, txkring->name);
	rxkring->nr_hwcur = rxkring->rhead; /* recover user-released slots */
	ND(5, "hwcur %d hwtail %d cur %d head %d tail %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);
	mb(); /* paired with the first mb() in txsync */

	if (oldhwcur != rxkring->nr_hwcur) {
		/* we have released some slots, notify the other end */
		mb(); /* make sure nr_hwcur is updated before notifying */
		txkring->nm_notify(txkring, 0);
	}

	return 0;
}

/* Pipe endpoints are created
and destroyed together, so that endpoints do not
 * have to check for the existence of their peer at each ?xsync.
 *
 * To play well with the existing netmap infrastructure (refcounts etc.), we
 * adopt the following strategy:
 *
 * 1) The first endpoint that is created also creates the other endpoint and
 * grabs a reference to it.
 *
 *    state A)  user1 --> endpoint1 --> endpoint2
 *
 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
 * its reference to the user:
 *
 *    state B)  user1 --> endpoint1     endpoint2 <--- user2
 *
 * 3) Assume that, starting from state B, endpoint2 is closed. In the unregister
 * callback endpoint2 notes that endpoint1 is still active and adds a reference
 * from endpoint1 to itself. When user2 then releases her own reference,
 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
 * would be reached if endpoint1 were released instead.
 *
 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
 * it owns a reference to endpoint2 and releases it.
 *
 * Something similar goes on for the creation and destruction of the krings.
 */


/* netmap_pipe_krings_create.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1. We have to create both sets
 *    of krings.
 *
 * 2) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e2. e1 is certainly registered and our
 *    krings already exist. Nothing to do.
*/
static int
netmap_pipe_krings_create(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int error = 0;
	enum txrx t;

	if (pna->peer_ref) {
		int i;

		/* case 1) above */
		D("%p: case 1, create both ends", na);
		error = netmap_krings_create(na, 0);
		if (error)
			goto err;

		/* create the krings of the other end */
		error = netmap_krings_create(ona, 0);
		if (error)
			goto del_krings1;

		/* cross link the krings: each tx kring is paired with the
		 * rx kring of the same index on the peer, and vice versa */
		for_rx_tx(t) {
			enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				NMR(na, t)[i].pipe = NMR(&pna->peer->up, r) + i;
				NMR(&pna->peer->up, r)[i].pipe = NMR(na, t) + i;
			}
		}
	}
	return 0;

del_krings1:
	netmap_krings_delete(na);
err:
	return error;
}

/* netmap_pipe_reg.
 *
 * There are two cases on registration (onoff==1)
 *
 * 1.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Create the needed rings of the
 *      other end.
 *
 * 1.b) state is
 *
 *        usr1 --> e1 --> e2 <-- usr2
 *
 *      and we are e2. Drop the ref e1 is holding.
 *
 * There are two additional cases on unregister (onoff==0)
 *
 * 2.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Nothing special to do, e2 will
 *      be cleaned up by the destructor of e1.
 *
 * 2.b) state is
 *
 *        usr1 --> e1     e2 <-- usr2
 *
 *      and we are either e1 or e2. Add a ref from the
 *      other end and hide our rings.
*/
static int
netmap_pipe_reg(struct netmap_adapter *na, int onoff)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int i, error = 0;
	enum txrx t;

	ND("%p: onoff %d", na, onoff);
	if (onoff) {
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_on(kring)) {
					/* mark the partner ring as needed */
					kring->pipe->nr_kflags |= NKR_NEEDRING;
				}
			}
		}
-
+
		/* create all missing needed rings on the other end */
		error = netmap_mem_rings_create(ona);
		if (error)
			return error;

		/* In case of no error we put our rings in netmap mode */
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_on(kring)) {
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
	} else {
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
					/* mark the peer ring as no longer needed by us
					 * (it may still be kept if somebody else is using it)
					 */
					kring->pipe->nr_kflags &= ~NKR_NEEDRING;
				}
			}
		}
		/* delete all the peer rings that are no longer needed */
		netmap_mem_rings_delete(ona);
	}

	if (na->active_fds) {
		D("active_fds %d", na->active_fds);
		return 0;
	}

	if (pna->peer_ref) {
		ND("%p: case 1.a or 2.a, nothing to do", na);
		return 0;
	}

	if (onoff) {
		ND("%p: case 1.b, drop peer", na);
		pna->peer->peer_ref = 0;
		netmap_adapter_put(na);
	} else {
		ND("%p: case 2.b, grab peer", na);
		netmap_adapter_get(na);
		pna->peer->peer_ref = 1;
	}

	return error;
}

/* netmap_pipe_krings_delete.
* * There are two cases: * * 1) state is * * usr1 --> e1 --> e2 * * and we are e1 (e2 is not registered, so krings_delete cannot be * called on it); * * 2) state is * * usr1 --> e1 e2 <-- usr2 * * and we are either e1 or e2. * * In the former case we have to also delete the krings of e2; * in the latter case we do nothing (note that our krings * have already been hidden in the unregister callback). */ static void netmap_pipe_krings_delete(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; struct netmap_adapter *ona; /* na of the other end */ if (!pna->peer_ref) { ND("%p: case 2, kept alive by peer", na); return; } /* case 1) above */ ND("%p: case 1, deleting everyhing", na); netmap_krings_delete(na); /* also zeroes tx_rings etc. */ ona = &pna->peer->up; if (ona->tx_rings == NULL) { /* already deleted, we must be on an * cleanup-after-error path */ return; } netmap_krings_delete(ona); } static void netmap_pipe_dtor(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; ND("%p", na); if (pna->peer_ref) { ND("%p: clean up peer", na); pna->peer_ref = 0; netmap_adapter_put(&pna->peer->up); } if (pna->role == NR_REG_PIPE_MASTER) netmap_pipe_remove(pna->parent, pna); netmap_adapter_put(pna->parent); pna->parent = NULL; } int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create) { struct nmreq pnmr; struct netmap_adapter *pna; /* parent adapter */ struct netmap_pipe_adapter *mna, *sna, *req; struct ifnet *ifp = NULL; u_int pipe_id; int role = nmr->nr_flags & NR_REG_MASK; int error; ND("flags %x", nmr->nr_flags); if (role != NR_REG_PIPE_MASTER && role != NR_REG_PIPE_SLAVE) { ND("not a pipe"); return 0; } role = nmr->nr_flags & NR_REG_MASK; /* first, try to find the parent adapter */ bzero(&pnmr, sizeof(pnmr)); memcpy(&pnmr.nr_name, nmr->nr_name, IFNAMSIZ); /* pass to parent the requested number of pipes */ pnmr.nr_arg1 = nmr->nr_arg1; error = netmap_get_na(&pnmr, 
&pna, &ifp, create); if (error) { ND("parent lookup failed: %d", error); return error; } ND("found parent: %s", na->name); if (NETMAP_OWNED_BY_KERN(pna)) { ND("parent busy"); error = EBUSY; goto put_out; } /* next, lookup the pipe id in the parent list */ req = NULL; pipe_id = nmr->nr_ringid & NETMAP_RING_MASK; mna = netmap_pipe_find(pna, pipe_id); if (mna) { if (mna->role == role) { ND("found %d directly at %d", pipe_id, mna->parent_slot); req = mna; } else { ND("found %d indirectly at %d", pipe_id, mna->parent_slot); req = mna->peer; } /* the pipe we have found already holds a ref to the parent, * so we need to drop the one we got from netmap_get_na() */ netmap_adapter_put(pna); goto found; } ND("pipe %d not found, create %d", pipe_id, create); if (!create) { error = ENODEV; goto put_out; } /* we create both master and slave. * The endpoint we were asked for holds a reference to * the other one. */ mna = malloc(sizeof(*mna), M_DEVBUF, M_NOWAIT | M_ZERO); if (mna == NULL) { error = ENOMEM; goto put_out; } snprintf(mna->up.name, sizeof(mna->up.name), "%s{%d", pna->name, pipe_id); mna->id = pipe_id; mna->role = NR_REG_PIPE_MASTER; mna->parent = pna; mna->up.nm_txsync = netmap_pipe_txsync; mna->up.nm_rxsync = netmap_pipe_rxsync; mna->up.nm_register = netmap_pipe_reg; mna->up.nm_dtor = netmap_pipe_dtor; mna->up.nm_krings_create = netmap_pipe_krings_create; mna->up.nm_krings_delete = netmap_pipe_krings_delete; mna->up.nm_mem = pna->nm_mem; mna->up.na_lut = pna->na_lut; mna->up.num_tx_rings = 1; mna->up.num_rx_rings = 1; mna->up.num_tx_desc = nmr->nr_tx_slots; nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 1, NM_PIPE_MAXSLOTS, NULL); mna->up.num_rx_desc = nmr->nr_rx_slots; nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 1, NM_PIPE_MAXSLOTS, NULL); error = netmap_attach_common(&mna->up); if (error) goto free_mna; /* register the master with the parent */ error = netmap_pipe_add(pna, mna); if (error) goto free_mna; /* create the slave */ sna = 
malloc(sizeof(*mna), M_DEVBUF, M_NOWAIT | M_ZERO); if (sna == NULL) { error = ENOMEM; goto unregister_mna; } /* most fields are the same, copy from master and then fix */ *sna = *mna; snprintf(sna->up.name, sizeof(sna->up.name), "%s}%d", pna->name, pipe_id); sna->role = NR_REG_PIPE_SLAVE; error = netmap_attach_common(&sna->up); if (error) goto free_sna; /* join the two endpoints */ mna->peer = sna; sna->peer = mna; /* we already have a reference to the parent, but we * need another one for the other endpoint we created */ netmap_adapter_get(pna); if (role == NR_REG_PIPE_MASTER) { req = mna; mna->peer_ref = 1; netmap_adapter_get(&sna->up); } else { req = sna; sna->peer_ref = 1; netmap_adapter_get(&mna->up); } ND("created master %p and slave %p", mna, sna); found: ND("pipe %d %s at %p", pipe_id, (req->role == NR_REG_PIPE_MASTER ? "master" : "slave"), req); *na = &req->up; netmap_adapter_get(*na); /* keep the reference to the parent. * It will be released by the req destructor */ /* drop the ifp reference, if any */ if (ifp) { if_rele(ifp); } return 0; free_sna: free(sna, M_DEVBUF); unregister_mna: netmap_pipe_remove(pna, mna); free_mna: free(mna, M_DEVBUF); put_out: netmap_unget_na(pna, ifp); return error; } #endif /* WITH_PIPES */ Index: head/sys/dev/netmap/netmap_vale.c =================================================================== --- head/sys/dev/netmap/netmap_vale.c (revision 307571) +++ head/sys/dev/netmap/netmap_vale.c (revision 307572) @@ -1,2778 +1,2778 @@ /* * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module implements the VALE switch for netmap --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* * OS-specific code that is used only within this file. 
* Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #define BDG_RWLOCK_T struct rwlock // struct rwlock #define BDG_RWINIT(b) \ rw_init_flags(&(b)->bdg_lock, "bdg lock", RW_NOWITNESS) #define BDG_WLOCK(b) rw_wlock(&(b)->bdg_lock) #define BDG_WUNLOCK(b) rw_wunlock(&(b)->bdg_lock) #define BDG_RLOCK(b) rw_rlock(&(b)->bdg_lock) #define BDG_RTRYLOCK(b) rw_try_rlock(&(b)->bdg_lock) #define BDG_RUNLOCK(b) rw_runlock(&(b)->bdg_lock) #define BDG_RWDESTROY(b) rw_destroy(&(b)->bdg_lock) #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_VALE /* * system parameters (most of them in netmap_kern.h) * NM_BDG_NAME prefix for switch port names, default "vale" * NM_BDG_MAXPORTS number of ports * NM_BRIDGES max number of switches in the system. * XXX should become a sysctl or tunable * * Switch ports are named valeX:Y where X is the switch name and Y * is the port. If Y matches a physical interface name, the port is * connected to a physical device. * * Unlike physical interfaces, switch ports use their own memory region * for rings and buffers. * The virtual interfaces use per-queue lock instead of core lock. * In the tx loop, we aggregate traffic in batches to make all operations * faster. The batch size is bridge_batch. */ #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. 
*/ #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */ #define NM_BRIDGE_RINGSIZE 1024 /* in the device */ #define NM_BDG_HASH 1024 /* forwarding table entries */ #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */ #define NM_MULTISEG 64 /* max size of a chain of bufs */ /* actual size of the tables */ #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NM_MULTISEG) /* NM_FT_NULL terminates a list of slots in the ft */ #define NM_FT_NULL NM_BDG_BATCH_MAX /* * bridge_batch is set via sysctl to the max batch size to be * used in the bridge. The actual value may be larger as the * last packet in the block may overflow the size. */ static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */ SYSBEGIN(vars_vale); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0 , ""); SYSEND; static int netmap_vp_create(struct nmreq *, struct ifnet *, struct netmap_vp_adapter **); static int netmap_vp_reg(struct netmap_adapter *na, int onoff); static int netmap_bwrap_reg(struct netmap_adapter *, int onoff); /* * For each output interface, nm_bdg_q is used to construct a list. * bq_len is the number of output buffers (we can have coalescing * during the copy). */ struct nm_bdg_q { uint16_t bq_head; uint16_t bq_tail; uint32_t bq_len; /* number of buffers */ }; /* XXX revise this */ struct nm_hash_ent { uint64_t mac; /* the top 2 bytes are the epoch */ uint64_t ports; }; /* * nm_bridge is a descriptor for a VALE switch. * Interfaces for a bridge are all in bdg_ports[]. * The array has fixed size, an empty entry does not terminate * the search, but lookups only occur on attach/detach so we * don't mind if they are slow. * * The bridge is non blocking on the transmit ports: excess * packets are dropped if there is no room on the output port. * * bdg_lock protects accesses to the bdg_ports array. * This is a rw lock (or equivalent). */ struct nm_bridge { /* XXX what is the proper alignment/layout ? 
*/ BDG_RWLOCK_T bdg_lock; /* protects bdg_ports */ int bdg_namelen; uint32_t bdg_active_ports; /* 0 means free */ char bdg_basename[IFNAMSIZ]; /* Indexes of active ports (up to active_ports) * and all other remaining ports. */ uint8_t bdg_port_index[NM_BDG_MAXPORTS]; struct netmap_vp_adapter *bdg_ports[NM_BDG_MAXPORTS]; /* * The function to decide the destination port. * It returns either of an index of the destination port, * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to * forward this packet. ring_nr is the source ring index, and the * function may overwrite this value to forward this packet to a * different ring index. * This function must be set by netmap_bdg_ctl(). */ struct netmap_bdg_ops bdg_ops; /* the forwarding table, MAC+ports. * XXX should be changed to an argument to be passed to * the lookup function, and allocated on attach */ struct nm_hash_ent ht[NM_BDG_HASH]; #ifdef CONFIG_NET_NS struct net *ns; #endif /* CONFIG_NET_NS */ }; const char* netmap_bdg_name(struct netmap_vp_adapter *vp) { struct nm_bridge *b = vp->na_bdg; if (b == NULL) return NULL; return b->bdg_basename; } #ifndef CONFIG_NET_NS /* * XXX in principle nm_bridges could be created dynamically * Right now we have a static array and deletions are protected * by an exclusive lock. */ static struct nm_bridge *nm_bridges; #endif /* !CONFIG_NET_NS */ /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. We assume there is enough room * in the source and destination buffers. * * XXX only for multiples of 64 bytes, non overlapped. 
*/
static inline void
pkt_copy(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;
	if (unlikely(l >= 1024)) {
		memcpy(dst, src, l);
		return;
	}
	/* copy 64 bytes per iteration; the buffers are assumed large
	 * enough that rounding l up to the next multiple of 64 is safe */
	for (; likely(l > 0); l-=64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}

/* Characters allowed in a VALE bridge/port name:
 * letters, digits and underscore.
 */
static int
nm_is_id_char(const char c)
{
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
	       (c >= '0' && c <= '9') || (c == '_');
}

/* Validate the name of a VALE bridge port and return the
 * position of the ":" character; -1 on error (invalid character,
 * more than one ':', or name too long to be an interface name).
 */
static int
nm_vale_name_validate(const char *name)
{
	int colon_pos = -1;
	int i;

	if (!name || strlen(name) < strlen(NM_BDG_NAME)) {
		return -1;
	}

	for (i = 0; name[i]; i++) {
		if (name[i] == ':') {
			if (colon_pos != -1) {
				/* only one ':' is allowed */
				return -1;
			}
			colon_pos = i;
		} else if (!nm_is_id_char(name[i])) {
			return -1;
		}
	}

	if (i >= IFNAMSIZ) {
		return -1;
	}

	return colon_pos;
}

/*
 * locate a bridge among the existing ones.
 * MUST BE CALLED WITH NMG_LOCK()
 *
 * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME.
 * We assume that this is called with a name of at least NM_NAME chars.
 */
static struct nm_bridge *
nm_find_bridge(const char *name, int create)
{
	int i, namelen;
	struct nm_bridge *b = NULL, *bridges;
	u_int num_bridges;

	NMG_LOCK_ASSERT();

	netmap_bns_getbridges(&bridges, &num_bridges);

	namelen = nm_vale_name_validate(name);
	if (namelen < 0) {
		D("invalid bridge name %s", name ?
name : NULL); return NULL; } /* lookup the name, remember empty slot if there is one */ for (i = 0; i < num_bridges; i++) { struct nm_bridge *x = bridges + i; if (x->bdg_active_ports == 0) { if (create && b == NULL) b = x; /* record empty slot */ } else if (x->bdg_namelen != namelen) { continue; } else if (strncmp(name, x->bdg_basename, namelen) == 0) { ND("found '%.*s' at %d", namelen, name, i); b = x; break; } } if (i == num_bridges && b) { /* name not found, can create entry */ /* initialize the bridge */ strncpy(b->bdg_basename, name, namelen); ND("create new bridge %s with ports %d", b->bdg_basename, b->bdg_active_ports); b->bdg_namelen = namelen; b->bdg_active_ports = 0; for (i = 0; i < NM_BDG_MAXPORTS; i++) b->bdg_port_index[i] = i; /* set the default function */ b->bdg_ops.lookup = netmap_bdg_learning; /* reset the MAC address table */ bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH); NM_BNS_GET(b); } return b; } /* * Free the forwarding tables for rings attached to switch ports. */ static void nm_free_bdgfwd(struct netmap_adapter *na) { int nrings, i; struct netmap_kring *kring; NMG_LOCK_ASSERT(); nrings = na->num_tx_rings; kring = na->tx_rings; for (i = 0; i < nrings; i++) { if (kring[i].nkr_ft) { free(kring[i].nkr_ft, M_DEVBUF); kring[i].nkr_ft = NULL; /* protect from freeing twice */ } } } /* * Allocate the forwarding tables for the rings attached to the bridge ports. 
 */
/* Allocate the forwarding tables and scratch destination queues attached
 * to each TX kring of a VALE port. The single allocation per ring holds:
 *   - NM_BDG_BATCH_MAX struct nm_bdg_fwd entries (the batch work area),
 *   - one struct nm_bdg_q per destination (port:ring pairs + broadcast),
 *   - NM_BDG_BATCH_MAX uint16_t destination indexes.
 * Returns 0 or ENOMEM (freeing any partial allocation via nm_free_bdgfwd).
 */
static int
nm_alloc_bdgfwd(struct netmap_adapter *na)
{
	int nrings, l, i, num_dstq;
	struct netmap_kring *kring;

	NMG_LOCK_ASSERT();
	/* all port:rings + broadcast */
	num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
	l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
	l += sizeof(struct nm_bdg_q) * num_dstq;
	l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;

	nrings = netmap_real_rings(na, NR_TX);
	kring = na->tx_rings;
	for (i = 0; i < nrings; i++) {
		struct nm_bdg_fwd *ft;
		struct nm_bdg_q *dstq;
		int j;

		ft = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!ft) {
			/* undo the rings already allocated */
			nm_free_bdgfwd(na);
			return ENOMEM;
		}
		/* the destination queues live right after the batch area */
		dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);
		for (j = 0; j < num_dstq; j++) {
			dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
			dstq[j].bq_len = 0;
		}
		kring[i].nkr_ft = ft;
	}
	return 0;
}

/* remove from bridge b the ports in slots hw and sw
 * (sw can be -1 if not needed)
 */
static void
netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
{
	int s_hw = hw, s_sw = sw;
	int i, lim =b->bdg_active_ports;
	uint8_t tmp[NM_BDG_MAXPORTS];

	/*
	New algorithm:
	make a copy of bdg_port_index;
	lookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port
	in the array of bdg_port_index, replacing them with
	entries from the bottom of the array;
	decrement bdg_active_ports;
	acquire BDG_WLOCK() and copy back the array.
	 */

	if (netmap_verbose)
		D("detach %d and %d (lim %d)", hw, sw, lim);
	/* make a copy of the list of active ports, update it,
	 * and then copy back within BDG_WLOCK().
	 */
	memcpy(tmp, b->bdg_port_index, sizeof(tmp));
	/* swap-delete: each matched slot is exchanged with the last
	 * active slot, shrinking the active region by one */
	for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) {
		if (hw >= 0 && tmp[i] == hw) {
			ND("detach hw %d at %d", hw, i);
			lim--; /* point to last active port */
			tmp[i] = tmp[lim]; /* swap with i */
			tmp[lim] = hw;	/* now this is inactive */
			hw = -1;	/* done with the hw slot */
		} else if (sw >= 0 && tmp[i] == sw) {
			ND("detach sw %d at %d", sw, i);
			lim--;
			tmp[i] = tmp[lim];
			tmp[lim] = sw;
			sw = -1;
		} else {
			i++;
		}
	}
	if (hw >= 0 || sw >= 0) {
		D("XXX delete failed hw %d sw %d, should panic...", hw, sw);
	}

	/* publish the updated index under the bridge write lock */
	BDG_WLOCK(b);
	if (b->bdg_ops.dtor)
		b->bdg_ops.dtor(b->bdg_ports[s_hw]);
	b->bdg_ports[s_hw] = NULL;
	if (s_sw >= 0) {
		b->bdg_ports[s_sw] = NULL;
	}
	memcpy(b->bdg_port_index, tmp, sizeof(tmp));
	b->bdg_active_ports = lim;
	BDG_WUNLOCK(b);

	ND("now %d active ports", lim);
	if (lim == 0) {
		/* last port gone: release the bridge slot */
		ND("marking bridge %s as free", b->bdg_basename);
		bzero(&b->bdg_ops, sizeof(b->bdg_ops));
		NM_BNS_PUT(b);
	}
}

/* nm_bdg_ctl callback for VALE ports.
 * Attach is a no-op; detach unhooks the port from its bridge
 * (rings are quiesced around the unhook) and drops the reference
 * taken at attach time.
 */
static int
netmap_vp_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach)
{
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
	struct nm_bridge *b = vpna->na_bdg;

	(void)nmr;	// XXX merge ?
	if (attach)
		return 0; /* nothing to do */
	if (b) {
		netmap_set_all_rings(na, 0 /* disable */);
		netmap_bdg_detach_common(b, vpna->bdg_port, -1);
		vpna->na_bdg = NULL;
		netmap_set_all_rings(na, 1 /* enable */);
	}
	/* I have took reference just for attach */
	netmap_adapter_put(na);
	return 0;
}

/* nm_dtor callback for ephemeral VALE ports */
static void
netmap_vp_dtor(struct netmap_adapter *na)
{
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
	struct nm_bridge *b = vpna->na_bdg;

	ND("%s has %d references", na->name, na->na_refcount);

	if (b) {
		netmap_bdg_detach_common(b, vpna->bdg_port, -1);
	}
}

/* remove a persistent VALE port from the system.
 * Returns ENXIO if no such interface, EINVAL if it is not a VALE
 * port, EBUSY if it is still referenced by someone else.
 */
static int
nm_vi_destroy(const char *name)
{
	struct ifnet *ifp;
	int error;

	ifp = ifunit_ref(name);
	if (!ifp)
		return ENXIO;
	NMG_LOCK();
	/* make sure this is actually a VALE port */
	if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
		error = EINVAL;
		goto err;
	}

	if (NA(ifp)->na_refcount > 1) {
		error = EBUSY;
		goto err;
	}
	NMG_UNLOCK();

	D("destroying a persistent vale interface %s", ifp->if_xname);
	/* Linux requires all the references are released
	 * before unregister
	 */
	if_rele(ifp);
	netmap_detach(ifp);
	nm_os_vi_detach(ifp);
	return 0;

err:
	NMG_UNLOCK();
	if_rele(ifp);
	return error;
}

/*
 * Create a virtual interface registered to the system.
 * The interface will be attached to a bridge later.
*/ static int nm_vi_create(struct nmreq *nmr) { struct ifnet *ifp; struct netmap_vp_adapter *vpna; int error; /* don't include VALE prefix */ if (!strncmp(nmr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME))) return EINVAL; ifp = ifunit_ref(nmr->nr_name); if (ifp) { /* already exist, cannot create new one */ if_rele(ifp); return EEXIST; } error = nm_os_vi_persist(nmr->nr_name, &ifp); if (error) return error; NMG_LOCK(); /* netmap_vp_create creates a struct netmap_vp_adapter */ error = netmap_vp_create(nmr, ifp, &vpna); if (error) { D("error %d", error); nm_os_vi_detach(ifp); return error; } /* persist-specific routines */ vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl; netmap_adapter_get(&vpna->up); NM_ATTACH_NA(ifp, &vpna->up); NMG_UNLOCK(); D("created %s", ifp->if_xname); return 0; } /* Try to get a reference to a netmap adapter attached to a VALE switch. * If the adapter is found (or is created), this function returns 0, a * non NULL pointer is returned into *na, and the caller holds a * reference to the adapter. * If an adapter is not found, then no reference is grabbed and the * function returns an error code, or 0 if there is just a VALE prefix * mismatch. Therefore the caller holds a reference when * (*na != NULL && return == 0). */ int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create) { char *nr_name = nmr->nr_name; const char *ifname; struct ifnet *ifp; int error = 0; struct netmap_vp_adapter *vpna, *hostna = NULL; struct nm_bridge *b; int i, j, cand = -1, cand2 = -1; int needed; *na = NULL; /* default return value */ /* first try to see if this is a bridge port. */ NMG_LOCK_ASSERT(); if (strncmp(nr_name, NM_BDG_NAME, sizeof(NM_BDG_NAME) - 1)) { return 0; /* no error, but no VALE prefix */ } b = nm_find_bridge(nr_name, create); if (b == NULL) { D("no bridges available for '%s'", nr_name); return (create ? 
ENOMEM : ENXIO); } if (strlen(nr_name) < b->bdg_namelen) /* impossible */ panic("x"); /* Now we are sure that name starts with the bridge's name, * lookup the port in the bridge. We need to scan the entire * list. It is not important to hold a WLOCK on the bridge * during the search because NMG_LOCK already guarantees * that there are no other possible writers. */ /* lookup in the local list of ports */ for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; vpna = b->bdg_ports[i]; // KASSERT(na != NULL); ND("checking %s", vpna->up.name); if (!strcmp(vpna->up.name, nr_name)) { netmap_adapter_get(&vpna->up); ND("found existing if %s refs %d", nr_name) *na = &vpna->up; return 0; } } /* not found, should we create it? */ if (!create) return ENXIO; /* yes we should, see if we have space to attach entries */ needed = 2; /* in some cases we only need 1 */ if (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) { D("bridge full %d, cannot create new port", b->bdg_active_ports); return ENOMEM; } /* record the next two ports available, but do not allocate yet */ cand = b->bdg_port_index[b->bdg_active_ports]; cand2 = b->bdg_port_index[b->bdg_active_ports + 1]; ND("+++ bridge %s port %s used %d avail %d %d", b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2); /* * try see if there is a matching NIC with this name * (after the bridge's name) */ ifname = nr_name + b->bdg_namelen + 1; ifp = ifunit_ref(ifname); if (!ifp) { /* Create an ephemeral virtual port * This block contains all the ephemeral-specific logics */ if (nmr->nr_cmd) { /* nr_cmd must be 0 for a virtual port */ return EINVAL; } /* bdg_netmap_attach creates a struct netmap_adapter */ error = netmap_vp_create(nmr, NULL, &vpna); if (error) { D("error %d", error); free(ifp, M_DEVBUF); return error; } /* shortcut - we can skip get_hw_na(), * ownership check and nm_bdg_attach() */ } else { struct netmap_adapter *hw; error = netmap_get_hw_na(ifp, &hw); if (error || hw == NULL) goto out; /* host adapter 
might not be created */ error = hw->nm_bdg_attach(nr_name, hw); if (error) goto out; vpna = hw->na_vp; hostna = hw->na_hostvp; if (nmr->nr_arg1 != NETMAP_BDG_HOST) hostna = NULL; } BDG_WLOCK(b); vpna->bdg_port = cand; ND("NIC %p to bridge port %d", vpna, cand); /* bind the port to the bridge (virtual ports are not active) */ b->bdg_ports[cand] = vpna; vpna->na_bdg = b; b->bdg_active_ports++; if (hostna != NULL) { /* also bind the host stack to the bridge */ b->bdg_ports[cand2] = hostna; hostna->bdg_port = cand2; hostna->na_bdg = b; b->bdg_active_ports++; ND("host %p to bridge port %d", hostna, cand2); } ND("if %s refs %d", ifname, vpna->up.na_refcount); BDG_WUNLOCK(b); *na = &vpna->up; netmap_adapter_get(*na); return 0; out: if_rele(ifp); return error; } /* Process NETMAP_BDG_ATTACH */ static int nm_bdg_ctl_attach(struct nmreq *nmr) { struct netmap_adapter *na; int error; NMG_LOCK(); error = netmap_get_bdg_na(nmr, &na, 1 /* create if not exists */); if (error) /* no device */ goto unlock_exit; if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } if (NETMAP_OWNED_BY_ANY(na)) { error = EBUSY; goto unref_exit; } if (na->nm_bdg_ctl) { /* nop for VALE ports. 
The bwrap needs to put the hwna * in netmap mode (see netmap_bwrap_bdg_ctl) */ error = na->nm_bdg_ctl(na, nmr, 1); if (error) goto unref_exit; ND("registered %s to netmap-mode", na->name); } NMG_UNLOCK(); return 0; unref_exit: netmap_adapter_put(na); unlock_exit: NMG_UNLOCK(); return error; } static inline int nm_is_bwrap(struct netmap_adapter *na) { return na->nm_register == netmap_bwrap_reg; } /* process NETMAP_BDG_DETACH */ static int nm_bdg_ctl_detach(struct nmreq *nmr) { struct netmap_adapter *na; int error; NMG_LOCK(); error = netmap_get_bdg_na(nmr, &na, 0 /* don't create */); if (error) { /* no device, or another bridge or user owns the device */ goto unlock_exit; } if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } else if (nm_is_bwrap(na) && ((struct netmap_bwrap_adapter *)na)->na_polling_state) { /* Don't detach a NIC with polling */ error = EBUSY; netmap_adapter_put(na); goto unlock_exit; } if (na->nm_bdg_ctl) { /* remove the port from bridge. The bwrap * also needs to put the hwna in normal mode */ error = na->nm_bdg_ctl(na, nmr, 0); } netmap_adapter_put(na); unlock_exit: NMG_UNLOCK(); return error; } struct nm_bdg_polling_state; struct nm_bdg_kthread { struct nm_kthread *nmk; u_int qfirst; u_int qlast; struct nm_bdg_polling_state *bps; }; struct nm_bdg_polling_state { bool configured; bool stopped; struct netmap_bwrap_adapter *bna; u_int reg; u_int qfirst; u_int qlast; u_int cpu_from; u_int ncpus; struct nm_bdg_kthread *kthreads; }; static void netmap_bwrap_polling(void *data) { struct nm_bdg_kthread *nbk = data; struct netmap_bwrap_adapter *bna; u_int qfirst, qlast, i; struct netmap_kring *kring0, *kring; if (!nbk) return; qfirst = nbk->qfirst; qlast = nbk->qlast; bna = nbk->bps->bna; kring0 = NMR(bna->hwna, NR_RX); for (i = qfirst; i < qlast; i++) { kring = kring0 + i; kring->nm_notify(kring, 0); } } static int nm_bdg_create_kthreads(struct nm_bdg_polling_state *bps) { struct nm_kthread_cfg kcfg; int i, j; bps->kthreads = 
malloc(sizeof(struct nm_bdg_kthread) * bps->ncpus, M_DEVBUF, M_NOWAIT | M_ZERO); if (bps->kthreads == NULL) return ENOMEM; bzero(&kcfg, sizeof(kcfg)); kcfg.worker_fn = netmap_bwrap_polling; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; int all = (bps->ncpus == 1 && bps->reg == NR_REG_ALL_NIC); int affinity = bps->cpu_from + i; t->bps = bps; - t->qfirst = all ? bps->qfirst /* must be 0 */: affinity; + t->qfirst = all ? bps->qfirst /* must be 0 */: affinity; t->qlast = all ? bps->qlast : t->qfirst + 1; D("kthread %d a:%u qf:%u ql:%u", i, affinity, t->qfirst, t->qlast); kcfg.type = i; kcfg.worker_private = t; t->nmk = nm_os_kthread_create(&kcfg); if (t->nmk == NULL) { goto cleanup; } nm_os_kthread_set_affinity(t->nmk, affinity); } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kthread_delete(t->nmk); } free(bps->kthreads, M_DEVBUF); return EFAULT; } /* a version of ptnetmap_start_kthreads() */ static int nm_bdg_polling_start_kthreads(struct nm_bdg_polling_state *bps) { int error, i, j; if (!bps) { D("polling is not configured"); return EFAULT; } bps->stopped = false; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; error = nm_os_kthread_start(t->nmk); if (error) { D("error in nm_kthread_start()"); goto cleanup; } } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kthread_stop(t->nmk); } bps->stopped = true; return error; } static void nm_bdg_polling_stop_delete_kthreads(struct nm_bdg_polling_state *bps) { int i; if (!bps) return; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kthread_stop(t->nmk); nm_os_kthread_delete(t->nmk); } bps->stopped = true; } static int get_polling_cfg(struct nmreq *nmr, struct netmap_adapter *na, struct nm_bdg_polling_state *bps) { int req_cpus, avail_cpus, core_from; u_int reg, i, qfirst, qlast; avail_cpus = nm_os_ncpus(); req_cpus = 
nmr->nr_arg1;
	/* sanity: need at least one worker, and leave one core free */
	if (req_cpus == 0) {
		D("req_cpus must be > 0");
		return EINVAL;
	} else if (req_cpus >= avail_cpus) {
		D("for safety, we need at least one core left in the system");
		return EINVAL;
	}
	reg = nmr->nr_flags & NR_REG_MASK;
	i = nmr->nr_ringid & NETMAP_RING_MASK;
	/*
	 * ONE_NIC: dedicate one core to one ring. If multiple cores
	 *          are specified, consecutive rings are also polled.
	 *          For example, if ringid=2 and 2 cores are given,
	 *          ring 2 and 3 are polled by core 2 and 3, respectively.
	 * ALL_NIC: poll all the rings using a core specified by ringid.
	 *          the number of cores must be 1.
	 */
	if (reg == NR_REG_ONE_NIC) {
		if (i + req_cpus > nma_get_nrings(na, NR_RX)) {
			D("only %d rings exist (ring %u-%u is given)",
				nma_get_nrings(na, NR_RX), i, i+req_cpus);
			return EINVAL;
		}
		qfirst = i;
		qlast = qfirst + req_cpus;
		core_from = qfirst;
	} else if (reg == NR_REG_ALL_NIC) {
		if (req_cpus != 1) {
			D("ncpus must be 1 not %d for REG_ALL_NIC", req_cpus);
			return EINVAL;
		}
		qfirst = 0;
		qlast = nma_get_nrings(na, NR_RX);
		core_from = i;
	} else {
		D("reg must be ALL_NIC or ONE_NIC");
		return EINVAL;
	}

	bps->reg = reg;
	bps->qfirst = qfirst;
	bps->qlast = qlast;
	bps->cpu_from = core_from;
	bps->ncpus = req_cpus;
	D("%s qfirst %u qlast %u cpu_from %u ncpus %u",
		reg == NR_REG_ALL_NIC ? "REG_ALL_NIC" : "REG_ONE_NIC",
		qfirst, qlast, core_from, req_cpus);
	return 0;
}

/* Process NETMAP_BDG_POLLING_ON: configure, create and start the
 * polling kthreads for a bwrap adapter, disabling its interrupts.
 * Returns EFAULT if already polling, or the underlying error.
 */
static int
nm_bdg_ctl_polling_start(struct nmreq *nmr, struct netmap_adapter *na)
{
	struct nm_bdg_polling_state *bps;
	struct netmap_bwrap_adapter *bna;
	int error;

	bna = (struct netmap_bwrap_adapter *)na;
	if (bna->na_polling_state) {
		D("ERROR adapter already in polling mode");
		return EFAULT;
	}

	bps = malloc(sizeof(*bps), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!bps)
		return ENOMEM;
	bps->configured = false;
	bps->stopped = true;

	if (get_polling_cfg(nmr, na, bps)) {
		free(bps, M_DEVBUF);
		return EINVAL;
	}

	if (nm_bdg_create_kthreads(bps)) {
		free(bps, M_DEVBUF);
		return EFAULT;
	}

	bps->configured = true;
	bna->na_polling_state = bps;
	bps->bna = bna;

	/* disable interrupt if possible */
	if (bna->hwna->nm_intr)
		bna->hwna->nm_intr(bna->hwna, 0);

	/* start kthread now */
	error = nm_bdg_polling_start_kthreads(bps);
	if (error) {
		D("ERROR nm_bdg_polling_start_kthread()");
		/* NOTE(review): this frees the kthread array without
		 * deleting the created kthreads first — looks like a
		 * leak of the nm_kthread objects; confirm against
		 * nm_os_kthread_* ownership rules */
		free(bps->kthreads, M_DEVBUF);
		free(bps, M_DEVBUF);
		bna->na_polling_state = NULL;
		if (bna->hwna->nm_intr)
			bna->hwna->nm_intr(bna->hwna, 1);
	}
	return error;
}

/* Process NETMAP_BDG_POLLING_OFF: stop and delete the polling
 * kthreads and re-enable the NIC interrupts.
 */
static int
nm_bdg_ctl_polling_stop(struct nmreq *nmr, struct netmap_adapter *na)
{
	struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na;
	struct nm_bdg_polling_state *bps;

	if (!bna->na_polling_state) {
		D("ERROR adapter is not in polling mode");
		return EFAULT;
	}
	bps = bna->na_polling_state;
	nm_bdg_polling_stop_delete_kthreads(bna->na_polling_state);
	bps->configured = false;
	free(bps, M_DEVBUF);
	bna->na_polling_state = NULL;
	/* reenable interrupt */
	if (bna->hwna->nm_intr)
		bna->hwna->nm_intr(bna->hwna, 1);
	return 0;
}

/* Called by either user's context (netmap_ioctl())
 * or external kernel modules (e.g., Openvswitch).
 * Operation is indicated in nmr->nr_cmd.
 * NETMAP_BDG_OPS that sets configure/lookup/dtor functions to the bridge
 * requires bdg_ops argument; the other commands ignore this argument.
 *
 * Called without NMG_LOCK.
 */
int
netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops)
{
	struct nm_bridge *b, *bridges;
	struct netmap_adapter *na;
	struct netmap_vp_adapter *vpna;
	char *name = nmr->nr_name;
	int cmd = nmr->nr_cmd, namelen = strlen(name);
	int error = 0, i, j;
	u_int num_bridges;

	netmap_bns_getbridges(&bridges, &num_bridges);

	switch (cmd) {
	case NETMAP_BDG_NEWIF:
		error = nm_vi_create(nmr);
		break;

	case NETMAP_BDG_DELIF:
		error = nm_vi_destroy(nmr->nr_name);
		break;

	case NETMAP_BDG_ATTACH:
		error = nm_bdg_ctl_attach(nmr);
		break;

	case NETMAP_BDG_DETACH:
		error = nm_bdg_ctl_detach(nmr);
		break;

	case NETMAP_BDG_LIST:
		/* this is used to enumerate bridges and ports */
		if (namelen) { /* look up indexes of bridge and port */
			if (strncmp(name, NM_BDG_NAME, strlen(NM_BDG_NAME))) {
				error = EINVAL;
				break;
			}
			NMG_LOCK();
			b = nm_find_bridge(name, 0 /* don't create */);
			if (!b) {
				error = ENOENT;
				NMG_UNLOCK();
				break;
			}

			/* error is 0 even if the port is not found:
			 * nr_arg2 stays NM_BDG_NOPORT in that case */
			error = 0;
			nmr->nr_arg1 = b - bridges; /* bridge index */
			nmr->nr_arg2 = NM_BDG_NOPORT;
			for (j = 0; j < b->bdg_active_ports; j++) {
				i = b->bdg_port_index[j];
				vpna = b->bdg_ports[i];
				if (vpna == NULL) {
					D("---AAAAAAAAARGH-------");
					continue;
				}
				/* the former and the latter identify a
				 * virtual port and a NIC, respectively
				 */
				if (!strcmp(vpna->up.name, name)) {
					nmr->nr_arg2 = i; /* port index */
					break;
				}
			}
			NMG_UNLOCK();
		} else {
			/* return the first non-empty entry starting from
			 * bridge nr_arg1 and port nr_arg2.
			 *
			 * Users can detect the end of the same bridge by
			 * seeing the new and old value of nr_arg1, and can
			 * detect the end of all the bridge by error != 0
			 */
			i = nmr->nr_arg1;
			j = nmr->nr_arg2;

			NMG_LOCK();
			for (error = ENOENT; i < NM_BRIDGES; i++) {
				b = bridges + i;
				if (j >= b->bdg_active_ports) {
					j = 0; /* following bridges scan from 0 */
					continue;
				}
				nmr->nr_arg1 = i;
				nmr->nr_arg2 = j;
				j = b->bdg_port_index[j];
				vpna = b->bdg_ports[j];
				strncpy(name, vpna->up.name, (size_t)IFNAMSIZ);
				error = 0;
				break;
			}
			NMG_UNLOCK();
		}
		break;

	case NETMAP_BDG_REGOPS: /* XXX this should not be available from userspace */
		/* register callbacks to the given bridge.
		 * nmr->nr_name may be just bridge's name (including ':'
		 * if it is not just NM_NAME).
		 */
		if (!bdg_ops) {
			error = EINVAL;
			break;
		}
		NMG_LOCK();
		b = nm_find_bridge(name, 0 /* don't create */);
		if (!b) {
			error = EINVAL;
		} else {
			b->bdg_ops = *bdg_ops;
		}
		NMG_UNLOCK();
		break;

	case NETMAP_BDG_VNET_HDR:
		/* Valid lengths for the virtio-net header are 0 (no header),
		   10 and 12. */
		if (nmr->nr_arg1 != 0 &&
			nmr->nr_arg1 != sizeof(struct nm_vnet_hdr) &&
				nmr->nr_arg1 != 12) {
			error = EINVAL;
			break;
		}
		NMG_LOCK();
		error = netmap_get_bdg_na(nmr, &na, 0);
		if (na && !error) {
			vpna = (struct netmap_vp_adapter *)na;
			na->virt_hdr_len = nmr->nr_arg1;
			if (na->virt_hdr_len) {
				/* with the vnet header the port may carry
				 * offloaded (larger) frames */
				vpna->mfs = NETMAP_BUF_SIZE(na);
			}
			D("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
			netmap_adapter_put(na);
		} else if (!na) {
			error = ENXIO;
		}
		NMG_UNLOCK();
		break;

	case NETMAP_BDG_POLLING_ON:
	case NETMAP_BDG_POLLING_OFF:
		NMG_LOCK();
		error = netmap_get_bdg_na(nmr, &na, 0);
		if (na && !error) {
			if (!nm_is_bwrap(na)) {
				/* polling only makes sense on NIC ports */
				error = EOPNOTSUPP;
			} else if (cmd == NETMAP_BDG_POLLING_ON) {
				error = nm_bdg_ctl_polling_start(nmr, na);
				if (!error)
					netmap_adapter_get(na);
			} else {
				error = nm_bdg_ctl_polling_stop(nmr, na);
				if (!error)
					netmap_adapter_put(na);
			}
			/* drop the lookup reference in all cases */
			netmap_adapter_put(na);
		}
		NMG_UNLOCK();
		break;

	default:
		D("invalid cmd (nmr->nr_cmd) (0x%x)", cmd);
		error = EINVAL;
		break;
	}
	return error;
}

/* Forward a configuration request to the bridge's config callback.
 * NMG_LOCK is only used to look the bridge up; the callback runs
 * under the bridge read lock instead.
 */
int
netmap_bdg_config(struct nmreq *nmr)
{
	struct nm_bridge *b;
	int error = EINVAL;

	NMG_LOCK();
	b = nm_find_bridge(nmr->nr_name, 0);
	if (!b) {
		NMG_UNLOCK();
		return error;
	}
	NMG_UNLOCK();
	/* Don't call config() with NMG_LOCK() held */
	BDG_RLOCK(b);
	if (b->bdg_ops.config != NULL)
		error = b->bdg_ops.config((struct nm_ifreq *)nmr);
	BDG_RUNLOCK(b);
	return error;
}

/* nm_krings_create callback for VALE ports.
 * Calls the standard netmap_krings_create, then adds leases on rx
 * rings and bdgfwd on tx rings.
 */
static int
netmap_vp_krings_create(struct netmap_adapter *na)
{
	u_int tailroom;
	int error, i;
	uint32_t *leases;
	u_int nrx = netmap_real_rings(na, NR_RX);

	/*
	 * Leases are attached to RX rings on vale ports
	 */
	tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;

	error = netmap_krings_create(na, tailroom);
	if (error)
		return error;

	/* the lease array lives in the tailroom just allocated */
	leases = na->tailroom;

	for (i = 0; i < nrx; i++) { /* Receive rings */
		na->rx_rings[i].nkr_leases = leases;
		leases += na->num_rx_desc;
	}

	error = nm_alloc_bdgfwd(na);
	if (error) {
		netmap_krings_delete(na);
		return error;
	}

	return 0;
}

/* nm_krings_delete callback for VALE ports. */
static void
netmap_vp_krings_delete(struct netmap_adapter *na)
{
	nm_free_bdgfwd(na);
	netmap_krings_delete(na);
}

static int
nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n,
	struct netmap_vp_adapter *na, u_int ring_nr);

/*
 * main dispatch routine for the bridge.
 * Grab packets from a kring, move them into the ft structure
 * associated to the tx (input) port. Max one instance per port,
 * filtered on input (ioctl, poll or XXX).
 * Returns the next position in the ring.
 */
static int
nm_bdg_preflush(struct netmap_kring *kring, u_int end)
{
	struct netmap_vp_adapter *na =
		(struct netmap_vp_adapter*)kring->na;
	struct netmap_ring *ring = kring->ring;
	struct nm_bdg_fwd *ft;
	u_int ring_nr = kring->ring_id;
	u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
	u_int ft_i = 0;	/* start from 0 */
	u_int frags = 1; /* how many frags ? */
	struct nm_bridge *b = na->na_bdg;

	/* To protect against modifications to the bridge we acquire a
	 * shared lock, waiting if we can sleep (if the source port is
	 * attached to a user process) or with a trylock otherwise (NICs).
	 */
	ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
	if (na->up.na_flags & NAF_BDG_MAYSLEEP)
		BDG_RLOCK(b);
	else if (!BDG_RTRYLOCK(b))
		return 0;
	ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
	ft = kring->nkr_ft;

	/* copy slot metadata into the work area, one entry per slot,
	 * grouping multi-fragment packets via NS_MOREFRAG */
	for (; likely(j != end); j = nm_next(j, lim)) {
		struct netmap_slot *slot = &ring->slot[j];
		char *buf;

		ft[ft_i].ft_len = slot->len;
		ft[ft_i].ft_flags = slot->flags;
		ND("flags is 0x%x", slot->flags);
		/* we do not use the buf changed flag, but we still need to reset it */
		slot->flags &= ~NS_BUF_CHANGED;

		/* this slot goes into a list so initialize the link field */
		ft[ft_i].ft_next = NM_FT_NULL;
		buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
			(void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);
		if (unlikely(buf == NULL)) {
			/* invalid buffer: substitute a safe one with len 0 */
			RD(5, "NULL %s buffer pointer from %s slot %d len %d",
				(slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
				kring->name, j, ft[ft_i].ft_len);
			buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
			ft[ft_i].ft_len = 0;
			ft[ft_i].ft_flags = 0;
		}
		__builtin_prefetch(buf);
		++ft_i;
		if (slot->flags & NS_MOREFRAG) {
			frags++;
			continue;
		}
		if (unlikely(netmap_verbose && frags > 1))
			RD(5, "%d frags at %d", frags, ft_i - frags);
		/* record the fragment count on the first entry of the packet */
		ft[ft_i - frags].ft_frags = frags;
		frags = 1;
		/* flush a full batch before overflowing the work area */
		if (unlikely((int)ft_i >= bridge_batch))
			ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
	}
	if (frags > 1) {
		/* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we
		 * have to fix frags count. */
		frags--;
		ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
		ft[ft_i - frags].ft_frags = frags;
		D("Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
	}
	if (ft_i)
		ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
	BDG_RUNLOCK(b);
	return j;
}

/* ----- FreeBSD if_bridge hash function ------- */

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 *
 * http://www.burtleburtle.net/bob/hash/spooky.html
 */

#define mix(a, b, c)                                                    \
do {                                                                    \
        a -= b; a -= c; a ^= (c >> 13);                                 \
        b -= c; b -= a; b ^= (a << 8);                                  \
        c -= a; c -= b; c ^= (b >> 13);                                 \
        a -= b; a -= c; a ^= (c >> 12);                                 \
        b -= c; b -= a; b ^= (a << 16);                                 \
        c -= a; c -= b; c ^= (b >> 5);                                  \
        a -= b; a -= c; a ^= (c >> 3);                                  \
        b -= c; b -= a; b ^= (a << 10);                                 \
        c -= a; c -= b; c ^= (b >> 15);                                 \
} while (/*CONSTCOND*/0)

/* Hash a 6-byte MAC address into the bridge routing table index. */
static __inline uint32_t
nm_bridge_rthash(const uint8_t *addr)
{
        uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key

        b += addr[5] << 8;
        b += addr[4];
        a += addr[3] << 24;
        a += addr[2] << 16;
        a += addr[1] << 8;
        a += addr[0];

        mix(a, b, c);
#define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
        return (c & BRIDGE_RTHASH_MASK);
}

#undef mix

/* nm_register callback for VALE ports.
 * Toggles netmap mode on the krings that have a pending mode change,
 * holding the bridge write lock if the port is already attached.
 */
static int
netmap_vp_reg(struct netmap_adapter *na, int onoff)
{
	struct netmap_vp_adapter *vpna =
		(struct netmap_vp_adapter*)na;
	enum txrx t;
	int i;

	/* persistent ports may be put in netmap mode
	 * before being attached to a bridge
	 */
	if (vpna->na_bdg)
		BDG_WLOCK(vpna->na_bdg);
	if (onoff) {
		for_rx_tx(t) {
			/* + 1 also covers the host ring */
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_on(kring))
					kring->nr_mode = NKR_NETMAP_ON;
			}
		}
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
		/* XXX on FreeBSD, persistent VALE ports should also
		 * toggle IFCAP_NETMAP in na->ifp (2014-03-16)
		 */
	} else {
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_off(kring))
					kring->nr_mode = NKR_NETMAP_OFF;
			}
		}
	}
	if (vpna->na_bdg)
		BDG_WUNLOCK(vpna->na_bdg);
	return 0;
}

/*
 * Lookup function for a learning bridge.
 * Update the hash table with the source address,
 * and then returns the destination port index, and the
 * ring in *dst_ring (at the moment, always use ring 0)
 */
u_int
netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
		struct netmap_vp_adapter *na)
{
	uint8_t *buf = ft->ft_buf;
	u_int buf_len = ft->ft_len;
	struct nm_hash_ent *ht = na->na_bdg->ht;
	uint32_t sh, dh;
	u_int dst, mysrc = na->bdg_port;
	uint64_t smac, dmac;
	uint8_t indbuf[12];

	/* safety check, unfortunately we have many cases */
	if (buf_len >= 14 + na->up.virt_hdr_len) {
		/* virthdr + mac_hdr in the same slot */
		buf += na->up.virt_hdr_len;
		buf_len -= na->up.virt_hdr_len;
	} else if (buf_len == na->up.virt_hdr_len && ft->ft_flags & NS_MOREFRAG) {
		/* only header in first fragment */
		ft++;
		buf = ft->ft_buf;
		buf_len = ft->ft_len;
	} else {
		RD(5, "invalid buf format, length %d", buf_len);
		return NM_BDG_NOPORT;
	}

	if (ft->ft_flags & NS_INDIRECT) {
		/* copy the first 12 bytes (dst+src MAC) from userspace */
		if (copyin(buf, indbuf, sizeof(indbuf))) {
			return NM_BDG_NOPORT;
		}
		buf = indbuf;
	}

	/* dmac = bytes 0..5; smac = bytes 6..11 (via a shifted 8-byte load) */
	dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
	smac = le64toh(*(uint64_t *)(buf + 4));
	smac >>= 16;

	/*
	 * The hash is somewhat expensive, there might be some
	 * worthwhile optimizations here.
	 */
	if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
		uint8_t *s = buf+6;
		sh = nm_bridge_rthash(s); // XXX hash of source
		/* update source port forwarding entry */
		na->last_smac = ht[sh].mac = smac;	/* XXX expire ? */
		ht[sh].ports = mysrc;
		if (netmap_verbose)
		    D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
			s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
	}
	dst = NM_BDG_BROADCAST;
	if ((buf[0] & 1) == 0) { /* unicast */
		dh = nm_bridge_rthash(buf); // XXX hash of dst
		if (ht[dh].mac == dmac) {	/* found dst */
			dst = ht[dh].ports;
		}
		/* XXX otherwise return NM_BDG_UNKNOWN ? */
	}
	return dst;
}

/*
 * Available space in the ring.
Only used in VALE code
 * and only with is_rx = 1
 */
static inline uint32_t
nm_kr_space(struct netmap_kring *k, int is_rx)
{
	int space;

	if (is_rx) {
		/* slots between the consumer (nr_hwcur) and the lease
		 * frontier (nkr_hwlease) are busy; keep one slot free */
		int busy = k->nkr_hwlease - k->nr_hwcur;
		if (busy < 0)
			busy += k->nkr_num_slots;
		space = k->nkr_num_slots - 1 - busy;
	} else {	/* XXX never used in this branch */
		space = k->nr_hwtail - k->nkr_hwlease;
		if (space < 0)
			space += k->nkr_num_slots;
	}
#if 0
	// sanity check
	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_tail >= k->nkr_num_slots ||
		busy < 0 ||
		busy >= k->nkr_num_slots) {
		D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d",
			k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
#endif
	return space;
}

/* make a lease on the kring for N positions. return the
 * lease index
 * XXX only used in VALE code and with is_rx = 1
 */
static inline uint32_t
nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
{
	uint32_t lim = k->nkr_num_slots - 1;
	uint32_t lease_idx = k->nkr_lease_idx;

	/* mark the lease slot as not yet completed */
	k->nkr_leases[lease_idx] = NR_NOSLOT;
	k->nkr_lease_idx = nm_next(lease_idx, lim);

	if (n > nm_kr_space(k, is_rx)) {
		D("invalid request for %d slots", n);
		panic("x");
	}
	/* XXX verify that there are n slots */
	/* advance the lease frontier, wrapping around the ring */
	k->nkr_hwlease += n;
	if (k->nkr_hwlease > lim)
		k->nkr_hwlease -= lim + 1;

	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwtail >= k->nkr_num_slots ||
		k->nkr_lease_idx >= k->nkr_num_slots) {
		D("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
			k->na->name,
			k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
	return lease_idx;
}

/*
 *
 * This flush routine supports only unicast and broadcast but a large
 * number of ports, and lets us replace the learn and dispatch functions.
*/ int nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na, u_int ring_nr) { struct nm_bdg_q *dst_ents, *brddst; uint16_t num_dsts = 0, *dsts; struct nm_bridge *b = na->na_bdg; u_int i, me = na->bdg_port; /* * The work area (pointed by ft) is followed by an array of * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS * queues per port plus one for the broadcast traffic. * Then we have an array of destination indexes. */ dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX); dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1); /* first pass: find a destination for each packet in the batch */ for (i = 0; likely(i < n); i += ft[i].ft_frags) { uint8_t dst_ring = ring_nr; /* default, same ring as origin */ uint16_t dst_port, d_i; struct nm_bdg_q *d; ND("slot %d frags %d", i, ft[i].ft_frags); /* Drop the packet if the virtio-net header is not into the first fragment nor at the very beginning of the second. */ if (unlikely(na->up.virt_hdr_len > ft[i].ft_len)) continue; dst_port = b->bdg_ops.lookup(&ft[i], &dst_ring, na); if (netmap_verbose > 255) RD(5, "slot %d port %d -> %d", i, me, dst_port); if (dst_port == NM_BDG_NOPORT) continue; /* this packet is identified to be dropped */ else if (unlikely(dst_port > NM_BDG_MAXPORTS)) continue; else if (dst_port == NM_BDG_BROADCAST) dst_ring = 0; /* broadcasts always go to ring 0 */ else if (unlikely(dst_port == me || !b->bdg_ports[dst_port])) continue; /* get a position in the scratch pad */ d_i = dst_port * NM_BDG_MAXRINGS + dst_ring; d = dst_ents + d_i; /* append the first fragment to the list */ if (d->bq_head == NM_FT_NULL) { /* new destination */ d->bq_head = d->bq_tail = i; /* remember this position to be scanned later */ if (dst_port != NM_BDG_BROADCAST) dsts[num_dsts++] = d_i; } else { ft[d->bq_tail].ft_next = i; d->bq_tail = i; } d->bq_len += ft[i].ft_frags; } /* * Broadcast traffic goes to ring 0 on all destinations. 
* So we need to add these rings to the list of ports to scan. * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is * expensive. We should keep a compact list of active destinations * so we could shorten this loop. */ brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS; if (brddst->bq_head != NM_FT_NULL) { u_int j; for (j = 0; likely(j < b->bdg_active_ports); j++) { uint16_t d_i; i = b->bdg_port_index[j]; if (unlikely(i == me)) continue; d_i = i * NM_BDG_MAXRINGS; if (dst_ents[d_i].bq_head == NM_FT_NULL) dsts[num_dsts++] = d_i; } } ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts); /* second pass: scan destinations */ for (i = 0; i < num_dsts; i++) { struct netmap_vp_adapter *dst_na; struct netmap_kring *kring; struct netmap_ring *ring; u_int dst_nr, lim, j, d_i, next, brd_next; u_int needed, howmany; int retry = netmap_txsync_retry; struct nm_bdg_q *d; uint32_t my_start = 0, lease_idx = 0; int nrings; int virt_hdr_mismatch = 0; d_i = dsts[i]; ND("second pass %d port %d", i, d_i); d = dst_ents + d_i; // XXX fix the division dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS]; /* protect from the lookup function returning an inactive * destination port */ if (unlikely(dst_na == NULL)) goto cleanup; if (dst_na->up.na_flags & NAF_SW_ONLY) goto cleanup; /* * The interface may be in !netmap mode in two cases: * - when na is attached but not activated yet; * - when na is being deactivated but is still attached. */ if (unlikely(!nm_netmap_on(&dst_na->up))) { ND("not in netmap mode!"); goto cleanup; } /* there is at least one either unicast or broadcast packet */ brd_next = brddst->bq_head; next = d->bq_head; /* we need to reserve this many slots. If fewer are * available, some packets will be dropped. * Packets may have multiple fragments, so we may not use * there is a chance that we may not use all of the slots * we have claimed, so we will need to handle the leftover * ones when we regain the lock. 
*/ needed = d->bq_len + brddst->bq_len; if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) { RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len, dst_na->up.virt_hdr_len); /* There is a virtio-net header/offloadings mismatch between * source and destination. The slower mismatch datapath will * be used to cope with all the mismatches. */ virt_hdr_mismatch = 1; if (dst_na->mfs < na->mfs) { /* We may need to do segmentation offloadings, and so * we may need a number of destination slots greater * than the number of input slots ('needed'). * We look for the smallest integer 'x' which satisfies: * needed * na->mfs + x * H <= x * na->mfs * where 'H' is the length of the longest header that may * be replicated in the segmentation process (e.g. for * TCPv4 we must account for ethernet header, IP header * and TCPv4 header). */ needed = (needed * na->mfs) / (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1; ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed); } } ND(5, "pass 2 dst %d is %x %s", i, d_i, is_vp ? "virtual" : "nic/host"); dst_nr = d_i & (NM_BDG_MAXRINGS-1); nrings = dst_na->up.num_rx_rings; if (dst_nr >= nrings) dst_nr = dst_nr % nrings; kring = &dst_na->up.rx_rings[dst_nr]; ring = kring->ring; lim = kring->nkr_num_slots - 1; retry: if (dst_na->retry && retry) { /* try to get some free slot from the previous run */ kring->nm_notify(kring, 0); /* actually useful only for bwraps, since there * the notify will trigger a txsync on the hwna. VALE ports * have dst_na->retry == 0 */ } /* reserve the buffers in the queue and an entry * to report completion, and drop lock. * XXX this might become a helper function. 
*/ mtx_lock(&kring->q_lock); if (kring->nkr_stopped) { mtx_unlock(&kring->q_lock); goto cleanup; } my_start = j = kring->nkr_hwlease; howmany = nm_kr_space(kring, 1); if (needed < howmany) howmany = needed; lease_idx = nm_kr_lease(kring, howmany, 1); mtx_unlock(&kring->q_lock); /* only retry if we need more than available slots */ if (retry && needed <= howmany) retry = 0; /* copy to the destination queue */ while (howmany > 0) { struct netmap_slot *slot; struct nm_bdg_fwd *ft_p, *ft_end; u_int cnt; /* find the queue from which we pick next packet. * NM_FT_NULL is always higher than valid indexes * so we never dereference it if the other list * has packets (and if both are empty we never * get here). */ if (next < brd_next) { ft_p = ft + next; next = ft_p->ft_next; } else { /* insert broadcast */ ft_p = ft + brd_next; brd_next = ft_p->ft_next; } cnt = ft_p->ft_frags; // cnt > 0 if (unlikely(cnt > howmany)) break; /* no more space */ if (netmap_verbose && cnt > 1) RD(5, "rx %d frags to %d", cnt, j); ft_end = ft_p + cnt; if (unlikely(virt_hdr_mismatch)) { bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany); } else { howmany -= cnt; do { char *dst, *src = ft_p->ft_buf; size_t copy_len = ft_p->ft_len, dst_len = copy_len; slot = &ring->slot[j]; dst = NMB(&dst_na->up, slot); ND("send [%d] %d(%d) bytes at %s:%d", i, (int)copy_len, (int)dst_len, NM_IFPNAME(dst_ifp), j); /* round to a multiple of 64 */ copy_len = (copy_len + 63) & ~63; if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) || copy_len > NETMAP_BUF_SIZE(&na->up))) { RD(5, "invalid len %d, down to 64", (int)copy_len); copy_len = dst_len = 64; // XXX } if (ft_p->ft_flags & NS_INDIRECT) { if (copyin(src, dst, copy_len)) { // invalid user pointer, pretend len is 0 dst_len = 0; } } else { //memcpy(dst, src, copy_len); pkt_copy(src, dst, (int)copy_len); } slot->len = dst_len; slot->flags = (cnt << 8)| NS_MOREFRAG; j = nm_next(j, lim); needed--; ft_p++; } while (ft_p != ft_end); slot->flags = (cnt << 8); 
/* clear flag on last entry */ } /* are we done ? */ if (next == NM_FT_NULL && brd_next == NM_FT_NULL) break; } { /* current position */ uint32_t *p = kring->nkr_leases; /* shorthand */ uint32_t update_pos; int still_locked = 1; mtx_lock(&kring->q_lock); if (unlikely(howmany > 0)) { /* not used all bufs. If i am the last one * i can recover the slots, otherwise must * fill them with 0 to mark empty packets. */ ND("leftover %d bufs", howmany); if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) { /* yes i am the last one */ ND("roll back nkr_hwlease to %d", j); kring->nkr_hwlease = j; } else { while (howmany-- > 0) { ring->slot[j].len = 0; ring->slot[j].flags = 0; j = nm_next(j, lim); } } } p[lease_idx] = j; /* report I am done */ update_pos = kring->nr_hwtail; if (my_start == update_pos) { /* all slots before my_start have been reported, * so scan subsequent leases to see if other ranges * have been completed, and to a selwakeup or txsync. */ while (lease_idx != kring->nkr_lease_idx && p[lease_idx] != NR_NOSLOT) { j = p[lease_idx]; p[lease_idx] = NR_NOSLOT; lease_idx = nm_next(lease_idx, lim); } /* j is the new 'write' position. j != my_start * means there are new buffers to report */ if (likely(j != my_start)) { kring->nr_hwtail = j; still_locked = 0; mtx_unlock(&kring->q_lock); kring->nm_notify(kring, 0); /* this is netmap_notify for VALE ports and * netmap_bwrap_notify for bwrap. The latter will * trigger a txsync on the underlying hwna */ if (dst_na->retry && retry--) { /* XXX this is going to call nm_notify again. 
* Only useful for bwrap in virtual machines */ goto retry; } } } if (still_locked) mtx_unlock(&kring->q_lock); } cleanup: d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */ d->bq_len = 0; } brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */ brddst->bq_len = 0; return 0; } /* nm_txsync callback for VALE ports */ static int netmap_vp_txsync(struct netmap_kring *kring, int flags) { struct netmap_vp_adapter *na = (struct netmap_vp_adapter *)kring->na; u_int done; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; if (bridge_batch <= 0) { /* testing only */ done = head; // used all goto done; } if (!na->na_bdg) { done = head; goto done; } if (bridge_batch > NM_BDG_BATCH) bridge_batch = NM_BDG_BATCH; done = nm_bdg_preflush(kring, head); done: if (done != head) D("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail); /* * packets between 'done' and 'cur' are left unsent. */ kring->nr_hwcur = done; kring->nr_hwtail = nm_prev(done, lim); if (netmap_verbose) D("%s ring %d flags %d", na->up.name, kring->ring_id, flags); return 0; } /* rxsync code used by VALE ports nm_rxsync callback and also * internally by the brwap */ static int netmap_vp_rxsync_locked(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; int n; if (head > lim) { D("ouch dangerous reset!!!"); n = netmap_ring_reinit(kring); goto done; } /* First part, import newly received packets. */ /* actually nothing to do here, they are already in the kring */ /* Second part, skip past packets that userspace has released. 
*/
	/* (tail of netmap_vp_rxsync_locked) walk the slots the user has
	 * released, sanity-check each buffer and clear NS_BUF_CHANGED */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* consistency check, but nothing really important here */
		for (n = 0; likely(nm_i != head); n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = NMB(na, slot);

			/* NOTE(review): presumably NMB() maps an invalid
			 * buffer index to the base buffer — confirm */
			if (addr == NETMAP_BUF_BASE(kring->na)) { /* bad buf */
				D("bad buffer index %d, ignore ?",
					slot->buf_idx);
			}
			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	n = 0;	/* no error */
done:
	return n;
}


/*
 * nm_rxsync callback for VALE ports
 * user process reading from a VALE switch.
 * Already protected against concurrent calls from userspace,
 * but we must acquire the queue's lock to protect against
 * writers on the same queue.
 */
static int
netmap_vp_rxsync(struct netmap_kring *kring, int flags)
{
	int n;

	/* serialize against the switch forwarding path, which may be
	 * writing into this same rx ring (see nm_bdg_flush) */
	mtx_lock(&kring->q_lock);
	n = netmap_vp_rxsync_locked(kring, flags);
	mtx_unlock(&kring->q_lock);
	return n;
}


/* nm_bdg_attach callback for VALE ports
 * The na_vp port is this same netmap_adapter. There is no host port.
 * Returns 0 on success, EBUSY if the port is already on a bridge.
 */
static int
netmap_vp_bdg_attach(const char *name, struct netmap_adapter *na)
{
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;

	/* refuse a port that is already attached to a bridge */
	if (vpna->na_bdg)
		return EBUSY;
	na->na_vp = vpna;
	/* NOTE(review): strncpy() does not NUL-terminate on truncation;
	 * presumably 'name' always fits in na->name — confirm */
	strncpy(na->name, name, sizeof(na->name));
	na->na_hostvp = NULL;
	return 0;
}


/* create a netmap_vp_adapter that describes a VALE port.
 * Only persistent VALE ports have a non-null ifp.
*/
/* On success *ret points to the newly allocated adapter and 0 is
 * returned; otherwise an error number is returned (ENOMEM, or what
 * netmap_mem_private_new()/netmap_attach_common() reported) and
 * everything allocated here is released. The ring/slot/pipe/extra-buf
 * counts requested in *nmr are clamped and written back to *nmr. */
static int
netmap_vp_create(struct nmreq *nmr, struct ifnet *ifp,
		struct netmap_vp_adapter **ret)
{
	struct netmap_vp_adapter *vpna;
	struct netmap_adapter *na;
	int error;
	u_int npipes = 0;

	vpna = malloc(sizeof(*vpna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vpna == NULL)
		return ENOMEM;

	na = &vpna->up;

	na->ifp = ifp;
	/* NOTE(review): strncpy() does not NUL-terminate on truncation;
	 * presumably nr_name always fits in na->name — confirm */
	strncpy(na->name, nmr->nr_name, sizeof(na->name));

	/* bound checking */
	na->num_tx_rings = nmr->nr_tx_rings;
	nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
	nmr->nr_tx_rings = na->num_tx_rings; // write back
	na->num_rx_rings = nmr->nr_rx_rings;
	nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
	nmr->nr_rx_rings = na->num_rx_rings; // write back
	nm_bound_var(&nmr->nr_tx_slots, NM_BRIDGE_RINGSIZE, 1,
			NM_BDG_MAXSLOTS, NULL);
	na->num_tx_desc = nmr->nr_tx_slots;
	nm_bound_var(&nmr->nr_rx_slots, NM_BRIDGE_RINGSIZE, 1,
			NM_BDG_MAXSLOTS, NULL);
	/* validate number of pipes. We want at least 1,
	 * but probably can do with some more.
	 * So let's use 2 as default (when 0 is supplied) */
	npipes = nmr->nr_arg1;
	nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
	nmr->nr_arg1 = npipes;	/* write back */
	/* validate extra bufs */
	nm_bound_var(&nmr->nr_arg3, 0, 0,
			128*NM_BDG_MAXSLOTS, NULL);
	na->num_rx_desc = nmr->nr_rx_slots;
	/* default max frame size: standard ethernet MTU + headers */
	vpna->mfs = 1514;
	vpna->last_smac = ~0llu;
	/*if (vpna->mfs > netmap_buf_size)  TODO netmap_buf_size is zero??
	vpna->mfs = netmap_buf_size; */
	if (netmap_verbose)
		D("max frame size %u", vpna->mfs);

	na->na_flags |= NAF_BDG_MAYSLEEP;
	/* persistent VALE ports look like hw devices
	 * with a native netmap adapter
	 */
	if (ifp)
		na->na_flags |= NAF_NATIVE;
	/* install the VALE-port implementations of the na callbacks */
	na->nm_txsync = netmap_vp_txsync;
	na->nm_rxsync = netmap_vp_rxsync;
	na->nm_register = netmap_vp_reg;
	na->nm_krings_create = netmap_vp_krings_create;
	na->nm_krings_delete = netmap_vp_krings_delete;
	na->nm_dtor = netmap_vp_dtor;
	/* each VALE port gets its own private memory allocator */
	na->nm_mem = netmap_mem_private_new(na->name,
			na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc,
			nmr->nr_arg3, npipes, &error);
	if (na->nm_mem == NULL)
		goto err;
	na->nm_bdg_attach = netmap_vp_bdg_attach;
	/* other nmd fields are set in the common routine */
	error = netmap_attach_common(na);
	if (error)
		goto err;
	*ret = vpna;
	return 0;

err:
	/* undo in reverse order: allocator first, then the adapter */
	if (na->nm_mem != NULL)
		netmap_mem_delete(na->nm_mem);
	free(vpna, M_DEVBUF);
	return error;
}


/* Bridge wrapper code (bwrap).
 * This is used to connect a non-VALE-port netmap_adapter (hwna) to a
 * VALE switch.
 * The main task is to swap the meaning of tx and rx rings to match the
 * expectations of the VALE switch code (see nm_bdg_flush).
 *
 * The bwrap works by interposing a netmap_bwrap_adapter between the
 * rest of the system and the hwna. The netmap_bwrap_adapter looks like
 * a netmap_vp_adapter to the rest of the system, but, internally, it
 * translates all callbacks to what the hwna expects.
 *
 * Note that we have to intercept callbacks coming from two sides:
 *
 *  - callbacks coming from the netmap module are intercepted by
 *    passing around the netmap_bwrap_adapter instead of the hwna
 *
 *  - callbacks coming from outside of the netmap module only know
 *    about the hwna. This, however, only happens in interrupt
 *    handlers, where only the hwna->nm_notify callback is called.
 *    What the bwrap does is to overwrite the hwna->nm_notify callback
 *    with its own netmap_bwrap_intr_notify.
* XXX This assumes that the hwna->nm_notify callback was the
 * standard netmap_notify(), as it is the case for nic adapters.
 * Any additional action performed by hwna->nm_notify will not be
 * performed by netmap_bwrap_intr_notify.
 *
 * Additionally, the bwrap can optionally attach the host rings pair
 * of the wrapped adapter to a different port of the switch.
 */


/* nm_dtor callback for bwraps.
 * Detach the main (and, if attached, the host) port from the bridge,
 * break the cross links with the wrapped hwna and release the
 * reference on it that was taken in netmap_bwrap_attach().
 */
static void
netmap_bwrap_dtor(struct netmap_adapter *na)
{
	struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na;
	struct netmap_adapter *hwna = bna->hwna;
	struct nm_bridge *b = bna->up.na_bdg,
		*bh = bna->host.na_bdg;

	/* detach from the bridge; -1 means the host port was never
	 * attached to one */
	if (b) {
		netmap_bdg_detach_common(b, bna->up.bdg_port,
			    (bh ? bna->host.bdg_port : -1));
	}

	ND("na %p", na);
	na->ifp = NULL;
	bna->host.up.ifp = NULL;
	/* break the weak back-references from the hwna to the bwrap */
	hwna->na_private = NULL;
	hwna->na_vp = hwna->na_hostvp = NULL;
	hwna->na_flags &= ~NAF_BUSY;
	/* drop the reference taken with netmap_adapter_get() at attach */
	netmap_adapter_put(hwna);
}


/*
 * Intr callback for NICs connected to a bridge.
 * Simply ignore tx interrupts (maybe we could try to recover space ?)
 * and pass received packets from nic to the bridge.
 *
 * XXX TODO check locking: this is called from the interrupt
 * handler so we should make sure that the interface is not
 * disconnected while passing down an interrupt.
 *
 * Note, no user process can access this NIC or the host stack.
 * The only part of the ring that is significant are the slots,
 * and head/cur/tail are set from the kring as needed
 * (part as a receive ring, part as a transmit ring).
 *
 * callback that overwrites the hwna notify callback.
 * Packets come from the outside or from the host stack and are put on an
 * hwna rx ring.
 * The bridge wrapper then sends the packets through the bridge.
*/ static int netmap_bwrap_intr_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_kring *bkring; struct netmap_vp_adapter *vpna = &bna->up; u_int ring_nr = kring->ring_id; int ret = NM_IRQ_COMPLETED; int error; if (netmap_verbose) D("%s %s 0x%x", na->name, kring->name, flags); bkring = &vpna->up.tx_rings[ring_nr]; /* make sure the ring is not disabled */ if (nm_kr_tryget(kring, 0 /* can't sleep */, NULL)) { return EIO; } if (netmap_verbose) D("%s head %d cur %d tail %d", na->name, kring->rhead, kring->rcur, kring->rtail); /* simulate a user wakeup on the rx ring * fetch packets that have arrived. */ error = kring->nm_sync(kring, 0); if (error) goto put_out; if (kring->nr_hwcur == kring->nr_hwtail) { if (netmap_verbose) D("how strange, interrupt with no packets on %s", na->name); goto put_out; } /* new packets are kring->rcur to kring->nr_hwtail, and the bkring * had hwcur == bkring->rhead. So advance bkring->rhead to kring->nr_hwtail * to push all packets out. */ bkring->rhead = bkring->rcur = kring->nr_hwtail; netmap_vp_txsync(bkring, flags); /* mark all buffers as released on this ring */ kring->rhead = kring->rcur = kring->rtail = kring->nr_hwtail; /* another call to actually release the buffers */ error = kring->nm_sync(kring, 0); /* The second rxsync may have further advanced hwtail. If this happens, * return NM_IRQ_RESCHED, otherwise just return NM_IRQ_COMPLETED. */ if (kring->rcur != kring->nr_hwtail) { ret = NM_IRQ_RESCHED; } put_out: nm_kr_put(kring); return error ? error : ret; } /* nm_register callback for bwrap */ static int netmap_bwrap_reg(struct netmap_adapter *na, int onoff) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; struct netmap_vp_adapter *hostna = &bna->host; int error, i; enum txrx t; ND("%s %s", na->name, onoff ? 
"on" : "off"); if (onoff) { /* netmap_do_regif has been called on the bwrap na. * We need to pass the information about the * memory allocator down to the hwna before * putting it in netmap mode */ hwna->na_lut = na->na_lut; if (hostna->na_bdg) { /* if the host rings have been attached to switch, * we need to copy the memory allocator information * in the hostna also */ hostna->up.na_lut = na->na_lut; } /* cross-link the netmap rings * The original number of rings comes from hwna, * rx rings on one side equals tx rings on the other. */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) { NMR(hwna, r)[i].ring = NMR(na, t)[i].ring; } } if (na->na_flags & NAF_HOST_RINGS) { struct netmap_adapter *hna = &hostna->up; /* the hostna rings are the host rings of the bwrap. * The corresponding krings must point back to the * hostna */ hna->tx_rings = &na->tx_rings[na->num_tx_rings]; hna->tx_rings[0].na = hna; hna->rx_rings = &na->rx_rings[na->num_rx_rings]; hna->rx_rings[0].na = hna; } } /* pass down the pending ring state information */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) NMR(hwna, t)[i].nr_pending_mode = NMR(na, t)[i].nr_pending_mode; } /* forward the request to the hwna */ error = hwna->nm_register(hwna, onoff); if (error) return error; /* copy up the current ring state information */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) NMR(na, t)[i].nr_mode = NMR(hwna, t)[i].nr_mode; } /* impersonate a netmap_vp_adapter */ netmap_vp_reg(na, onoff); if (hostna->na_bdg) netmap_vp_reg(&hostna->up, onoff); if (onoff) { u_int i; /* intercept the hwna nm_nofify callback on the hw rings */ for (i = 0; i < hwna->num_rx_rings; i++) { hwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify; hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify; } i = hwna->num_rx_rings; /* for safety */ /* save the host ring notify unconditionally */ hwna->rx_rings[i].save_notify = 
hwna->rx_rings[i].nm_notify; if (hostna->na_bdg) { /* also intercept the host ring notify */ hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify; } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; } else { u_int i; if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; /* reset all notify callbacks (including host ring) */ for (i = 0; i <= hwna->num_rx_rings; i++) { hwna->rx_rings[i].nm_notify = hwna->rx_rings[i].save_notify; hwna->rx_rings[i].save_notify = NULL; } hwna->na_lut.lut = NULL; hwna->na_lut.objtotal = 0; hwna->na_lut.objsize = 0; } return 0; } /* nm_config callback for bwrap */ static int netmap_bwrap_config(struct netmap_adapter *na, u_int *txr, u_int *txd, u_int *rxr, u_int *rxd) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; /* forward the request */ netmap_update_config(hwna); /* swap the results */ *txr = hwna->num_rx_rings; *txd = hwna->num_rx_desc; *rxr = hwna->num_tx_rings; *rxd = hwna->num_rx_desc; return 0; } /* nm_krings_create callback for bwrap */ static int netmap_bwrap_krings_create(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; int i, error = 0; enum txrx t; ND("%s", na->name); /* impersonate a netmap_vp_adapter */ error = netmap_vp_krings_create(na); if (error) return error; /* also create the hwna krings */ error = hwna->nm_krings_create(hwna); if (error) { goto err_del_vp_rings; } /* get each ring slot number from the corresponding hwna ring */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) { NMR(na, t)[i].nkr_num_slots = NMR(hwna, r)[i].nkr_num_slots; } } return 0; err_del_vp_rings: netmap_vp_krings_delete(na); return error; } static void netmap_bwrap_krings_delete(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = 
bna->hwna; ND("%s", na->name); hwna->nm_krings_delete(hwna); netmap_vp_krings_delete(na); } /* notify method for the bridge-->hwna direction */ static int netmap_bwrap_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_adapter *hwna = bna->hwna; u_int ring_n = kring->ring_id; u_int lim = kring->nkr_num_slots - 1; struct netmap_kring *hw_kring; int error; - ND("%s: na %s hwna %s", + ND("%s: na %s hwna %s", (kring ? kring->name : "NULL!"), (na ? na->name : "NULL!"), (hwna ? hwna->name : "NULL!")); hw_kring = &hwna->tx_rings[ring_n]; if (nm_kr_tryget(hw_kring, 0, NULL)) { return ENXIO; } /* first step: simulate a user wakeup on the rx ring */ netmap_vp_rxsync(kring, flags); ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, ring->head, ring->cur, ring->tail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_ring->rtail); /* second step: the new packets are sent on the tx ring * (which is actually the same ring) */ hw_kring->rhead = hw_kring->rcur = kring->nr_hwtail; error = hw_kring->nm_sync(hw_kring, flags); if (error) goto put_out; /* third step: now we are back the rx ring */ /* claim ownership on all hw owned bufs */ kring->rhead = kring->rcur = nm_next(hw_kring->nr_hwtail, lim); /* skip past reserved slot */ /* fourth step: the user goes to sleep again, causing another rxsync */ netmap_vp_rxsync(kring, flags); ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, ring->head, ring->cur, ring->tail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail); put_out: nm_kr_put(hw_kring); return error ? error : NM_IRQ_COMPLETED; } /* nm_bdg_ctl callback for the bwrap. * Called on bridge-attach and detach, as an effect of vale-ctl -[ahd]. 
* On attach, it needs to provide a fake netmap_priv_d structure and * perform a netmap_do_regif() on the bwrap. This will put both the * bwrap and the hwna in netmap mode, with the netmap rings shared * and cross linked. Moroever, it will start intercepting interrupts * directed to hwna. */ static int netmap_bwrap_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach) { struct netmap_priv_d *npriv; struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na; int error = 0; if (attach) { if (NETMAP_OWNED_BY_ANY(na)) { return EBUSY; } if (bna->na_kpriv) { /* nothing to do */ return 0; } npriv = netmap_priv_new(); if (npriv == NULL) return ENOMEM; npriv->np_ifp = na->ifp; /* let the priv destructor release the ref */ error = netmap_do_regif(npriv, na, 0, NR_REG_NIC_SW); if (error) { netmap_priv_delete(npriv); return error; } bna->na_kpriv = npriv; na->na_flags |= NAF_BUSY; } else { if (na->active_fds == 0) /* not registered */ return EINVAL; netmap_priv_delete(bna->na_kpriv); bna->na_kpriv = NULL; na->na_flags &= ~NAF_BUSY; } return error; } /* attach a bridge wrapper to the 'real' device */ int netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna) { struct netmap_bwrap_adapter *bna; struct netmap_adapter *na = NULL; struct netmap_adapter *hostna = NULL; int error = 0; enum txrx t; /* make sure the NIC is not already in use */ if (NETMAP_OWNED_BY_ANY(hwna)) { D("NIC %s busy, cannot attach to bridge", hwna->name); return EBUSY; } bna = malloc(sizeof(*bna), M_DEVBUF, M_NOWAIT | M_ZERO); if (bna == NULL) { return ENOMEM; } na = &bna->up.up; /* make bwrap ifp point to the real ifp */ na->ifp = hwna->ifp; na->na_private = bna; strncpy(na->name, nr_name, sizeof(na->name)); /* fill the ring data for the bwrap adapter with rx/tx meanings * swapped. The real cross-linking will be done during register, * when all the krings will have been created. 
*/ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ nma_set_nrings(na, t, nma_get_nrings(hwna, r)); nma_set_ndesc(na, t, nma_get_ndesc(hwna, r)); } na->nm_dtor = netmap_bwrap_dtor; na->nm_register = netmap_bwrap_reg; // na->nm_txsync = netmap_bwrap_txsync; // na->nm_rxsync = netmap_bwrap_rxsync; na->nm_config = netmap_bwrap_config; na->nm_krings_create = netmap_bwrap_krings_create; na->nm_krings_delete = netmap_bwrap_krings_delete; na->nm_notify = netmap_bwrap_notify; na->nm_bdg_ctl = netmap_bwrap_bdg_ctl; na->pdev = hwna->pdev; na->nm_mem = hwna->nm_mem; na->virt_hdr_len = hwna->virt_hdr_len; bna->up.retry = 1; /* XXX maybe this should depend on the hwna */ bna->hwna = hwna; netmap_adapter_get(hwna); hwna->na_private = bna; /* weak reference */ hwna->na_vp = &bna->up; if (hwna->na_flags & NAF_HOST_RINGS) { if (hwna->na_flags & NAF_SW_ONLY) na->na_flags |= NAF_SW_ONLY; na->na_flags |= NAF_HOST_RINGS; hostna = &bna->host.up; snprintf(hostna->name, sizeof(hostna->name), "%s^", nr_name); hostna->ifp = hwna->ifp; for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); nma_set_nrings(hostna, t, 1); nma_set_ndesc(hostna, t, nma_get_ndesc(hwna, r)); } // hostna->nm_txsync = netmap_bwrap_host_txsync; // hostna->nm_rxsync = netmap_bwrap_host_rxsync; hostna->nm_notify = netmap_bwrap_notify; hostna->nm_mem = na->nm_mem; hostna->na_private = bna; hostna->na_vp = &bna->up; na->na_hostvp = hwna->na_hostvp = hostna->na_hostvp = &bna->host; hostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */ } ND("%s<->%s txr %d txd %d rxr %d rxd %d", na->name, ifp->if_xname, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc); error = netmap_attach_common(na); if (error) { goto err_free; } hwna->na_flags |= NAF_BUSY; return 0; err_free: hwna->na_vp = hwna->na_hostvp = NULL; netmap_adapter_put(hwna); free(bna, M_DEVBUF); return error; } struct nm_bridge * netmap_init_bridges2(u_int n) { int i; struct nm_bridge *b; b = malloc(sizeof(struct nm_bridge) * n, M_DEVBUF, 
M_NOWAIT | M_ZERO);
	if (b == NULL)
		return NULL;
	/* initialize the per-bridge rwlock of each descriptor */
	for (i = 0; i < n; i++)
		BDG_RWINIT(&b[i]);
	return b;
}


/* Release an array of n nm_bridge descriptors allocated by
 * netmap_init_bridges2(), destroying each per-bridge rwlock first.
 * A NULL b is a no-op. */
void
netmap_uninit_bridges2(struct nm_bridge *b, u_int n)
{
	int i;

	if (b == NULL)
		return;

	for (i = 0; i < n; i++)
		BDG_RWDESTROY(&b[i]);
	free(b, M_DEVBUF);
}

/* Module-init hook for the VALE subsystem.
 * With CONFIG_NET_NS the bridge arrays are managed per network
 * namespace by netmap_bns_register(); otherwise allocate the single
 * global nm_bridges array. Returns 0 on success or ENOMEM. */
int
netmap_init_bridges(void)
{
#ifdef CONFIG_NET_NS
	return netmap_bns_register();
#else
	nm_bridges = netmap_init_bridges2(NM_BRIDGES);
	if (nm_bridges == NULL)
		return ENOMEM;
	return 0;
#endif
}

/* Module-exit counterpart of netmap_init_bridges(). */
void
netmap_uninit_bridges(void)
{
#ifdef CONFIG_NET_NS
	netmap_bns_unregister();
#else
	netmap_uninit_bridges2(nm_bridges, NM_BRIDGES);
#endif
}
#endif /* WITH_VALE */
Index: head/sys/net/netmap_user.h
===================================================================
--- head/sys/net/netmap_user.h	(revision 307571)
+++ head/sys/net/netmap_user.h	(revision 307572)
@@ -1,1084 +1,1084 @@
/*
 * Copyright (C) 2011-2016 Universita` di Pisa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Functions and macros to manipulate netmap structures and packets * in userspace. See netmap(4) for more information. * * The address of the struct netmap_if, say nifp, is computed from the * value returned from ioctl(.., NIOCREG, ...) and the mmap region: * ioctl(fd, NIOCREG, &req); * mem = mmap(0, ... ); * nifp = NETMAP_IF(mem, req.nr_nifp); * (so simple, we could just do it manually) * * From there: * struct netmap_ring *NETMAP_TXRING(nifp, index) * struct netmap_ring *NETMAP_RXRING(nifp, index) * we can access ring->cur, ring->head, ring->tail, etc. * * ring->slot[i] gives us the i-th slot (we can access * directly len, flags, buf_idx) * * char *buf = NETMAP_BUF(ring, x) returns a pointer to * the buffer numbered x * * All ring indexes (head, cur, tail) should always move forward. * To compute the next index in a circular ring you can use * i = nm_ring_next(ring, i); * * To ease porting apps from pcap to netmap we supply a few fuctions * that can be called to open, close, read and write on netmap in a way * similar to libpcap. Note that the read/write function depend on * an ioctl()/select()/poll() being issued to refill rings or push * packets out. * * In order to use these, include #define NETMAP_WITH_LIBS * in the source file that invokes these functions. 
*/ #ifndef _NET_NETMAP_USER_H_ #define _NET_NETMAP_USER_H_ #define NETMAP_DEVICE_NAME "/dev/netmap" #ifdef __CYGWIN__ /* * we can compile userspace apps with either cygwin or msvc, * and we use _WIN32 to identify windows specific code */ #ifndef _WIN32 #define _WIN32 #endif /* _WIN32 */ #endif /* __CYGWIN__ */ #ifdef _WIN32 #undef NETMAP_DEVICE_NAME #define NETMAP_DEVICE_NAME "/proc/sys/DosDevices/Global/netmap" #include #include #include #endif /* _WIN32 */ #include #include /* apple needs sockaddr */ #include /* IFNAMSIZ */ #include #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif /* likely and unlikely */ #include /* helper macro */ #define _NETMAP_OFFSET(type, ptr, offset) \ ((type)(void *)((char *)(ptr) + (offset))) #define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs) #define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index] ) #define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] ) #define NETMAP_BUF(ring, index) \ ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size)) #define NETMAP_BUF_IDX(ring, buf) \ ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \ (ring)->nr_buf_size ) static inline uint32_t nm_ring_next(struct netmap_ring *r, uint32_t i) { return ( unlikely(i + 1 == r->num_slots) ? 0 : i + 1); } /* * Return 1 if we have pending transmissions in the tx ring. * When everything is complete ring->head = ring->tail + 1 (modulo ring size) */ static inline int nm_tx_pending(struct netmap_ring *r) { return nm_ring_next(r, r->tail) != r->head; } static inline uint32_t nm_ring_space(struct netmap_ring *ring) { int ret = ring->tail - ring->cur; if (ret < 0) ret += ring->num_slots; return ret; } #ifdef NETMAP_WITH_LIBS /* * Support for simple I/O libraries. * Include other system headers required for compiling this. 
*/ #ifndef HAVE_NETMAP_WITH_LIBS #define HAVE_NETMAP_WITH_LIBS #include #include #include #include /* memset */ #include #include /* EINVAL */ #include /* O_RDWR */ #include /* close() */ #include #include #ifndef ND /* debug macros */ /* debug support */ #define ND(_fmt, ...) do {} while(0) #define D(_fmt, ...) \ do { \ struct timeval _t0; \ gettimeofday(&_t0, NULL); \ fprintf(stderr, "%03d.%06d %s [%d] " _fmt "\n", \ (int)(_t0.tv_sec % 1000), (int)_t0.tv_usec, \ __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) /* Rate limited version of "D", lps indicates how many per second */ #define RD(lps, format, ...) \ do { \ static int __t0, __cnt; \ struct timeval __xxts; \ gettimeofday(&__xxts, NULL); \ if (__t0 != __xxts.tv_sec) { \ __t0 = __xxts.tv_sec; \ __cnt = 0; \ } \ if (__cnt++ < lps) { \ D(format, ##__VA_ARGS__); \ } \ } while (0) #endif struct nm_pkthdr { /* first part is the same as pcap_pkthdr */ struct timeval ts; uint32_t caplen; uint32_t len; uint64_t flags; /* NM_MORE_PKTS etc */ #define NM_MORE_PKTS 1 struct nm_desc *d; struct netmap_slot *slot; uint8_t *buf; }; struct nm_stat { /* same as pcap_stat */ u_int ps_recv; u_int ps_drop; u_int ps_ifdrop; #ifdef WIN32 /* XXX or _WIN32 ? */ u_int bs_capt; #endif /* WIN32 */ }; #define NM_ERRBUF_SIZE 512 struct nm_desc { struct nm_desc *self; /* point to self if netmap. */ int fd; void *mem; uint32_t memsize; int done_mmap; /* set if mem is the result of mmap */ struct netmap_if * const nifp; uint16_t first_tx_ring, last_tx_ring, cur_tx_ring; uint16_t first_rx_ring, last_rx_ring, cur_rx_ring; struct nmreq req; /* also contains the nr_name = ifname */ struct nm_pkthdr hdr; /* * The memory contains netmap_if, rings and then buffers. * Given a pointer (e.g. to nm_inject) we can compare with * mem/buf_start/buf_end to tell if it is a buffer or * some other descriptor in our region. * We also store a pointer to some ring as it helps in the * translation from buffer indexes to addresses. 
*/ struct netmap_ring * const some_ring; void * const buf_start; void * const buf_end; /* parameters from pcap_open_live */ int snaplen; int promisc; int to_ms; char *errbuf; /* save flags so we can restore them on close */ uint32_t if_flags; uint32_t if_reqcap; uint32_t if_curcap; struct nm_stat st; char msg[NM_ERRBUF_SIZE]; }; /* * when the descriptor is open correctly, d->self == d * Eventually we should also use some magic number. */ #define P2NMD(p) ((struct nm_desc *)(p)) #define IS_NETMAP_DESC(d) ((d) && P2NMD(d)->self == P2NMD(d)) #define NETMAP_FD(d) (P2NMD(d)->fd) /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. We assume there is enough room * in the source and destination buffers. * * XXX only for multiples of 64 bytes, non overlapped. */ static inline void nm_pkt_copy(const void *_src, void *_dst, int l) { const uint64_t *src = (const uint64_t *)_src; uint64_t *dst = (uint64_t *)_dst; if (unlikely(l >= 1024)) { memcpy(dst, src, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } /* * The callback, invoked on each received packet. Same as libpcap */ typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d); /* *--- the pcap-like API --- * * nm_open() opens a file descriptor, binds to a port and maps memory. 
 *
 * ifname	(netmap:foo or vale:foo) is the port name
 *		a suffix can indicate the following:
 *		^		bind the host (sw) ring pair
 *		*		bind host and NIC ring pairs (transparent)
 *		-NN		bind individual NIC ring pair
 *		{NN		bind master side of pipe NN
 *		}NN		bind slave side of pipe NN
 *		a suffix starting with / and the following flags,
 *		in any order:
 *		x		exclusive access
 *		z		zero copy monitor
 *		t		monitor tx side
 *		r		monitor rx side
 *		R		bind only RX ring(s)
 *		T		bind only TX ring(s)
 *
 * req		provides the initial values of nmreq before parsing ifname.
 *		Remember that the ifname parsing will override the ring
 *		number in nm_ringid, and part of nm_flags;
 * flags	special functions, normally 0
 *		indicates which fields of *arg are significant
 * arg		special functions, normally NULL
 *		if passed a netmap_desc with mem != NULL,
 *		use that memory instead of mmap.
 */
static struct nm_desc *nm_open(const char *ifname, const struct nmreq *req,
	uint64_t flags, const struct nm_desc *arg);

/*
 * nm_open can import some fields from the parent descriptor.
 * These flags control which ones.
 * Also in flags you can specify NETMAP_NO_TX_POLL and NETMAP_DO_RX_POLL,
 * which set the initial value for these flags.
 * Note that the 16 low bits of the flags are reserved for data
 * that may go into the nmreq.
 */
enum {
	NM_OPEN_NO_MMAP =	0x040000, /* reuse mmap from parent */
	NM_OPEN_IFNAME =	0x080000, /* nr_name, nr_ringid, nr_flags */
	NM_OPEN_ARG1 =		0x100000,
	NM_OPEN_ARG2 =		0x200000,
	NM_OPEN_ARG3 =		0x400000,
	NM_OPEN_RING_CFG =	0x800000, /* tx|rx rings|slots */
};


/*
 * nm_close() closes and restores the port to its previous state
 */
static int nm_close(struct nm_desc *);

/*
 * nm_mmap() do mmap or inherit from parent if the nr_arg2
 * (memory block) matches.
 */
static int nm_mmap(struct nm_desc *, const struct nm_desc *);

/*
 * nm_inject() is the same as pcap_inject()
 * nm_dispatch() is the same as pcap_dispatch()
 * nm_nextpkt() is the same as pcap_next()
 */
static int nm_inject(struct nm_desc *, const void *, size_t);
static int nm_dispatch(struct nm_desc *, int, nm_cb_t, u_char *);
static u_char *nm_nextpkt(struct nm_desc *, struct nm_pkthdr *);

#ifdef _WIN32

intptr_t _get_osfhandle(int); /* defined in io.h in windows */

/*
 * In windows we do not have yet native poll support, so we keep track
 * of file descriptors associated to netmap ports to emulate poll on
 * them and fall back on regular poll on other file descriptors.
 */
struct win_netmap_fd_list {
	struct win_netmap_fd_list *next;
	int win_netmap_fd;
	HANDLE win_netmap_handle;
};

/*
 * list head containing all the netmap opened fd and their
 * windows HANDLE counterparts
 */
static struct win_netmap_fd_list *win_netmap_fd_list_head;

/* Remember fd as a netmap descriptor, caching its OS HANDLE.
 * No-op if fd is already on the list.
 * NOTE(review): calloc() result is not checked here -- a failed
 * allocation would dereference NULL; flag for upstream.
 */
static void
win_insert_fd_record(int fd)
{
	struct win_netmap_fd_list *curr;

	/* avoid duplicate entries for the same fd */
	for (curr = win_netmap_fd_list_head; curr; curr = curr->next) {
		if (fd == curr->win_netmap_fd) {
			return;
		}
	}
	curr = calloc(1, sizeof(*curr));
	curr->next = win_netmap_fd_list_head;
	curr->win_netmap_fd = fd;
	curr->win_netmap_handle = IntToPtr(_get_osfhandle(fd));
	win_netmap_fd_list_head = curr;
}

/* Unlink and free the record for fd; no-op if fd is not on the list. */
void
win_remove_fd_record(int fd)
{
	struct win_netmap_fd_list *curr = win_netmap_fd_list_head;
	struct win_netmap_fd_list *prev = NULL;

	for (; curr ; prev = curr, curr = curr->next) {
		if (fd != curr->win_netmap_fd)
			continue;
		/* found the entry */
		if (prev == NULL) { /* we are freeing the first entry */
			win_netmap_fd_list_head = curr->next;
		} else {
			prev->next = curr->next;
		}
		free(curr);
		break;
	}
}

/* Return the cached HANDLE for fd, or NULL if fd is not a netmap fd. */
HANDLE
win_get_netmap_handle(int fd)
{
	struct win_netmap_fd_list *curr;

	for (curr = win_netmap_fd_list_head; curr; curr = curr->next) {
		if (fd == curr->win_netmap_fd) {
			return curr->win_netmap_handle;
		}
	}
	return NULL;
}

/*
 * we need to wrap ioctl and mmap, at least for the netmap file descriptors
 */

/*
 * use this function only from netmap_user.h internal functions
 * same as ioctl, returns 0 on success and -1 on error
 */
static int
win_nm_ioctl_internal(HANDLE h, int32_t ctlCode, void *arg)
{
	DWORD bReturn = 0, szIn, szOut;
	BOOL ioctlReturnStatus;
	void *inParam = arg, *outParam = arg;

	/* map each supported netmap ioctl to its in/out buffer sizes */
	switch (ctlCode) {
	case NETMAP_POLL:
		szIn = sizeof(POLL_REQUEST_DATA);
		szOut = sizeof(POLL_REQUEST_DATA);
		break;
	case NETMAP_MMAP:
		szIn = 0;
		szOut = sizeof(void*);
		inParam = NULL; /* nothing on input */
		break;
	case NIOCTXSYNC:
	case NIOCRXSYNC:
		szIn = 0;
		szOut = 0;
		break;
	case NIOCREGIF:
		szIn = sizeof(struct nmreq);
		szOut = sizeof(struct nmreq);
		break;
	case NIOCCONFIG:
		D("unsupported NIOCCONFIG!");
		return -1;
	default: /* a regular ioctl */
		D("invalid ioctl %x on netmap fd", ctlCode);
		return -1;
	}

	ioctlReturnStatus = DeviceIoControl(h,
				ctlCode, inParam, szIn,
				outParam, szOut,
				&bReturn, NULL);
	// XXX note windows returns 0 on error or async call, 1 on success
	// we could call GetLastError() to figure out what happened
	return ioctlReturnStatus ? 0 : -1;
}

/*
 * this function is what must be called from user-space programs
 * same as ioctl, returns 0 on success and -1 on error
 */
static int
win_nm_ioctl(int fd, int32_t ctlCode, void *arg)
{
	HANDLE h = win_get_netmap_handle(fd);

	/* non-netmap fds fall through to the regular ioctl */
	if (h == NULL) {
		return ioctl(fd, ctlCode, arg);
	} else {
		return win_nm_ioctl_internal(h, ctlCode, arg);
	}
}

#define ioctl win_nm_ioctl

/* from now on, within this file ...
 */

/*
 * We cannot use the native mmap on windows
 * The only parameter used is "fd", the other ones are just declared to
 * make this signature comparable to the FreeBSD/Linux one
 */
static void *
win32_mmap_emulated(void *addr, size_t length, int prot, int flags, int fd, int32_t offset)
{
	HANDLE h = win_get_netmap_handle(fd);

	if (h == NULL) {
		return mmap(addr, length, prot, flags, fd, offset);
	} else {
		MEMORY_ENTRY ret;

		return win_nm_ioctl_internal(h, NETMAP_MMAP, &ret) ?
			NULL : ret.pUsermodeVirtualAddress;
	}
}

#define mmap win32_mmap_emulated

/* NOTE(review): the header name of this include (presumably <poll.h> /
 * a winsock equivalent) was lost in extraction -- restore from upstream.
 */
#include /* XXX needed to use the structure pollfd */

/* poll() emulation: a single netmap fd is polled via ioctl; everything
 * else falls back to the system poll().
 */
static int
win_nm_poll(struct pollfd *fds, int nfds, int timeout)
{
	HANDLE h;

	/* NOTE(review): stray ';' right after the '{' below -- a harmless
	 * empty statement, but it should be removed upstream.
	 */
	if (nfds != 1 || fds == NULL || (h = win_get_netmap_handle(fds->fd)) == NULL) {;
		return poll(fds, nfds, timeout);
	} else {
		POLL_REQUEST_DATA prd;

		prd.timeout = timeout;
		prd.events = fds->events;

		win_nm_ioctl_internal(h, NETMAP_POLL, &prd);
		if ((prd.revents == POLLERR) || (prd.revents == STATUS_TIMEOUT)) {
			return -1;
		}
		return 1;
	}
}

#define poll win_nm_poll

/* open() wrapper: opening the netmap device also registers the fd in
 * the netmap fd list so the other wrappers can recognize it.
 */
static int
win_nm_open(char* pathname, int flags)
{
	if (strcmp(pathname, NETMAP_DEVICE_NAME) == 0) {
		int fd = open(NETMAP_DEVICE_NAME, O_RDWR);
		if (fd < 0) {
			return -1;
		}

		win_insert_fd_record(fd);
		return fd;
	} else {
		return open(pathname, flags);
	}
}

#define open win_nm_open

/* close() wrapper: also drops the fd from the netmap fd list. */
static int
win_nm_close(int fd)
{
	if (fd != -1) {
		close(fd);
		if (win_get_netmap_handle(fd) != NULL) {
			win_remove_fd_record(fd);
		}
	}
	return 0;
}

#define close win_nm_close

#endif /* _WIN32 */

/* Return 1 if all chars in [s, e) are alphanumeric or '_', 0 otherwise.
 * Used to validate vale bridge and port names.
 */
static int
nm_is_identifier(const char *s, const char *e)
{
	for (; s != e; s++) {
		if (!isalnum(*s) && *s != '_') {
			return 0;
		}
	}

	return 1;
}

/*
 * Try to open, return descriptor if successful, NULL otherwise.
 * An invalid netmap name will return errno = 0;
 * You can pass a pointer to a pre-filled nm_desc to add special
 * parameters.
 Flags is used as follows
 * NM_OPEN_NO_MMAP	use the memory from arg, only XXX avoid mmap
 *			if the nr_arg2 (memory block) matches.
 * NM_OPEN_ARG1		use req.nr_arg1 from arg
 * NM_OPEN_ARG2		use req.nr_arg2 from arg
 * NM_OPEN_RING_CFG	user ring config from arg
 */
static struct nm_desc *
nm_open(const char *ifname, const struct nmreq *req,
	uint64_t new_flags, const struct nm_desc *arg)
{
	struct nm_desc *d = NULL;
	const struct nm_desc *parent = arg;
	u_int namelen;
	uint32_t nr_ringid = 0, nr_flags, nr_reg;
	const char *port = NULL;
	const char *vpname = NULL;
#define MAXERRMSG 80
	char errmsg[MAXERRMSG] = "";
	/* states of the small state machine that parses the port suffix */
	enum {	P_START, P_RNGSFXOK, P_GETNUM, P_FLAGS, P_FLAGSOK } p_state;
	int is_vale;
	long num;

	/* only netmap:... and vale... names are handled here */
	if (strncmp(ifname, "netmap:", 7) &&
			strncmp(ifname, NM_BDG_NAME, strlen(NM_BDG_NAME))) {
		errno = 0; /* name not recognised, not an error */
		return NULL;
	}

	is_vale = (ifname[0] == 'v');
	if (is_vale) {
		port = index(ifname, ':');
		if (port == NULL) {
			snprintf(errmsg, MAXERRMSG, "missing ':' in vale name");
			goto fail;
		}

		if (!nm_is_identifier(ifname + 4, port)) {
			snprintf(errmsg, MAXERRMSG, "invalid bridge name");
			goto fail;
		}

		vpname = ++port;
	} else {
		ifname += 7;	/* skip the "netmap:" prefix */
		port = ifname;
	}

	/* scan for a separator */
	for (; *port && !index("-*^{}/", *port); port++)
		;

	if (is_vale && !nm_is_identifier(vpname, port)) {
		snprintf(errmsg, MAXERRMSG, "invalid bridge port name");
		goto fail;
	}

	namelen = port - ifname;
	if (namelen >= sizeof(d->req.nr_name)) {
		snprintf(errmsg, MAXERRMSG, "name too long");
		goto fail;
	}

	/* parse the suffix (ring selector and /flags) with a state machine */
	p_state = P_START;
	nr_flags = NR_REG_ALL_NIC; /* default for no suffix */
	while (*port) {
		switch (p_state) {
		case P_START:
			switch (*port) {
			case '^': /* only SW ring */
				nr_flags = NR_REG_SW;
				p_state = P_RNGSFXOK;
				break;
			case '*': /* NIC and SW */
				nr_flags = NR_REG_NIC_SW;
				p_state = P_RNGSFXOK;
				break;
			case '-': /* one NIC ring pair */
				nr_flags = NR_REG_ONE_NIC;
				p_state = P_GETNUM;
				break;
			case '{': /* pipe (master endpoint) */
				nr_flags = NR_REG_PIPE_MASTER;
				p_state = P_GETNUM;
				break;
			case '}': /* pipe (slave endpoint) */
				nr_flags = NR_REG_PIPE_SLAVE;
				p_state = P_GETNUM;
				break;
			case '/': /* start of flags */
				p_state = P_FLAGS;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unknown modifier: '%c'", *port);
				goto fail;
			}
			port++;
			break;
		case P_RNGSFXOK:
			switch (*port) {
			case '/':
				p_state = P_FLAGS;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unexpected character: '%c'", *port);
				goto fail;
			}
			port++;
			break;
		case P_GETNUM:
			num = strtol(port, (char **)&port, 10);
			if (num < 0 || num >= NETMAP_RING_MASK) {
				snprintf(errmsg, MAXERRMSG, "'%ld' out of range [0, %d)",
						num, NETMAP_RING_MASK);
				goto fail;
			}
			nr_ringid = num & NETMAP_RING_MASK;
			p_state = P_RNGSFXOK;
			break;
		case P_FLAGS:
		case P_FLAGSOK:
			switch (*port) {
			case 'x':
				nr_flags |= NR_EXCLUSIVE;
				break;
			case 'z':
				nr_flags |= NR_ZCOPY_MON;
				break;
			case 't':
				nr_flags |= NR_MONITOR_TX;
				break;
			case 'r':
				nr_flags |= NR_MONITOR_RX;
				break;
			case 'R':
				nr_flags |= NR_RX_RINGS_ONLY;
				break;
			case 'T':
				nr_flags |= NR_TX_RINGS_ONLY;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unrecognized flag: '%c'", *port);
				goto fail;
			}
			port++;
			p_state = P_FLAGSOK;
			break;
		}
	}
	if (p_state != P_START && p_state != P_RNGSFXOK && p_state != P_FLAGSOK) {
		snprintf(errmsg, MAXERRMSG, "unexpected end of port name");
		goto fail;
	}
	/* 'z' (zero-copy monitor) only makes sense together with 'r' or 't' */
	if ((nr_flags & NR_ZCOPY_MON) &&
	   !(nr_flags & (NR_MONITOR_TX|NR_MONITOR_RX))) {
		snprintf(errmsg, MAXERRMSG, "'z' used but neither 'r', nor 't' found");
		goto fail;
	}
	ND("flags: %s %s %s %s",
			(nr_flags & NR_EXCLUSIVE) ? "EXCLUSIVE" : "",
			(nr_flags & NR_ZCOPY_MON) ? "ZCOPY_MON" : "",
			(nr_flags & NR_MONITOR_TX) ? "MONITOR_TX" : "",
			(nr_flags & NR_MONITOR_RX) ? "MONITOR_RX" : "");

	d = (struct nm_desc *)calloc(1, sizeof(*d));
	if (d == NULL) {
		snprintf(errmsg, MAXERRMSG, "nm_desc alloc failure");
		errno = ENOMEM;
		return NULL;
	}
	d->self = d;	/* set this early so nm_close() works */
	d->fd = open(NETMAP_DEVICE_NAME, O_RDWR);
	if (d->fd < 0) {
		snprintf(errmsg, MAXERRMSG, "cannot open /dev/netmap: %s", strerror(errno));
		goto fail;
	}

	if (req)
		d->req = *req;
	d->req.nr_version = NETMAP_API;
	d->req.nr_ringid &= ~NETMAP_RING_MASK;

	/* these fields are overridden by ifname and flags processing */
	d->req.nr_ringid |= nr_ringid;
	d->req.nr_flags |= nr_flags;
	memcpy(d->req.nr_name, ifname, namelen);
	d->req.nr_name[namelen] = '\0';
	/* optionally import info from parent */
	if (IS_NETMAP_DESC(parent) && new_flags) {
		if (new_flags & NM_OPEN_ARG1)
			D("overriding ARG1 %d", parent->req.nr_arg1);
		d->req.nr_arg1 = new_flags & NM_OPEN_ARG1 ?
			parent->req.nr_arg1 : 4;
		if (new_flags & NM_OPEN_ARG2)
			D("overriding ARG2 %d", parent->req.nr_arg2);
		d->req.nr_arg2 = new_flags & NM_OPEN_ARG2 ?
			parent->req.nr_arg2 : 0;
		if (new_flags & NM_OPEN_ARG3)
			D("overriding ARG3 %d", parent->req.nr_arg3);
		d->req.nr_arg3 = new_flags & NM_OPEN_ARG3 ?
			parent->req.nr_arg3 : 0;
		if (new_flags & NM_OPEN_RING_CFG) {
			D("overriding RING_CFG");
			d->req.nr_tx_slots = parent->req.nr_tx_slots;
			d->req.nr_rx_slots = parent->req.nr_rx_slots;
			d->req.nr_tx_rings = parent->req.nr_tx_rings;
			d->req.nr_rx_rings = parent->req.nr_rx_rings;
		}
		if (new_flags & NM_OPEN_IFNAME) {
			D("overriding ifname %s ringid 0x%x flags 0x%x",
				parent->req.nr_name, parent->req.nr_ringid,
				parent->req.nr_flags);
			memcpy(d->req.nr_name, parent->req.nr_name,
				sizeof(d->req.nr_name));
			d->req.nr_ringid = parent->req.nr_ringid;
			d->req.nr_flags = parent->req.nr_flags;
		}
	}
	/* add the *XPOLL flags */
	d->req.nr_ringid |= new_flags & (NETMAP_NO_TX_POLL | NETMAP_DO_RX_POLL);

	if (ioctl(d->fd, NIOCREGIF, &d->req)) {
		snprintf(errmsg, MAXERRMSG, "NIOCREGIF failed: %s", strerror(errno));
		goto fail;
	}

	/* if parent is defined, do nm_mmap() even if NM_OPEN_NO_MMAP is set */
	if ((!(new_flags & NM_OPEN_NO_MMAP) || parent) && nm_mmap(d, parent)) {
		snprintf(errmsg, MAXERRMSG, "mmap failed: %s", strerror(errno));
		goto fail;
	}

	/* compute the range of tx/rx rings covered by this binding,
	 * from the registration mode the kernel confirmed in nr_flags
	 */
	nr_reg = d->req.nr_flags & NR_REG_MASK;

	if (nr_reg == NR_REG_SW) { /* host stack */
		d->first_tx_ring = d->last_tx_ring = d->req.nr_tx_rings;
		d->first_rx_ring = d->last_rx_ring = d->req.nr_rx_rings;
	} else if (nr_reg == NR_REG_ALL_NIC) { /* only nic */
		d->first_tx_ring = 0;
		d->first_rx_ring = 0;
		d->last_tx_ring = d->req.nr_tx_rings - 1;
		d->last_rx_ring = d->req.nr_rx_rings - 1;
	} else if (nr_reg == NR_REG_NIC_SW) {
		d->first_tx_ring = 0;
		d->first_rx_ring = 0;
		d->last_tx_ring = d->req.nr_tx_rings;
		d->last_rx_ring = d->req.nr_rx_rings;
	} else if (nr_reg == NR_REG_ONE_NIC) {
		/* XXX check validity */
		d->first_tx_ring = d->last_tx_ring =
		d->first_rx_ring = d->last_rx_ring = d->req.nr_ringid & NETMAP_RING_MASK;
	} else { /* pipes */
		d->first_tx_ring = d->last_tx_ring = 0;
		d->first_rx_ring = d->last_rx_ring = 0;
	}

#ifdef DEBUG_NETMAP_USER
    { /* debugging code */
	int i;

	D("%s tx %d .. %d %d rx %d .. %d %d", ifname,
		d->first_tx_ring, d->last_tx_ring, d->req.nr_tx_rings,
		d->first_rx_ring, d->last_rx_ring, d->req.nr_rx_rings);
	for (i = 0; i <= d->req.nr_tx_rings; i++) {
		struct netmap_ring *r = NETMAP_TXRING(d->nifp, i);
		D("TX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
	}
	for (i = 0; i <= d->req.nr_rx_rings; i++) {
		struct netmap_ring *r = NETMAP_RXRING(d->nifp, i);
		D("RX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
	}
    }
#endif /* debugging */

	d->cur_tx_ring = d->first_tx_ring;
	d->cur_rx_ring = d->first_rx_ring;
	return d;

fail:
	nm_close(d);
	if (errmsg[0])
		D("%s %s", errmsg, ifname);
	if (errno == 0)
		errno = EINVAL;
	return NULL;
}

/* Undo nm_open(): unmap memory (only if this descriptor mmap'd it),
 * close the fd and free the descriptor. Returns 0, or EINVAL if d is
 * not a valid nm_desc (d->self != d).
 */
static int
nm_close(struct nm_desc *d)
{
	/*
	 * ugly trick to avoid unused warnings
	 */
	static void *__xxzt[] __attribute__ ((unused))  =
		{ (void *)nm_open, (void *)nm_inject,
		  (void *)nm_dispatch, (void *)nm_nextpkt } ;

	if (d == NULL || d->self != d)
		return EINVAL;
	if (d->done_mmap && d->mem)
		munmap(d->mem, d->memsize);
	if (d->fd != -1) {
		close(d->fd);
	}

	bzero(d, sizeof(*d));
	free(d);
	return 0;
}

/* Map the netmap shared memory for d, or inherit the parent's mapping
 * when it refers to the same memory block (same nr_arg2). Fills in the
 * const nifp/some_ring/buf_start/buf_end fields via uintptr_t casts.
 */
static int
nm_mmap(struct nm_desc *d, const struct nm_desc *parent)
{
	//XXX TODO: check if mmap is already done

	if (IS_NETMAP_DESC(parent) && parent->mem &&
	    parent->req.nr_arg2 == d->req.nr_arg2) {
		/* do not mmap, inherit from parent */
		D("do not mmap, inherit from parent");
		d->memsize = parent->memsize;
		d->mem = parent->mem;
	} else {
		/* XXX TODO: check if memsize is too large (or there is overflow) */
		d->memsize = d->req.nr_memsize;
		d->mem = mmap(0, d->memsize, PROT_WRITE | PROT_READ, MAP_SHARED,
				d->fd, 0);
		if (d->mem == MAP_FAILED) {
			goto fail;
		}
		d->done_mmap = 1;
	}
	{
		struct netmap_if *nifp = NETMAP_IF(d->mem, d->req.nr_offset);
		/* NOTE(review): the ring-index argument of NETMAP_RXRING()
		 * below was lost in extraction (this cannot compile as is);
		 * restore it from the upstream header -- TODO confirm.
		 */
		struct netmap_ring *r = NETMAP_RXRING(nifp, );

		/* cast away the const qualifiers to initialize the
		 * write-once fields of the descriptor
		 */
		*(struct netmap_if **)(uintptr_t)&(d->nifp) = nifp;
		*(struct netmap_ring **)(uintptr_t)&d->some_ring = r;
		*(void **)(uintptr_t)&d->buf_start = NETMAP_BUF(r, 0);
		*(void **)(uintptr_t)&d->buf_end =
			(char *)d->mem + d->memsize;
	}

	return 0;

fail:
	return
		EINVAL;
}

/*
 * Same prototype as pcap_inject(), only need to cast.
 *
 * Copy one packet of 'size' bytes into the first tx ring (starting from
 * cur_tx_ring) that has a free slot, and advance that ring.
 * Returns size on success, 0 if no ring had room.
 */
static int
nm_inject(struct nm_desc *d, const void *buf, size_t size)
{
	u_int c, n = d->last_tx_ring - d->first_tx_ring + 1;

	for (c = 0; c < n ; c++) {
		/* compute current ring to use */
		struct netmap_ring *ring;
		uint32_t i, idx;
		uint32_t ri = d->cur_tx_ring + c;

		if (ri > d->last_tx_ring)
			ri = d->first_tx_ring;
		ring = NETMAP_TXRING(d->nifp, ri);
		if (nm_ring_empty(ring)) {
			continue;
		}
		i = ring->cur;
		idx = ring->slot[i].buf_idx;
		ring->slot[i].len = size;
		nm_pkt_copy(buf, NETMAP_BUF(ring, idx), size);
		d->cur_tx_ring = ri;	/* remember where we left off */
		ring->head = ring->cur = nm_ring_next(ring, i);
		return size;
	}
	return 0; /* fail */
}


/*
 * Same prototype as pcap_dispatch(), only need to cast.
 *
 * Deliver up to cnt packets (0 means no limit) from the bound rx rings
 * to cb. The callback for each packet is issued one step late, so the
 * last invocation can clear NM_MORE_PKTS in hdr.flags.
 * Returns the number of packets delivered.
 */
static int
nm_dispatch(struct nm_desc *d, int cnt, nm_cb_t cb, u_char *arg)
{
	int n = d->last_rx_ring - d->first_rx_ring + 1;
	int c, got = 0, ri = d->cur_rx_ring;
	d->hdr.buf = NULL;
	d->hdr.flags = NM_MORE_PKTS;
	d->hdr.d = d;

	if (cnt == 0)
		cnt = -1;
	/* cnt == -1 means infinite, but rings have a finite amount
	 * of buffers and the int is large enough that we never wrap,
	 * so we can omit checking for -1
	 */
	for (c=0; c < n && cnt != got; c++) {
		/* compute current ring to use */
		struct netmap_ring *ring;

		ri = d->cur_rx_ring + c;
		if (ri > d->last_rx_ring)
			ri = d->first_rx_ring;
		ring = NETMAP_RXRING(d->nifp, ri);
		for ( ; !nm_ring_empty(ring) && cnt != got; got++) {
			u_int idx, i;
			if (d->hdr.buf) { /* from previous round */
				cb(arg, &d->hdr, d->hdr.buf);
			}
			i = ring->cur;
			idx = ring->slot[i].buf_idx;
			d->hdr.slot = &ring->slot[i];
			d->hdr.buf = (u_char *)NETMAP_BUF(ring, idx);
			// __builtin_prefetch(buf);
			d->hdr.len = d->hdr.caplen = ring->slot[i].len;
			d->hdr.ts = ring->ts;
			ring->head = ring->cur = nm_ring_next(ring, i);
		}
	}
	/* flush the last pending packet, marking it as the final one */
	if (d->hdr.buf) { /* from previous round */
		d->hdr.flags = 0;
		cb(arg, &d->hdr, d->hdr.buf);
	}
	d->cur_rx_ring = ri;
	return got;
}

/*
 * Same semantics as pcap_next(): scan the bound rx rings (round robin,
 * starting from cur_rx_ring) and return a pointer to the next packet's
 * buffer, filling *hdr; NULL if all rings are empty. The slot is
 * released immediately (head is advanced), so the buffer is only valid
 * until the next sync on this ring.
 */
static u_char *
nm_nextpkt(struct nm_desc *d, struct nm_pkthdr *hdr)
{
	int ri = d->cur_rx_ring;

	do {
		/* compute current ring to use */
		struct netmap_ring *ring = NETMAP_RXRING(d->nifp, ri);
		if (!nm_ring_empty(ring)) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);

			// __builtin_prefetch(buf);
			hdr->ts = ring->ts;
			hdr->len = hdr->caplen = ring->slot[i].len;
			ring->cur = nm_ring_next(ring, i);
			/* we could postpone advancing head if we want
			 * to hold the buffer. This can be supported in
			 * the future.
			 */
			ring->head = ring->cur;
			d->cur_rx_ring = ri;
			return buf;
		}
		ri++;
		if (ri > d->last_rx_ring)
			ri = d->first_rx_ring;
	} while (ri != d->cur_rx_ring);

	return NULL; /* nothing found */
}

#endif /* !HAVE_NETMAP_WITH_LIBS */

#endif /* NETMAP_WITH_LIBS */

#endif /* _NET_NETMAP_USER_H_ */