diff --git a/sys/dev/netmap/netmap.c b/sys/dev/netmap/netmap.c index 982f76d45faa..075c27b7a597 100644 --- a/sys/dev/netmap/netmap.c +++ b/sys/dev/netmap/netmap.c @@ -1,4341 +1,4337 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi * Copyright (C) 2011-2016 Luigi Rizzo * Copyright (C) 2011-2016 Giuseppe Lettieri * Copyright (C) 2011-2016 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * This module supports memory mapped access to network devices, * see netmap(4). * * The module uses a large, memory pool allocated by the kernel * and accessible as mmapped memory by multiple userspace threads/processes. * The memory pool contains packet buffers and "netmap rings", * i.e. user-accessible copies of the interface's queues. * * Access to the network card works like this: * 1. a process/thread issues one or more open() on /dev/netmap, to create * select()able file descriptor on which events are reported. * 2. on each descriptor, the process issues an ioctl() to identify * the interface that should report events to the file descriptor. * 3. on each descriptor, the process issues an mmap() request to * map the shared memory region within the process' address space. * The list of interesting queues is indicated by a location in * the shared memory region. * 4. using the functions in the netmap(4) userspace API, a process * can look up the occupation state of a queue, access memory buffers, * and retrieve received packets or enqueue packets to transmit. * 5. using some ioctl()s the process can synchronize the userspace view * of the queue with the actual status in the kernel. This includes both * receiving the notification of new packets, and transmitting new * packets on the output interface. * 6. select() or poll() can be used to wait for events on individual * transmit or receive queues (or all queues for a given interface). * SYNCHRONIZATION (USER) The netmap rings and data structures may be shared among multiple user threads or even independent processes. Any synchronization among those threads/processes is delegated to the threads themselves. Only one thread at a time can be in a system call on the same netmap ring. 
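 *
 * As a concrete illustration of steps 1-6 (a sketch only, not part of this
 * module), a minimal userspace sequence using the legacy struct nmreq
 * interface could look like the following; error handling is omitted and
 * real applications normally use the nm_open()/NETMAP_TXRING() helpers
 * from net/netmap_user.h instead of open-coding it:
 *
 *	struct nmreq req = { .nr_version = NETMAP_API };
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name) - 1);	// example NIC
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	ioctl(fd, NIOCREGIF, &req);			// step 2: bind to em0
 *	void *mem = mmap(NULL, req.nr_memsize,		// step 3: map the region
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);	// step 4
 *	// ... fill slots between txr->head and txr->tail, advance head/cur ...
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5: push packets out
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);				// step 6: wait for free slots
 *
 * As noted above, only one thread at a time should be in a system call
 * on the same netmap ring.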
The OS does not enforce this and only guarantees against system crashes in case of invalid usage. LOCKING (INTERNAL) Within the kernel, access to the netmap rings is protected as follows: - a spinlock on each ring, to handle producer/consumer races on RX rings attached to the host stack (against multiple host threads writing from the host stack to the same ring), and on 'destination' rings attached to a VALE switch (i.e. RX rings in VALE ports, and TX rings in NIC/host ports) protecting multiple active senders for the same destination) - an atomic variable to guarantee that there is at most one instance of *_*xsync() on the ring at any time. For rings connected to user file descriptors, an atomic_test_and_set() protects this, and the lock on the ring is not actually used. For NIC RX rings connected to a VALE switch, an atomic_test_and_set() is also used to prevent multiple executions (the driver might indeed already guarantee this). For NIC TX rings connected to a VALE switch, the lock arbitrates access to the queue (both when allocating buffers and when pushing them out). - *xsync() should be protected against initializations of the card. On FreeBSD most devices have the reset routine protected by a RING lock (ixgbe, igb, em) or core lock (re). lem is missing the RING protection on rx_reset(), this should be added. On linux there is an external lock on the tx path, which probably also arbitrates access to the reset routine. XXX to be revised - a per-interface core_lock protecting access from the host stack while interfaces may be detached from netmap mode. XXX there should be no need for this lock if we detach the interfaces only while they are down. --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* --- internals ---- * * Roadmap to the code that implements the above. * * > 1. a process/thread issues one or more open() on /dev/netmap, to create * > select()able file descriptor on which events are reported. * * Internally, we allocate a netmap_priv_d structure, that will be * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d * structure for each open(). * * os-specific: * FreeBSD: see netmap_open() (netmap_freebsd.c) * linux: see linux_netmap_open() (netmap_linux.c) * * > 2. on each descriptor, the process issues an ioctl() to identify * > the interface that should report events to the file descriptor. * * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0. * Most important things happen in netmap_get_na() and * netmap_do_regif(), called from there. Additional details can be * found in the comments above those functions. 
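 *
 * In outline (a simplified sketch of the call flow, not the literal code),
 * the register path inside netmap_ioctl() boils down to:
 *
 *	NMG_LOCK();
 *	error = netmap_get_na(hdr, &na, &ifp, nmd, 1);	// 1 == create if needed
 *	if (!error)
 *		error = netmap_do_regif(priv, na, req->nr_mode,
 *			    req->nr_ringid, req->nr_flags);
 *	NMG_UNLOCK();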
* * In all cases, this action creates/takes-a-reference-to a * netmap_*_adapter describing the port, and allocates a netmap_if * and all necessary netmap rings, filling them with netmap buffers. * * In this phase, the sync callbacks for each ring are set (these are used * in steps 5 and 6 below). The callbacks depend on the type of adapter. * The adapter creation/initialization code puts them in the * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they * are copied from there to the netmap_kring's during netmap_do_regif(), by * the nm_krings_create() callback. All the nm_krings_create callbacks * actually call netmap_krings_create() to perform this and the other * common stuff. netmap_krings_create() also takes care of the host rings, * if needed, by setting their sync callbacks appropriately. * * Additional actions depend on the kind of netmap_adapter that has been * registered: * * - netmap_hw_adapter: [netmap.c] * This is a system netdev/ifp with native netmap support. * The ifp is detached from the host stack by redirecting: * - transmissions (from the network stack) to netmap_transmit() * - receive notifications to the nm_notify() callback for * this adapter. The callback is normally netmap_notify(), unless * the ifp is attached to a bridge using bwrap, in which case it * is netmap_bwrap_intr_notify(). * * - netmap_generic_adapter: [netmap_generic.c] * A system netdev/ifp without native netmap support. * * (the decision about native/non native support is taken in * netmap_get_hw_na(), called by netmap_get_na()) * * - netmap_vp_adapter [netmap_vale.c] * Returned by netmap_get_bdg_na(). * This is a persistent or ephemeral VALE port. Ephemeral ports * are created on the fly if they don't already exist, and are * always attached to a bridge. * Persistent VALE ports must must be created separately, and i * then attached like normal NICs. The NIOCREGIF we are examining * will find them only if they had previosly been created and * attached (see VALE_CTL below). * * - netmap_pipe_adapter [netmap_pipe.c] * Returned by netmap_get_pipe_na(). * Both pipe ends are created, if they didn't already exist. * * - netmap_monitor_adapter [netmap_monitor.c] * Returned by netmap_get_monitor_na(). * If successful, the nm_sync callbacks of the monitored adapter * will be intercepted by the returned monitor. * * - netmap_bwrap_adapter [netmap_vale.c] * Cannot be obtained in this way, see VALE_CTL below * * * os-specific: * linux: we first go through linux_netmap_ioctl() to * adapt the FreeBSD interface to the linux one. * * * > 3. on each descriptor, the process issues an mmap() request to * > map the shared memory region within the process' address space. * > The list of interesting queues is indicated by a location in * > the shared memory region. * * os-specific: * FreeBSD: netmap_mmap_single (netmap_freebsd.c). * linux: linux_netmap_mmap (netmap_linux.c). * * > 4. using the functions in the netmap(4) userspace API, a process * > can look up the occupation state of a queue, access memory buffers, * > and retrieve received packets or enqueue packets to transmit. * * these actions do not involve the kernel. * * > 5. using some ioctl()s the process can synchronize the userspace view * > of the queue with the actual status in the kernel. This includes both * > receiving the notification of new packets, and transmitting new * > packets on the output interface. * * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC * cases. 
They invoke the nm_sync callbacks on the netmap_kring * structures, as initialized in step 2 and maybe later modified * by a monitor. Monitors, however, will always call the original * callback before doing anything else. * * * > 6. select() or poll() can be used to wait for events on individual * > transmit or receive queues (or all queues for a given interface). * * Implemented in netmap_poll(). This will call the same nm_sync() * callbacks as in step 5 above. * * os-specific: * linux: we first go through linux_netmap_poll() to adapt * the FreeBSD interface to the linux one. * * * ---- VALE_CTL ----- * * VALE switches are controlled by issuing a NIOCREGIF with a non-null * nr_cmd in the nmreq structure. These subcommands are handled by * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF * subcommands, respectively. * * Any network interface known to the system (including a persistent VALE * port) can be attached to a VALE switch by issuing the * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports * look exactly like ephemeral VALE ports (as created in step 2 above). The * attachment of other interfaces, instead, requires the creation of a * netmap_bwrap_adapter. Moreover, the attached interface must be put in * netmap mode. This may require the creation of a netmap_generic_adapter if * we have no native support for the interface, or if generic adapters have * been forced by sysctl. * * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(), * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach() * callback. In the case of the bwrap, the callback creates the * netmap_bwrap_adapter. The initialization of the bwrap is then * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl() * callback (netmap_bwrap_bdg_ctl in netmap_vale.c). * A generic adapter for the wrapped ifp will be created if needed, when * netmap_get_bdg_na() calls netmap_get_hw_na(). 
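 *
 * For illustration, attaching an existing interface to a VALE switch from
 * userspace amounts to a request like the sketch below (new-style
 * nmreq_header API, example names, error handling omitted; the vale-ctl
 * utility shipped with netmap wraps requests of this kind):
 *
 *	struct nmreq_header hdr = { .nr_version = NETMAP_API };
 *	struct nmreq_vale_attach att = { 0 };
 *	strncpy(hdr.nr_name, "vale0:em0", sizeof(hdr.nr_name) - 1);
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	hdr.nr_body = (uintptr_t)&att;
 *	att.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);	// fd is an open /dev/netmap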
* * * ---- DATAPATHS ----- * * -= SYSTEM DEVICE WITH NATIVE SUPPORT =- * * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_txsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_rxsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from host stack * concurrently: * 1) host stack * netmap_transmit() * na->nm_notify == netmap_notify() * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_rxsync_from_host * netmap_rxsync_from_host(na, NULL, NULL) * - tx to host stack * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_txsync_to_host * netmap_txsync_to_host(na) * nm_os_send_up() * FreeBSD: na->if_input() == ether_input() * linux: netif_rx() with NM_MAGIC_PRIORITY_RX * * * -= SYSTEM DEVICE WITH GENERIC SUPPORT =- * * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_txsync() * nm_os_generic_xmit_frame() * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * gna->save_start_xmit == orig. dev. start_xmit * FreeBSD: na->if_transmit() == orig. dev if_transmit * 2) generic_mbuf_destructor() * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_rxsync() * mbq_safe_dequeue() * 2) device driver * generic_rx_handler() * mbq_safe_enqueue() * na->nm_notify() == netmap_notify() * - rx from host stack * FreeBSD: same as native * Linux: same as native except: * 1) host stack * dev_queue_xmit() without NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * netmap_transmit() * na->nm_notify() == netmap_notify() * - tx to host stack (same as native): * * * -= VALE =- * * INCOMING: * * - VALE ports: * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_txsync() * * - system device with native support: * from cable: * interrupt * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == DEVICE_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == DEVICE_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * - system device with generic support: * from device driver: * generic_rx_handler() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == generic_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == generic_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * (all cases) --> nm_bdg_flush() * dest_na->nm_notify() == (see below) * * OUTGOING: * * - VALE ports: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_rxsync() * 2) from nm_bdg_flush() * na->nm_notify() == netmap_notify() * * - system device with native support: * to cable: * 
na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == DEVICE_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync_locked() * * - system device with generic adapter: * to device driver: * na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == generic_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync() * */ /* * OS-specific code that is used only within this file. * Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include /* FIONBIO */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #include /* ETHER_BPF_MTAP */ #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined (_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include /* user-controlled variables */ int netmap_verbose; #ifdef CONFIG_NETMAP_DEBUG int netmap_debug; #endif /* CONFIG_NETMAP_DEBUG */ static int netmap_no_timestamp; /* don't timestamp on rxsync */ int netmap_no_pendintr = 1; int netmap_txsync_retry = 2; static int netmap_fwd = 0; /* force transparent forwarding */ /* * netmap_admode selects the netmap mode to use. * Invalid values are reset to NETMAP_ADMODE_BEST */ enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */ NETMAP_ADMODE_NATIVE, /* either native or none */ NETMAP_ADMODE_GENERIC, /* force generic */ NETMAP_ADMODE_LAST }; static int netmap_admode = NETMAP_ADMODE_BEST; /* netmap_generic_mit controls mitigation of RX notifications for * the generic netmap adapter. The value is a time interval in * nanoseconds. */ int netmap_generic_mit = 100*1000; /* We use by default netmap-aware qdiscs with generic netmap adapters, * even if there can be a little performance hit with hardware NICs. * However, using the qdisc is the safer approach, for two reasons: * 1) it prevents non-fifo qdiscs to break the TX notification * scheme, which is based on mbuf destructors when txqdisc is * not used. * 2) it makes it possible to transmit over software devices that * change skb->dev, like bridge, veth, ... * * Anyway users looking for the best performance should * use native adapters. */ #ifdef linux int netmap_generic_txqdisc = 1; #endif /* Default number of slots and queues for generic adapters. */ int netmap_generic_ringsize = 1024; int netmap_generic_rings = 1; /* Non-zero to enable checksum offloading in NIC drivers */ int netmap_generic_hwcsum = 0; /* Non-zero if ptnet devices are allowed to use virtio-net headers. 
*/ int ptnet_vnet_hdr = 1; /* * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated * in some other operating systems */ SYSBEGIN(main_init); SYSCTL_DECL(_dev_netmap); SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); #ifdef CONFIG_NETMAP_DEBUG SYSCTL_INT(_dev_netmap, OID_AUTO, debug, CTLFLAG_RW, &netmap_debug, 0, "Debug messages"); #endif /* CONFIG_NETMAP_DEBUG */ SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW, &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush."); SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0, "Force NR_FORWARD mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0, "Adapter mode. 0 selects the best option available," "1 forces native adapter, 2 forces emulated adapter"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum, 0, "Hardware checksums. 0 to disable checksum generation by the NIC (default)," "1 to enable checksum generation by the NIC"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit, 0, "RX notification interval in nanoseconds"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW, &netmap_generic_ringsize, 0, "Number of per-ring slots for emulated netmap mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW, &netmap_generic_rings, 0, "Number of TX/RX queues for emulated netmap adapters"); #ifdef linux SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW, &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters"); #endif SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr, 0, "Allow ptnet devices to use virtio-net headers"); SYSEND; NMG_LOCK_T netmap_global_lock; /* * mark the ring as stopped, and run through the locks * to make sure other users get to see it. * stopped must be either NR_KR_STOPPED (for unbounded stop) * of NR_KR_LOCKED (brief stop for mutual exclusion purposes) */ static void netmap_disable_ring(struct netmap_kring *kr, int stopped) { nm_kr_stop(kr, stopped); // XXX check if nm_kr_stop is sufficient mtx_lock(&kr->q_lock); mtx_unlock(&kr->q_lock); nm_kr_put(kr); } /* stop or enable a single ring */ void netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped) { if (stopped) netmap_disable_ring(NMR(na, t)[ring_id], stopped); else NMR(na, t)[ring_id]->nkr_stopped = 0; } /* stop or enable all the rings of na */ void netmap_set_all_rings(struct netmap_adapter *na, int stopped) { int i; enum txrx t; if (!nm_netmap_on(na)) return; for_rx_tx(t) { for (i = 0; i < netmap_real_rings(na, t); i++) { netmap_set_ring(na, i, t, stopped); } } } /* * Convenience function used in drivers. Waits for current txsync()s/rxsync()s * to finish and prevents any new one from starting. Call this before turning * netmap mode off, or before removing the hardware rings (e.g., on module * onload). */ void netmap_disable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { - netmap_set_all_rings(NA(ifp), NM_KR_STOPPED); + netmap_set_all_rings(NA(ifp), NM_KR_LOCKED); } } /* * Convenience function used in drivers. 
Re-enables rxsync and txsync on the * adapter's rings In linux drivers, this should be placed near each * napi_enable(). */ void netmap_enable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { netmap_set_all_rings(NA(ifp), 0 /* enabled */); } } void netmap_make_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags |= NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } void netmap_undo_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); if (na->na_flags & NAF_ZOMBIE) { netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags &= ~NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } } /* * generic bound_checking function */ u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg) { u_int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } if (op && msg) nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv); return *v; } /* * packet-dump function, user-supplied or static buffer. * The destination buffer must be at least 30+4*len */ const char * nm_dump_buf(char *p, int len, int lim, char *dst) { static char _dst[8192]; int i, j, i0; static char hex[] ="0123456789abcdef"; char *o; /* output position */ #define P_HI(x) hex[((x) & 0xf0)>>4] #define P_LO(x) hex[((x) & 0xf)] #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.') if (!dst) dst = _dst; if (lim <= 0 || lim > len) lim = len; o = dst; sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim); o += strlen(o); /* hexdump routine */ for (i = 0; i < lim; ) { sprintf(o, "%5d: ", i); o += strlen(o); memset(o, ' ', 48); i0 = i; for (j=0; j < 16 && i < lim; i++, j++) { o[j*3] = P_HI(p[i]); o[j*3+1] = P_LO(p[i]); } i = i0; for (j=0; j < 16 && i < lim; i++, j++) o[j + 48] = P_C(p[i]); o[j+48] = '\n'; o += j+49; } *o = '\0'; #undef P_HI #undef P_LO #undef P_C return dst; } /* * Fetch configuration from the device, to cope with dynamic * reconfigurations after loading the module. 
*/ /* call with NMG_LOCK held */ int netmap_update_config(struct netmap_adapter *na) { struct nm_config_info info; bzero(&info, sizeof(info)); if (na->nm_config == NULL || na->nm_config(na, &info)) { /* take whatever we had at init time */ info.num_tx_rings = na->num_tx_rings; info.num_tx_descs = na->num_tx_desc; info.num_rx_rings = na->num_rx_rings; info.num_rx_descs = na->num_rx_desc; info.rx_buf_maxsize = na->rx_buf_maxsize; } if (na->num_tx_rings == info.num_tx_rings && na->num_tx_desc == info.num_tx_descs && na->num_rx_rings == info.num_rx_rings && na->num_rx_desc == info.num_rx_descs && na->rx_buf_maxsize == info.rx_buf_maxsize) return 0; /* nothing changed */ if (na->active_fds == 0) { na->num_tx_rings = info.num_tx_rings; na->num_tx_desc = info.num_tx_descs; na->num_rx_rings = info.num_rx_rings; na->num_rx_desc = info.num_rx_descs; na->rx_buf_maxsize = info.rx_buf_maxsize; if (netmap_verbose) nm_prinf("configuration changed for %s: txring %d x %d, " "rxring %d x %d, rxbufsz %d", na->name, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); return 0; } nm_prerr("WARNING: configuration changed for %s while active: " "txring %d x %d, rxring %d x %d, rxbufsz %d", na->name, info.num_tx_rings, info.num_tx_descs, info.num_rx_rings, info.num_rx_descs, info.rx_buf_maxsize); return 1; } /* nm_sync callbacks for the host rings */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags); static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags); /* create the krings array and initialize the fields common to all adapters. * The array layout is this: * * +----------+ * na->tx_rings ----->| | \ * | | } na->num_tx_ring * | | / * +----------+ * | | host tx kring * na->rx_rings ----> +----------+ * | | \ * | | } na->num_rx_rings * | | / * +----------+ * | | host rx kring * +----------+ * na->tailroom ----->| | \ * | | } tailroom bytes * | | / * +----------+ * * Note: for compatibility, host krings are created even when not needed. * The tailroom space is currently used by vale ports for allocating leases. */ /* call with NMG_LOCK held */ int netmap_krings_create(struct netmap_adapter *na, u_int tailroom) { u_int i, len, ndesc; struct netmap_kring *kring; u_int n[NR_TXRX]; enum txrx t; int err = 0; if (na->tx_rings != NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("warning: krings were already created"); return 0; } /* account for the (possibly fake) host rings */ n[NR_TX] = netmap_all_rings(na, NR_TX); n[NR_RX] = netmap_all_rings(na, NR_RX); len = (n[NR_TX] + n[NR_RX]) * (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *)) + tailroom; na->tx_rings = nm_os_malloc((size_t)len); if (na->tx_rings == NULL) { nm_prerr("Cannot allocate krings"); return ENOMEM; } na->rx_rings = na->tx_rings + n[NR_TX]; na->tailroom = na->rx_rings + n[NR_RX]; /* link the krings in the krings array */ kring = (struct netmap_kring *)((char *)na->tailroom + tailroom); for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) { na->tx_rings[i] = kring; kring++; } /* * All fields in krings are 0 except the one initialized below. * but better be explicit on important kring fields. */ for_rx_tx(t) { ndesc = nma_get_ndesc(na, t); for (i = 0; i < n[t]; i++) { kring = NMR(na, t)[i]; bzero(kring, sizeof(*kring)); kring->notify_na = na; kring->ring_id = i; kring->tx = t; kring->nkr_num_slots = ndesc; kring->nr_mode = NKR_NETMAP_OFF; kring->nr_pending_mode = NKR_NETMAP_OFF; if (i < nma_get_nrings(na, t)) { kring->nm_sync = (t == NR_TX ? 
na->nm_txsync : na->nm_rxsync); } else { if (!(na->na_flags & NAF_HOST_RINGS)) kring->nr_kflags |= NKR_FAKERING; kring->nm_sync = (t == NR_TX ? netmap_txsync_to_host: netmap_rxsync_from_host); } kring->nm_notify = na->nm_notify; kring->rhead = kring->rcur = kring->nr_hwcur = 0; /* * IMPORTANT: Always keep one slot empty. */ kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0); snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name, nm_txrx2str(t), i); nm_prdis("ktx %s h %d c %d t %d", kring->name, kring->rhead, kring->rcur, kring->rtail); err = nm_os_selinfo_init(&kring->si, kring->name); if (err) { netmap_krings_delete(na); return err; } mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF); kring->na = na; /* setting this field marks the mutex as initialized */ } err = nm_os_selinfo_init(&na->si[t], na->name); if (err) { netmap_krings_delete(na); return err; } } return 0; } /* undo the actions performed by netmap_krings_create */ /* call with NMG_LOCK held */ void netmap_krings_delete(struct netmap_adapter *na) { struct netmap_kring **kring = na->tx_rings; enum txrx t; if (na->tx_rings == NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("warning: krings were already deleted"); return; } for_rx_tx(t) nm_os_selinfo_uninit(&na->si[t]); /* we rely on the krings layout described above */ for ( ; kring != na->tailroom; kring++) { if ((*kring)->na != NULL) mtx_destroy(&(*kring)->q_lock); nm_os_selinfo_uninit(&(*kring)->si); } nm_os_free(na->tx_rings); na->tx_rings = na->rx_rings = na->tailroom = NULL; } /* * Destructor for NIC ports. They also have an mbuf queue * on the rings connected to the host so we need to purge * them first. */ /* call with NMG_LOCK held */ void netmap_hw_krings_delete(struct netmap_adapter *na) { u_int lim = netmap_real_rings(na, NR_RX), i; for (i = nma_get_nrings(na, NR_RX); i < lim; i++) { struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue; nm_prdis("destroy sw mbq with len %d", mbq_len(q)); mbq_purge(q); mbq_safe_fini(q); } netmap_krings_delete(na); } static void netmap_mem_drop(struct netmap_adapter *na) { int last = netmap_mem_deref(na->nm_mem, na); /* if the native allocator had been overrided on regif, * restore it now and drop the temporary one */ if (last && na->nm_mem_prev) { netmap_mem_put(na->nm_mem); na->nm_mem = na->nm_mem_prev; na->nm_mem_prev = NULL; } } /* * Undo everything that was done in netmap_do_regif(). In particular, * call nm_register(ifp,0) to stop netmap mode on the interface and * revert to normal operation. */ /* call with NMG_LOCK held */ static void netmap_unset_ringid(struct netmap_priv_d *); static void netmap_krings_put(struct netmap_priv_d *); void netmap_do_unregif(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; NMG_LOCK_ASSERT(); na->active_fds--; /* unset nr_pending_mode and possibly release exclusive mode */ netmap_krings_put(priv); #ifdef WITH_MONITOR /* XXX check whether we have to do something with monitor * when rings change nr_mode. */ if (na->active_fds <= 0) { /* walk through all the rings and tell any monitor * that the port is going to exit netmap mode */ netmap_monitor_stop(na); } #endif if (na->active_fds <= 0 || nm_kring_pending(priv)) { na->nm_register(na, 0); } /* delete rings and buffers that are no longer needed */ netmap_mem_rings_delete(na); if (na->active_fds <= 0) { /* last instance */ /* * (TO CHECK) We enter here * when the last reference to this file descriptor goes * away. 
This means we cannot have any pending poll() * or interrupt routine operating on the structure. * XXX The file may be closed in a thread while * another thread is using it. * Linux keeps the file opened until the last reference * by any outstanding ioctl/poll or mmap is gone. * FreeBSD does not track mmap()s (but we do) and * wakes up any sleeping poll(). Need to check what * happens if the close() occurs while a concurrent * syscall is running. */ if (netmap_debug & NM_DEBUG_ON) nm_prinf("deleting last instance for %s", na->name); if (nm_netmap_on(na)) { nm_prerr("BUG: netmap on while going to delete the krings"); } na->nm_krings_delete(na); /* restore the default number of host tx and rx rings */ if (na->na_flags & NAF_HOST_RINGS) { na->num_host_tx_rings = 1; na->num_host_rx_rings = 1; } else { na->num_host_tx_rings = 0; na->num_host_rx_rings = 0; } } /* possibily decrement counter of tx_si/rx_si users */ netmap_unset_ringid(priv); /* delete the nifp */ netmap_mem_if_delete(na, priv->np_nifp); /* drop the allocator */ netmap_mem_drop(na); /* mark the priv as unregistered */ priv->np_na = NULL; priv->np_nifp = NULL; } struct netmap_priv_d* netmap_priv_new(void) { struct netmap_priv_d *priv; priv = nm_os_malloc(sizeof(struct netmap_priv_d)); if (priv == NULL) return NULL; priv->np_refs = 1; nm_os_get_module(); return priv; } /* * Destructor of the netmap_priv_d, called when the fd is closed * Action: undo all the things done by NIOCREGIF, * On FreeBSD we need to track whether there are active mmap()s, * and we use np_active_mmaps for that. On linux, the field is always 0. * Return: 1 if we can free priv, 0 otherwise. * */ /* call with NMG_LOCK held */ void netmap_priv_delete(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; /* number of active references to this fd */ if (--priv->np_refs > 0) { return; } nm_os_put_module(); if (na) { netmap_do_unregif(priv); } netmap_unget_na(na, priv->np_ifp); bzero(priv, sizeof(*priv)); /* for safety */ nm_os_free(priv); } /* call with NMG_LOCK *not* held */ void netmap_dtor(void *data) { struct netmap_priv_d *priv = data; NMG_LOCK(); netmap_priv_delete(priv); NMG_UNLOCK(); } /* * Handlers for synchronization of the rings from/to the host stack. * These are associated to a network interface and are just another * ring pair managed by userspace. * * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD * flags): * * - Before releasing buffers on hw RX rings, the application can mark * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they * will be forwarded to the host stack, similarly to what happened if * the application moved them to the host TX ring. * * - Before releasing buffers on the host RX ring, the application can * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(), * they will be forwarded to the hw TX rings, saving the application * from doing the same task in user-space. * * Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD * flag, or globally with the netmap_fwd sysctl. * * The transfer NIC --> host is relatively easy, just encapsulate * into mbufs and we are done. The host --> NIC side is slightly * harder because there might not be room in the tx ring so it * might take a while before releasing the buffer. */ /* * Pass a whole queue of mbufs to the host stack as coming from 'dst' * We do not need to lock because the queue is private. * After this call the queue is empty. 
*/ static void netmap_send_up(struct ifnet *dst, struct mbq *q) { struct mbuf *m; struct mbuf *head = NULL, *prev = NULL; /* Send packets up, outside the lock; head/prev machinery * is only useful for Windows. */ while ((m = mbq_dequeue(q)) != NULL) { if (netmap_debug & NM_DEBUG_HOST) nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m)); prev = nm_os_send_up(dst, m, prev); if (head == NULL) head = prev; } if (head) nm_os_send_up(dst, NULL, head); mbq_fini(q); } /* * Scan the buffers from hwcur to ring->head, and put a copy of those * marked NS_FORWARD (or all of them if forced) into a queue of mbufs. * Drop remaining packets in the unlikely event * of an mbuf shortage. */ static void netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force) { u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; u_int n; struct netmap_adapter *na = kring->na; for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) { struct mbuf *m; struct netmap_slot *slot = &kring->ring->slot[n]; if ((slot->flags & NS_FORWARD) == 0 && !force) continue; if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) { nm_prlim(5, "bad pkt at %d len %d", n, slot->len); continue; } slot->flags &= ~NS_FORWARD; // XXX needed ? /* XXX TODO: adapt to the case of a multisegment packet */ m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL); if (m == NULL) break; mbq_enqueue(q, m); } } static inline int _nm_may_forward(struct netmap_kring *kring) { return ((netmap_fwd || kring->ring->flags & NR_FORWARD) && kring->na->na_flags & NAF_HOST_RINGS && kring->tx == NR_RX); } static inline int nm_may_forward_up(struct netmap_kring *kring) { return _nm_may_forward(kring) && kring->ring_id != kring->na->num_rx_rings; } static inline int nm_may_forward_down(struct netmap_kring *kring, int sync_flags) { return _nm_may_forward(kring) && (sync_flags & NAF_CAN_FORWARD_DOWN) && kring->ring_id == kring->na->num_rx_rings; } /* * Send to the NIC rings packets marked NS_FORWARD between * kring->nr_hwcur and kring->rhead. * Called under kring->rx_queue.lock on the sw rx ring. * * It can only be called if the user opened all the TX hw rings, * see NAF_CAN_FORWARD_DOWN flag. * We can touch the TX netmap rings (slots, head and cur) since * we are in poll/ioctl system call context, and the application * is not supposed to touch the ring (using a different thread) * during the execution of the system call. */ static u_int netmap_sw_to_nic(struct netmap_adapter *na) { struct netmap_kring *kring = na->rx_rings[na->num_rx_rings]; struct netmap_slot *rxslot = kring->ring->slot; u_int i, rxcur = kring->nr_hwcur; u_int const head = kring->rhead; u_int const src_lim = kring->nkr_num_slots - 1; u_int sent = 0; /* scan rings to find space, then fill as much as possible */ for (i = 0; i < na->num_tx_rings; i++) { struct netmap_kring *kdst = na->tx_rings[i]; struct netmap_ring *rdst = kdst->ring; u_int const dst_lim = kdst->nkr_num_slots - 1; /* XXX do we trust ring or kring->rcur,rtail ? */ for (; rxcur != head && !nm_ring_empty(rdst); rxcur = nm_next(rxcur, src_lim) ) { struct netmap_slot *src, *dst, tmp; u_int dst_head = rdst->head; src = &rxslot[rxcur]; if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd) continue; sent++; dst = &rdst->slot[dst_head]; tmp = *src; src->buf_idx = dst->buf_idx; src->flags = NS_BUF_CHANGED; dst->buf_idx = tmp.buf_idx; dst->len = tmp.len; dst->flags = NS_BUF_CHANGED; rdst->head = rdst->cur = nm_next(dst_head, dst_lim); } /* if (sent) XXX txsync ? 
it would be just an optimization */ } return sent; } /* * netmap_txsync_to_host() passes packets up. We are called from a * system call in user process context, and the only contention * can be among multiple user threads erroneously calling * this routine concurrently. */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct mbq q; /* Take packets from hwcur to head and pass them up. * Force hwcur = head since netmap_grab_packets() stops at head */ mbq_init(&q); netmap_grab_packets(kring, &q, 1 /* force */); nm_prdis("have %d pkts in queue", mbq_len(&q)); kring->nr_hwcur = head; kring->nr_hwtail = head + lim; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; netmap_send_up(na->ifp, &q); return 0; } /* * rxsync backend for packets coming from the host stack. * They have been put in kring->rx_queue by netmap_transmit(). * We protect access to the kring using kring->rx_queue.lock * * also moves to the nic hw rings any packet the user has marked * for transparent-mode forwarding, then sets the NR_FORWARD * flag in the kring to let the caller push them out */ static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int ret = 0; struct mbq *q = &kring->rx_queue, fq; mbq_init(&fq); /* fq holds packets to be freed */ mbq_lock(q); /* First part: import newly received packets */ n = mbq_len(q); if (n) { /* grab packets from the queue */ struct mbuf *m; uint32_t stop_i; nm_i = kring->nr_hwtail; stop_i = nm_prev(kring->nr_hwcur, lim); while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) { int len = MBUF_LEN(m); struct netmap_slot *slot = &ring->slot[nm_i]; m_copydata(m, 0, len, NMB(na, slot)); nm_prdis("nm %d len %d", nm_i, len); if (netmap_debug & NM_DEBUG_HOST) nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); slot->len = len; slot->flags = 0; nm_i = nm_next(nm_i, lim); mbq_enqueue(&fq, m); } kring->nr_hwtail = nm_i; } /* * Second part: skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* something was released */ if (nm_may_forward_down(kring, flags)) { ret = netmap_sw_to_nic(na); if (ret > 0) { kring->nr_kflags |= NR_FORWARD; ret = 0; } } kring->nr_hwcur = head; } mbq_unlock(q); mbq_purge(&fq); mbq_fini(&fq); return ret; } /* Get a netmap adapter for the port. * * If it is possible to satisfy the request, return 0 * with *na containing the netmap adapter found. * Otherwise return an error code, with *na containing NULL. * * When the port is attached to a bridge, we always return * EBUSY. * Otherwise, if the port is already bound to a file descriptor, * then we unconditionally return the existing adapter into *na. 
* In all the other cases, we return (into *na) either native, * generic or NULL, according to the following table: * * native_support * active_fds dev.netmap.admode YES NO * ------------------------------------------------------- * >0 * NA(ifp) NA(ifp) * * 0 NETMAP_ADMODE_BEST NATIVE GENERIC * 0 NETMAP_ADMODE_NATIVE NATIVE NULL * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC * */ static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */ int netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na) { /* generic support */ int i = netmap_admode; /* Take a snapshot. */ struct netmap_adapter *prev_na; int error = 0; *na = NULL; /* default */ /* reset in case of invalid value */ if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST) i = netmap_admode = NETMAP_ADMODE_BEST; if (NM_NA_VALID(ifp)) { prev_na = NA(ifp); /* If an adapter already exists, return it if * there are active file descriptors or if * netmap is not forced to use generic * adapters. */ if (NETMAP_OWNED_BY_ANY(prev_na) || i != NETMAP_ADMODE_GENERIC || prev_na->na_flags & NAF_FORCE_NATIVE #ifdef WITH_PIPES /* ugly, but we cannot allow an adapter switch * if some pipe is referring to this one */ || prev_na->na_next_pipe > 0 #endif ) { *na = prev_na; goto assign_mem; } } /* If there isn't native support and netmap is not allowed * to use generic adapters, we cannot satisfy the request. */ if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE) return EOPNOTSUPP; /* Otherwise, create a generic adapter and return it, * saving the previously used netmap adapter, if any. * * Note that here 'prev_na', if not NULL, MUST be a * native adapter, and CANNOT be a generic one. This is * true because generic adapters are created on demand, and * destroyed when not used anymore. Therefore, if the adapter * currently attached to an interface 'ifp' is generic, it * must be that * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))). * Consequently, if NA(ifp) is generic, we will enter one of * the branches above. This ensures that we never override * a generic adapter with another generic adapter. */ error = generic_netmap_attach(ifp); if (error) return error; *na = NA(ifp); assign_mem: if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) && (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) { (*na)->nm_mem_prev = (*na)->nm_mem; (*na)->nm_mem = netmap_mem_get(nmd); } return 0; } /* * MUST BE CALLED UNDER NMG_LOCK() * * Get a refcounted reference to a netmap adapter attached * to the interface specified by req. * This is always called in the execution of an ioctl(). * * Return ENXIO if the interface specified by the request does * not exist, ENOTSUP if netmap is not supported by the interface, * EBUSY if the interface is already attached to a bridge, * EINVAL if parameters are invalid, ENOMEM if needed resources * could not be allocated. * If successful, hold a reference to the netmap adapter. * * If the interface specified by req is a system one, also keep * a reference to it and return a valid *ifp. 
*/ int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct ifnet **ifp, struct netmap_mem_d *nmd, int create) { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; int error = 0; struct netmap_adapter *ret = NULL; int nmd_ref = 0; *na = NULL; /* default return value */ *ifp = NULL; if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { return EINVAL; } if (req->nr_mode == NR_REG_PIPE_MASTER || req->nr_mode == NR_REG_PIPE_SLAVE) { /* Do not accept deprecated pipe modes. */ nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax"); return EINVAL; } NMG_LOCK_ASSERT(); /* if the request contain a memid, try to find the * corresponding memory region */ if (nmd == NULL && req->nr_mem_id) { nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) return EINVAL; /* keep the rereference */ nmd_ref = 1; } /* We cascade through all possible types of netmap adapter. * All netmap_get_*_na() functions return an error and an na, * with the following combinations: * * error na * 0 NULL type doesn't match * !0 NULL type matches, but na creation/lookup failed * 0 !NULL type matches and na created/found * !0 !NULL impossible */ error = netmap_get_null_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a monitor port */ error = netmap_get_monitor_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a pipe port */ error = netmap_get_pipe_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a bridge port */ error = netmap_get_vale_na(hdr, na, nmd, create); if (error) goto out; if (*na != NULL) /* valid match in netmap_get_bdg_na() */ goto out; /* * This must be a hardware na, lookup the name in the system. * Note that by hardware we actually mean "it shows up in ifconfig". * This may still be a tap, a veth/epair, or even a * persistent VALE port. */ *ifp = ifunit_ref(hdr->nr_name); if (*ifp == NULL) { error = ENXIO; goto out; } error = netmap_get_hw_na(*ifp, nmd, &ret); if (error) goto out; *na = ret; netmap_adapter_get(ret); /* * if the adapter supports the host rings and it is not alread open, * try to set the number of host rings as requested by the user */ if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) { if (req->nr_host_tx_rings) (*na)->num_host_tx_rings = req->nr_host_tx_rings; if (req->nr_host_rx_rings) (*na)->num_host_rx_rings = req->nr_host_rx_rings; } nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings, (*na)->num_host_rx_rings); out: if (error) { if (ret) netmap_adapter_put(ret); if (*ifp) { if_rele(*ifp); *ifp = NULL; } } if (nmd_ref) netmap_mem_put(nmd); return error; } /* undo netmap_get_na() */ void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp) { if (ifp) if_rele(ifp); if (na) netmap_adapter_put(na); } #define NM_FAIL_ON(t) do { \ if (unlikely(t)) { \ nm_prlim(5, "%s: fail '" #t "' " \ "h %d c %d t %d " \ "rh %d rc %d rt %d " \ "hc %d ht %d", \ kring->name, \ head, cur, ring->tail, \ kring->rhead, kring->rcur, kring->rtail, \ kring->nr_hwcur, kring->nr_hwtail); \ return kring->nkr_num_slots; \ } \ } while (0) /* * validate parameters on entry for *_txsync() * Returns ring->cur if ok, or something >= kring->nkr_num_slots * in case of error. * * rhead, rcur and rtail=hwtail are stored from previous round. * hwcur is the next packet to send to the ring. 
* * We want * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail * * hwcur, rhead, rtail and hwtail are reliable */ u_int nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { u_int head = ring->head; /* read only once */ u_int cur = ring->cur; /* read only once */ u_int n = kring->nkr_num_slots; nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); #if 1 /* kernel sanity checks; but we can trust the kring. */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n || kring->rtail >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* * user sanity checks. We only use head, * A, B, ... are possible positions for head: * * 0 A rhead B rtail C n-1 * 0 D rtail E rhead F n-1 * * B, F, D are valid. A, C, E are wrong */ if (kring->rtail >= kring->rhead) { /* want rhead <= head <= rtail */ NM_FAIL_ON(head < kring->rhead || head > kring->rtail); /* and also head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* here rtail < rhead */ /* we need head outside rtail .. rhead */ NM_FAIL_ON(head > kring->rtail && head < kring->rhead); /* two cases now: head <= rtail or head >= rhead */ if (head <= kring->rtail) { /* want head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* head >= rhead */ /* cur must be outside rtail..head */ NM_FAIL_ON(cur > kring->rtail && cur < head); } } if (ring->tail != kring->rtail) { nm_prlim(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } kring->rhead = head; kring->rcur = cur; return head; } /* * validate parameters on entry for *_rxsync() * Returns ring->head if ok, kring->nkr_num_slots on error. * * For a valid configuration, * hwcur <= head <= cur <= tail <= hwtail * * We only consider head and cur. * hwcur and hwtail are reliable. * */ u_int nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { uint32_t const n = kring->nkr_num_slots; uint32_t head, cur; nm_prdis(5,"%s kc %d kt %d h %d c %d t %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); /* * Before storing the new values, we should check they do not * move backwards. 
However: * - head is not an issue because the previous value is hwcur; * - cur could in principle go back, however it does not matter * because we are processing a brand new rxsync() */ cur = kring->rcur = ring->cur; /* read only once */ head = kring->rhead = ring->head; /* read only once */ #if 1 /* kernel sanity checks */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* user sanity checks */ if (kring->nr_hwtail >= kring->nr_hwcur) { /* want hwcur <= rhead <= hwtail */ NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail); /* and also rhead <= rcur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* we need rhead outside hwtail..hwcur */ NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail); /* two cases now: head <= hwtail or head >= hwcur */ if (head <= kring->nr_hwtail) { /* want head <= cur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* cur must be outside hwtail..head */ NM_FAIL_ON(cur < head && cur > kring->nr_hwtail); } } if (ring->tail != kring->rtail) { nm_prlim(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } return head; } /* * Error routine called when txsync/rxsync detects an error. * Can't do much more than resetting head = cur = hwcur, tail = hwtail * Return 1 on reinit. * * This routine is only called by the upper half of the kernel. * It only reads hwcur (which is changed only by the upper half, too) * and hwtail (which may be changed by the lower half, but only on * a tx ring and only to increase it, so any error will be recovered * on the next call). For the above, we don't strictly need to call * it under lock. */ int netmap_ring_reinit(struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; u_int i, lim = kring->nkr_num_slots - 1; int errors = 0; // XXX KASSERT nm_kr_tryget nm_prlim(10, "called for %s", kring->name); // XXX probably wrong to trust userspace kring->rhead = ring->head; kring->rcur = ring->cur; kring->rtail = ring->tail; if (ring->cur > lim) errors++; if (ring->head > lim) errors++; if (ring->tail > lim) errors++; for (i = 0; i <= lim; i++) { u_int idx = ring->slot[i].buf_idx; u_int len = ring->slot[i].len; if (idx < 2 || idx >= kring->na->na_lut.objtotal) { nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len); ring->slot[i].buf_idx = 0; ring->slot[i].len = 0; } else if (len > NETMAP_BUF_SIZE(kring->na)) { ring->slot[i].len = 0; nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len); } } if (errors) { nm_prlim(10, "total %d errors", errors); nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d", kring->name, ring->cur, kring->nr_hwcur, ring->tail, kring->nr_hwtail); ring->head = kring->rhead = kring->nr_hwcur; ring->cur = kring->rcur = kring->nr_hwcur; ring->tail = kring->rtail = kring->nr_hwtail; } return (errors ? 
1 : 0); } /* interpret the ringid and flags fields of an nmreq, by translating them * into a pair of intervals of ring indices: * * [priv->np_txqfirst, priv->np_txqlast) and * [priv->np_rxqfirst, priv->np_rxqlast) * */ int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY }; enum txrx t; u_int j; for_rx_tx(t) { if (nr_flags & excluded_direction[t]) { priv->np_qfirst[t] = priv->np_qlast[t] = 0; continue; } switch (nr_mode) { case NR_REG_ALL_NIC: case NR_REG_NULL: priv->np_qfirst[t] = 0; priv->np_qlast[t] = nma_get_nrings(na, t); nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_SW: case NR_REG_NIC_SW: if (!(na->na_flags & NAF_HOST_RINGS)) { nm_prerr("host rings not supported"); return EINVAL; } priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? nma_get_nrings(na, t) : 0); priv->np_qlast[t] = netmap_all_rings(na, t); nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_ONE_NIC: if (nr_ringid >= na->num_tx_rings && nr_ringid >= na->num_rx_rings) { nm_prerr("invalid ring id %d", nr_ringid); return EINVAL; } /* if not enough rings, use the first one */ j = nr_ringid; if (j >= nma_get_nrings(na, t)) j = 0; priv->np_qfirst[t] = j; priv->np_qlast[t] = j + 1; nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_ONE_SW: if (!(na->na_flags & NAF_HOST_RINGS)) { nm_prerr("host rings not supported"); return EINVAL; } if (nr_ringid >= na->num_host_tx_rings && nr_ringid >= na->num_host_rx_rings) { nm_prerr("invalid ring id %d", nr_ringid); return EINVAL; } /* if not enough rings, use the first one */ j = nr_ringid; if (j >= nma_get_host_nrings(na, t)) j = 0; priv->np_qfirst[t] = nma_get_nrings(na, t) + j; priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1; nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; default: nm_prerr("invalid regif type %d", nr_mode); return EINVAL; } } priv->np_flags = nr_flags; /* Allow transparent forwarding mode in the host --> nic * direction only if all the TX hw rings have been opened. */ if (priv->np_qfirst[NR_TX] == 0 && priv->np_qlast[NR_TX] >= na->num_tx_rings) { priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN; } if (netmap_verbose) { nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX], nr_ringid); } return 0; } /* * Set the ring ID. For devices with a single queue, a request * for all rings is the same as a single ring. */ static int netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; int error; enum txrx t; error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) { return error; } priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1; /* optimization: count the users registered for more than * one ring, which are the ones sleeping on the global queue. 
* The default netmap_notify() callback will then * avoid signaling the global queue if nobody is using it */ for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]++; } return 0; } static void netmap_unset_ringid(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; enum txrx t; for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]--; priv->np_qfirst[t] = priv->np_qlast[t] = 0; } priv->np_flags = 0; priv->np_txpoll = 0; priv->np_kloop_state = 0; } /* Set the nr_pending_mode for the requested rings. * If requested, also try to get exclusive access to the rings, provided * the rings we want to bind are not exclusively owned by a previous bind. */ static int netmap_krings_get(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; if (netmap_debug & NM_DEBUG_ON) nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX]); /* first round: check that all the requested rings * are neither alread exclusively owned, nor we * want exclusive ownership when they are already in use */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if ((kring->nr_kflags & NKR_EXCLUSIVE) || (kring->users && excl)) { nm_prdis("ring %s busy", kring->name); return EBUSY; } } } /* second round: increment usage count (possibly marking them * as exclusive) and set the nr_pending_mode */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; kring->users++; if (excl) kring->nr_kflags |= NKR_EXCLUSIVE; kring->nr_pending_mode = NKR_NETMAP_ON; } } return 0; } /* Undo netmap_krings_get(). This is done by clearing the exclusive mode * if was asked on regif, and unset the nr_pending_mode if we are the * last users of the involved rings. */ static void netmap_krings_put(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[MR_RX]); for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (excl) kring->nr_kflags &= ~NKR_EXCLUSIVE; kring->users--; if (kring->users == 0) kring->nr_pending_mode = NKR_NETMAP_OFF; } } } static int nm_priv_rx_enabled(struct netmap_priv_d *priv) { return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]); } /* Validate the CSB entries for both directions (atok and ktoa). * To be called under NMG_LOCK(). 
*/ static int netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo) { struct nm_csb_atok *csb_atok_base = (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok; struct nm_csb_ktoa *csb_ktoa_base = (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa; enum txrx t; int num_rings[NR_TXRX], tot_rings; size_t entry_size[2]; void *csb_start[2]; int i; if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { nm_prerr("Cannot update CSB while kloop is running"); return EBUSY; } tot_rings = 0; for_rx_tx(t) { num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t]; tot_rings += num_rings[t]; } if (tot_rings <= 0) return 0; if (!(priv->np_flags & NR_EXCLUSIVE)) { nm_prerr("CSB mode requires NR_EXCLUSIVE"); return EINVAL; } entry_size[0] = sizeof(*csb_atok_base); entry_size[1] = sizeof(*csb_ktoa_base); csb_start[0] = (void *)csb_atok_base; csb_start[1] = (void *)csb_ktoa_base; for (i = 0; i < 2; i++) { /* On Linux we could use access_ok() to simplify * the validation. However, the advantage of * this approach is that it works also on * FreeBSD. */ size_t csb_size = tot_rings * entry_size[i]; void *tmp; int err; if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) { nm_prerr("Unaligned CSB address"); return EINVAL; } tmp = nm_os_malloc(csb_size); if (!tmp) return ENOMEM; if (i == 0) { /* Application --> kernel direction. */ err = copyin(csb_start[i], tmp, csb_size); } else { /* Kernel --> application direction. */ memset(tmp, 0, csb_size); err = copyout(tmp, csb_start[i], csb_size); } nm_os_free(tmp); if (err) { nm_prerr("Invalid CSB address"); return err; } } priv->np_csb_atok_base = csb_atok_base; priv->np_csb_ktoa_base = csb_ktoa_base; /* Initialize the CSB. */ for_rx_tx(t) { for (i = 0; i < num_rings[t]; i++) { struct netmap_kring *kring = NMR(priv->np_na, t)[i + priv->np_qfirst[t]]; struct nm_csb_atok *csb_atok = csb_atok_base + i; struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i; if (t == NR_RX) { csb_atok += num_rings[NR_TX]; csb_ktoa += num_rings[NR_TX]; } CSB_WRITE(csb_atok, head, kring->rhead); CSB_WRITE(csb_atok, cur, kring->rcur); CSB_WRITE(csb_atok, appl_need_kick, 1); CSB_WRITE(csb_atok, sync_flags, 1); CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur); CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail); CSB_WRITE(csb_ktoa, kern_need_kick, 1); nm_prinf("csb_init for kring %s: head %u, cur %u, " "hwcur %u, hwtail %u", kring->name, kring->rhead, kring->rcur, kring->nr_hwcur, kring->nr_hwtail); } } return 0; } /* Ensure that the netmap adapter can support the given MTU. * @return EINVAL if the na cannot be set to mtu, 0 otherwise. */ int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) { unsigned nbs = NETMAP_BUF_SIZE(na); if (mtu <= na->rx_buf_maxsize) { /* The MTU fits a single NIC slot. We only * Need to check that netmap buffers are * large enough to hold an MTU. NS_MOREFRAG * cannot be used in this case. */ if (nbs < mtu) { nm_prerr("error: netmap buf size (%u) " "< device MTU (%u)", nbs, mtu); return EINVAL; } } else { /* More NIC slots may be needed to receive * or transmit a single packet. Check that * the adapter supports NS_MOREFRAG and that * netmap buffers are large enough to hold * the maximum per-slot size. 
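 *
 * Worked example (illustrative): with 2048-byte netmap buffers and
 * rx_buf_maxsize == 2048, a 9000-byte MTU falls into this branch, so
 * the adapter must advertise NAF_MOREFRAG and a single 9000-byte
 * frame will span ceil(9000 / 2048) = 5 slots chained with
 * NS_MOREFRAG.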
*/ if (!(na->na_flags & NAF_MOREFRAG)) { nm_prerr("error: large MTU (%d) needed " "but %s does not support " "NS_MOREFRAG", mtu, na->ifp->if_xname); return EINVAL; } else if (nbs < na->rx_buf_maxsize) { nm_prerr("error: using NS_MOREFRAG on " "%s requires netmap buf size " ">= %u", na->ifp->if_xname, na->rx_buf_maxsize); return EINVAL; } else { nm_prinf("info: netmap application on " "%s needs to support " "NS_MOREFRAG " "(MTU=%u,netmap_buf_size=%u)", na->ifp->if_xname, mtu, nbs); } } return 0; } /* * possibly move the interface to netmap-mode. * If success it returns a pointer to netmap_if, otherwise NULL. * This must be called with NMG_LOCK held. * * The following na callbacks are called in the process: * * na->nm_config() [by netmap_update_config] * (get current number and size of rings) * * We have a generic one for linux (netmap_linux_config). * The bwrap has to override this, since it has to forward * the request to the wrapped adapter (netmap_bwrap_config). * * * na->nm_krings_create() * (create and init the krings array) * * One of the following: * * * netmap_hw_krings_create, (hw ports) * creates the standard layout for the krings * and adds the mbq (used for the host rings). * * * netmap_vp_krings_create (VALE ports) * add leases and scratchpads * * * netmap_pipe_krings_create (pipes) * create the krings and rings of both ends and * cross-link them * * * netmap_monitor_krings_create (monitors) * avoid allocating the mbq * * * netmap_bwrap_krings_create (bwraps) * create both the brap krings array, * the krings array of the wrapped adapter, and * (if needed) the fake array for the host adapter * * na->nm_register(, 1) * (put the adapter in netmap mode) * * This may be one of the following: * * * netmap_hw_reg (hw ports) * checks that the ifp is still there, then calls * the hardware specific callback; * * * netmap_vp_reg (VALE ports) * If the port is connected to a bridge, * set the NAF_NETMAP_ON flag under the * bridge write lock. * * * netmap_pipe_reg (pipes) * inform the other pipe end that it is no * longer responsible for the lifetime of this * pipe end * * * netmap_monitor_reg (monitors) * intercept the sync callbacks of the monitored * rings * * * netmap_bwrap_reg (bwraps) * cross-link the bwrap and hwna rings, * forward the request to the hwna, override * the hwna notify callback (to get the frames * coming from outside go through the bridge). * * */ int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_if *nifp = NULL; int error; NMG_LOCK_ASSERT(); priv->np_na = na; /* store the reference */ error = netmap_mem_finalize(na->nm_mem, na); if (error) goto err; if (na->active_fds == 0) { /* cache the allocator info in the na */ error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); if (error) goto err_drop_mem; nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, na->na_lut.objsize); /* ring configuration may have changed, fetch from the card */ netmap_update_config(na); } /* compute the range of tx and rx rings to monitor */ error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) goto err_put_lut; if (na->active_fds == 0) { /* * If this is the first registration of the adapter, * perform sanity checks and create the in-kernel view * of the netmap rings (the netmap krings). */ if (na->ifp && nm_priv_rx_enabled(priv)) { /* This netmap adapter is attached to an ifnet. 
*/ unsigned mtu = nm_os_ifnet_mtu(na->ifp); nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d", na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na)); if (na->rx_buf_maxsize == 0) { nm_prerr("%s: error: rx_buf_maxsize == 0", na->name); error = EIO; goto err_drop_mem; } error = netmap_buf_size_validate(na, mtu); if (error) goto err_drop_mem; } /* * Depending on the adapter, this may also create * the netmap rings themselves */ error = na->nm_krings_create(na); if (error) goto err_put_lut; } /* now the krings must exist and we can check whether some * previous bind has exclusive ownership on them, and set * nr_pending_mode */ error = netmap_krings_get(priv); if (error) goto err_del_krings; /* create all needed missing netmap rings */ error = netmap_mem_rings_create(na); if (error) goto err_rel_excl; /* in all cases, create a new netmap if */ nifp = netmap_mem_if_new(na, priv); if (nifp == NULL) { error = ENOMEM; goto err_rel_excl; } if (nm_kring_pending(priv)) { /* Some kring is switching mode, tell the adapter to * react on this. */ error = na->nm_register(na, 1); if (error) goto err_del_if; } /* Commit the reference. */ na->active_fds++; /* * advertise that the interface is ready by setting np_nifp. * The barrier is needed because readers (poll, *SYNC and mmap) * check for priv->np_nifp != NULL without locking */ mb(); /* make sure previous writes are visible to all CPUs */ priv->np_nifp = nifp; return 0; err_del_if: netmap_mem_if_delete(na, nifp); err_rel_excl: netmap_krings_put(priv); netmap_mem_rings_delete(na); err_del_krings: if (na->active_fds == 0) na->nm_krings_delete(na); err_put_lut: if (na->active_fds == 0) memset(&na->na_lut, 0, sizeof(na->na_lut)); err_drop_mem: netmap_mem_drop(na); err: priv->np_na = NULL; return error; } /* * update kring and ring at the end of rxsync/txsync. */ static inline void nm_sync_finalize(struct netmap_kring *kring) { /* * Update ring tail to what the kernel knows * After txsync: head/rhead/hwcur might be behind cur/rcur * if no carrier. */ kring->ring->tail = kring->rtail = kring->nr_hwtail; nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail); } /* set ring timestamp */ static inline void ring_timestamp_set(struct netmap_ring *ring) { if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) { microtime(&ring->ts); } } static int nmreq_copyin(struct nmreq_header *, int); static int nmreq_copyout(struct nmreq_header *, int); static int nmreq_checkoptions(struct nmreq_header *); /* * ioctl(2) support for the "netmap" device. * * Following a list of accepted commands: * - NIOCCTRL device control API * - NIOCTXSYNC sync TX rings * - NIOCRXSYNC sync RX rings * - SIOCGIFADDR just for convenience * - NIOCGINFO deprecated (legacy API) * - NIOCREGIF deprecated (legacy API) * * Return 0 on success, errno otherwise. 
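 *
 * Minimal user-space sketch of the NIOCCTRL path (illustrative only:
 * error handling omitted, "em0" is just an example name, and the
 * declarations assume net/netmap_user.h):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register reg;
 *	struct netmap_if *nifp;
 *	void *mem;
 *	int fd;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	ioctl(fd, NIOCCTRL, &hdr);	// handled by this function
 *	mem = mmap(NULL, reg.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	nifp = NETMAP_IF(mem, reg.nr_offset);
 *	ioctl(fd, NIOCTXSYNC, NULL);	// sync the bound TX rings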
*/ int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *td, int nr_body_is_user) { struct mbq q; /* packets from RX hw queues to host stack */ struct netmap_adapter *na = NULL; struct netmap_mem_d *nmd = NULL; struct ifnet *ifp = NULL; int error = 0; u_int i, qfirst, qlast; struct netmap_kring **krings; int sync_flags; enum txrx t; switch (cmd) { case NIOCCTRL: { struct nmreq_header *hdr = (struct nmreq_header *)data; if (hdr->nr_version < NETMAP_MIN_API || hdr->nr_version > NETMAP_MAX_API) { nm_prerr("API mismatch: got %d need %d", hdr->nr_version, NETMAP_API); return EINVAL; } /* Make a kernel-space copy of the user-space nr_body. * For convenince, the nr_body pointer and the pointers * in the options list will be replaced with their * kernel-space counterparts. The original pointers are * saved internally and later restored by nmreq_copyout */ error = nmreq_copyin(hdr, nr_body_is_user); if (error) { return error; } /* Sanitize hdr->nr_name. */ hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0'; switch (hdr->nr_reqtype) { case NETMAP_REQ_REGISTER: { struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body; struct netmap_if *nifp; /* Protect access to priv from concurrent requests. */ NMG_LOCK(); do { struct nmreq_option *opt; u_int memflags; if (priv->np_nifp != NULL) { /* thread already registered */ error = EBUSY; break; } #ifdef WITH_EXTMEM opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM); if (opt != NULL) { struct nmreq_opt_extmem *e = (struct nmreq_opt_extmem *)opt; nmd = netmap_mem_ext_create(e->nro_usrptr, &e->nro_info, &error); opt->nro_status = error; if (nmd == NULL) break; } #endif /* WITH_EXTMEM */ if (nmd == NULL && req->nr_mem_id) { /* find the allocator and get a reference */ nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) { if (netmap_verbose) { nm_prerr("%s: failed to find mem_id %u", hdr->nr_name, req->nr_mem_id); } error = EINVAL; break; } } /* find the interface and a reference */ error = netmap_get_na(hdr, &na, &ifp, nmd, 1 /* create */); /* keep reference */ if (error) break; if (NETMAP_OWNED_BY_KERN(na)) { error = EBUSY; break; } if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { nm_prerr("virt_hdr_len=%d, but application does " "not accept it", na->virt_hdr_len); error = EIO; break; } error = netmap_do_regif(priv, na, req->nr_mode, req->nr_ringid, req->nr_flags); if (error) { /* reg. failed, release priv and ref */ break; } opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB); if (opt != NULL) { struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)opt; error = netmap_csb_validate(priv, csbo); opt->nro_status = error; if (error) { netmap_do_unregif(priv); break; } } nifp = priv->np_nifp; /* return the offset of the netmap_if object */ req->nr_rx_rings = na->num_rx_rings; req->nr_tx_rings = na->num_tx_rings; req->nr_rx_slots = na->num_rx_desc; req->nr_tx_slots = na->num_tx_desc; req->nr_host_tx_rings = na->num_host_tx_rings; req->nr_host_rx_rings = na->num_host_rx_rings; error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, &req->nr_mem_id); if (error) { netmap_do_unregif(priv); break; } if (memflags & NETMAP_MEM_PRIVATE) { *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; } for_rx_tx(t) { priv->np_si[t] = nm_si_user(priv, t) ? 
&na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; } if (req->nr_extra_bufs) { if (netmap_verbose) nm_prinf("requested %d extra buffers", req->nr_extra_bufs); req->nr_extra_bufs = netmap_extra_alloc(na, &nifp->ni_bufs_head, req->nr_extra_bufs); if (netmap_verbose) nm_prinf("got %d extra buffers", req->nr_extra_bufs); } req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); error = nmreq_checkoptions(hdr); if (error) { netmap_do_unregif(priv); break; } /* store ifp reference so that priv destructor may release it */ priv->np_ifp = ifp; } while (0); if (error) { netmap_unget_na(na, ifp); } /* release the reference from netmap_mem_find() or * netmap_mem_ext_create() */ if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); break; } case NETMAP_REQ_PORT_INFO_GET: { struct nmreq_port_info_get *req = (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body; NMG_LOCK(); do { u_int memflags; if (hdr->nr_name[0] != '\0') { /* Build a nmreq_register out of the nmreq_port_info_get, * so that we can call netmap_get_na(). */ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; regreq.nr_tx_slots = req->nr_tx_slots; regreq.nr_rx_slots = req->nr_rx_slots; regreq.nr_tx_rings = req->nr_tx_rings; regreq.nr_rx_rings = req->nr_rx_rings; regreq.nr_host_tx_rings = req->nr_host_tx_rings; regreq.nr_host_rx_rings = req->nr_host_rx_rings; regreq.nr_mem_id = req->nr_mem_id; /* get a refcount */ hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */ hdr->nr_body = (uintptr_t)req; /* reset nr_body */ if (error) { na = NULL; ifp = NULL; break; } nmd = na->nm_mem; /* get memory allocator */ } else { nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1); if (nmd == NULL) { if (netmap_verbose) nm_prerr("%s: failed to find mem_id %u", hdr->nr_name, req->nr_mem_id ? req->nr_mem_id : 1); error = EINVAL; break; } } error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags, &req->nr_mem_id); if (error) break; if (na == NULL) /* only memory info */ break; netmap_update_config(na); req->nr_rx_rings = na->num_rx_rings; req->nr_tx_rings = na->num_tx_rings; req->nr_rx_slots = na->num_rx_desc; req->nr_tx_slots = na->num_tx_desc; req->nr_host_tx_rings = na->num_host_tx_rings; req->nr_host_rx_rings = na->num_host_rx_rings; } while (0); netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } #ifdef WITH_VALE case NETMAP_REQ_VALE_ATTACH: { error = netmap_vale_attach(hdr, NULL /* userspace request */); break; } case NETMAP_REQ_VALE_DETACH: { error = netmap_vale_detach(hdr, NULL /* userspace request */); break; } case NETMAP_REQ_VALE_LIST: { error = netmap_vale_list(hdr); break; } case NETMAP_REQ_PORT_HDR_SET: { struct nmreq_port_hdr *req = (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; /* Build a nmreq_register out of the nmreq_port_hdr, * so that we can call netmap_get_bdg_na(). */ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; /* For now we only support virtio-net headers, and only for * VALE ports, but this may change in future. Valid lengths * for the virtio-net header are 0 (no header), 10 and 12. 
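 * (10 is sizeof(struct nm_vnet_hdr); 12 matches the variant that
 * carries the extra 16-bit num_buffers field used with mergeable
 * rx buffers.) An illustrative request, with hdr/fd set up as in the
 * NIOCCTRL example above and hdr->nr_name naming a VALE port:
 *
 *	struct nmreq_port_hdr ph;
 *	memset(&ph, 0, sizeof(ph));
 *	ph.nr_hdr_len = 10;
 *	hdr.nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
 *	hdr.nr_body = (uintptr_t)&ph;
 *	ioctl(fd, NIOCCTRL, &hdr);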
*/ if (req->nr_hdr_len != 0 && req->nr_hdr_len != sizeof(struct nm_vnet_hdr) && req->nr_hdr_len != 12) { if (netmap_verbose) nm_prerr("invalid hdr_len %u", req->nr_hdr_len); error = EINVAL; break; } NMG_LOCK(); hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_vale_na(hdr, &na, NULL, 0); hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET; hdr->nr_body = (uintptr_t)req; if (na && !error) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; na->virt_hdr_len = req->nr_hdr_len; if (na->virt_hdr_len) { vpna->mfs = NETMAP_BUF_SIZE(na); } if (netmap_verbose) nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); netmap_adapter_put(na); } else if (!na) { error = ENXIO; } NMG_UNLOCK(); break; } case NETMAP_REQ_PORT_HDR_GET: { /* Get vnet-header length for this netmap port */ struct nmreq_port_hdr *req = (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body; /* Build a nmreq_register out of the nmreq_port_hdr, * so that we can call netmap_get_bdg_na(). */ struct nmreq_register regreq; struct ifnet *ifp; bzero(®req, sizeof(regreq)); regreq.nr_mode = NR_REG_ALL_NIC; NMG_LOCK(); hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 0); hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET; hdr->nr_body = (uintptr_t)req; if (na && !error) { req->nr_hdr_len = na->virt_hdr_len; } netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } case NETMAP_REQ_VALE_NEWIF: { error = nm_vi_create(hdr); break; } case NETMAP_REQ_VALE_DELIF: { error = nm_vi_destroy(hdr->nr_name); break; } case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: { error = nm_bdg_polling(hdr); break; } #endif /* WITH_VALE */ case NETMAP_REQ_POOLS_INFO_GET: { /* Get information from the memory allocator used for * hdr->nr_name. */ struct nmreq_pools_info *req = (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body; NMG_LOCK(); do { /* Build a nmreq_register out of the nmreq_pools_info, * so that we can call netmap_get_na(). */ struct nmreq_register regreq; bzero(®req, sizeof(regreq)); regreq.nr_mem_id = req->nr_mem_id; regreq.nr_mode = NR_REG_ALL_NIC; hdr->nr_reqtype = NETMAP_REQ_REGISTER; hdr->nr_body = (uintptr_t)®req; error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */ hdr->nr_body = (uintptr_t)req; /* reset nr_body */ if (error) { na = NULL; ifp = NULL; break; } nmd = na->nm_mem; /* grab the memory allocator */ if (nmd == NULL) { error = EINVAL; break; } /* Finalize the memory allocator, get the pools * information and release the allocator. */ error = netmap_mem_finalize(nmd, na); if (error) { break; } error = netmap_mem_pools_info_get(req, nmd); netmap_mem_drop(na); } while (0); netmap_unget_na(na, ifp); NMG_UNLOCK(); break; } case NETMAP_REQ_CSB_ENABLE: { struct nmreq_option *opt; opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB); if (opt == NULL) { error = EINVAL; } else { struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)opt; NMG_LOCK(); error = netmap_csb_validate(priv, csbo); NMG_UNLOCK(); opt->nro_status = error; } break; } case NETMAP_REQ_SYNC_KLOOP_START: { error = netmap_sync_kloop(priv, hdr); break; } case NETMAP_REQ_SYNC_KLOOP_STOP: { error = netmap_sync_kloop_stop(priv); break; } default: { error = EINVAL; break; } } /* Write back request body to userspace and reset the * user-space pointer. 
*/ error = nmreq_copyout(hdr, error); break; } case NIOCTXSYNC: case NIOCRXSYNC: { if (unlikely(priv->np_nifp == NULL)) { error = ENXIO; break; } mb(); /* make sure following reads are not from cache */ if (unlikely(priv->np_csb_atok_base)) { nm_prerr("Invalid sync in CSB mode"); error = EBUSY; break; } na = priv->np_na; /* we have a reference */ mbq_init(&q); t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX); krings = NMR(na, t); qfirst = priv->np_qfirst[t]; qlast = priv->np_qlast[t]; sync_flags = priv->np_sync_flags; for (i = qfirst; i < qlast; i++) { struct netmap_kring *kring = krings[i]; struct netmap_ring *ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &error))) { error = (error ? EIO : 0); continue; } if (cmd == NIOCTXSYNC) { if (netmap_debug & NM_DEBUG_TXSYNC) nm_prinf("pre txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) { nm_sync_finalize(kring); } if (netmap_debug & NM_DEBUG_TXSYNC) nm_prinf("post txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); } else { if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } if (nm_may_forward_up(kring)) { /* transparent forwarding, see netmap_poll() */ netmap_grab_packets(kring, &q, netmap_fwd); } if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) { nm_sync_finalize(kring); } ring_timestamp_set(ring); } nm_kr_put(kring); } if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } break; } default: { return netmap_ioctl_legacy(priv, cmd, data, td); break; } } return (error); } size_t nmreq_size_by_type(uint16_t nr_reqtype) { switch (nr_reqtype) { case NETMAP_REQ_REGISTER: return sizeof(struct nmreq_register); case NETMAP_REQ_PORT_INFO_GET: return sizeof(struct nmreq_port_info_get); case NETMAP_REQ_VALE_ATTACH: return sizeof(struct nmreq_vale_attach); case NETMAP_REQ_VALE_DETACH: return sizeof(struct nmreq_vale_detach); case NETMAP_REQ_VALE_LIST: return sizeof(struct nmreq_vale_list); case NETMAP_REQ_PORT_HDR_SET: case NETMAP_REQ_PORT_HDR_GET: return sizeof(struct nmreq_port_hdr); case NETMAP_REQ_VALE_NEWIF: return sizeof(struct nmreq_vale_newif); case NETMAP_REQ_VALE_DELIF: case NETMAP_REQ_SYNC_KLOOP_STOP: case NETMAP_REQ_CSB_ENABLE: return 0; case NETMAP_REQ_VALE_POLLING_ENABLE: case NETMAP_REQ_VALE_POLLING_DISABLE: return sizeof(struct nmreq_vale_polling); case NETMAP_REQ_POOLS_INFO_GET: return sizeof(struct nmreq_pools_info); case NETMAP_REQ_SYNC_KLOOP_START: return sizeof(struct nmreq_sync_kloop_start); } return 0; } static size_t nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size) { size_t rv = sizeof(struct nmreq_option); #ifdef NETMAP_REQ_OPT_DEBUG if (nro_reqtype & NETMAP_REQ_OPT_DEBUG) return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG); #endif /* NETMAP_REQ_OPT_DEBUG */ switch (nro_reqtype) { #ifdef WITH_EXTMEM case NETMAP_REQ_OPT_EXTMEM: rv = sizeof(struct nmreq_opt_extmem); break; #endif /* WITH_EXTMEM */ case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS: if (nro_size >= rv) rv = nro_size; break; case NETMAP_REQ_OPT_CSB: rv = sizeof(struct nmreq_opt_csb); break; case NETMAP_REQ_OPT_SYNC_KLOOP_MODE: rv = sizeof(struct nmreq_opt_sync_kloop_mode); break; } /* subtract the common header */ return rv - sizeof(struct nmreq_option); } /* * nmreq_copyin: create an in-kernel version of the request. 
* * We build the following data structure: * * hdr -> +-------+ buf * | | +---------------+ * +-------+ |usr body ptr | * |options|-. +---------------+ * +-------+ | |usr options ptr| * |body |--------->+---------------+ * +-------+ | | | * | | copy of body | * | | | * | +---------------+ * | | NULL | * | +---------------+ * | .---| |\ * | | +---------------+ | * | .------| | | * | | | +---------------+ \ option table * | | | | ... | / indexed by option * | | | +---------------+ | type * | | | | | | * | | | +---------------+/ * | | | |usr next ptr 1 | * `-|----->+---------------+ * | | | copy of opt 1 | * | | | | * | | .-| nro_next | * | | | +---------------+ * | | | |usr next ptr 2 | * | `-`>+---------------+ * | | copy of opt 2 | * | | | * | .-| nro_next | * | | +---------------+ * | | | | * ~ ~ ~ ... ~ * | .-| | * `----->+---------------+ * | |usr next ptr n | * `>+---------------+ * | copy of opt n | * | | * | nro_next(NULL)| * +---------------+ * * The options and body fields of the hdr structure are overwritten * with in-kernel valid pointers inside the buf. The original user * pointers are saved in the buf and restored on copyout. * The list of options is copied and the pointers adjusted. The * original pointers are saved before the option they belonged. * * The option table has an entry for every availabe option. Entries * for options that have not been passed contain NULL. * */ int nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user) { size_t rqsz, optsz, bufsz; int error = 0; char *ker = NULL, *p; struct nmreq_option **next, *src, **opt_tab; struct nmreq_option buf; uint64_t *ptrs; if (hdr->nr_reserved) { if (netmap_verbose) nm_prerr("nr_reserved must be zero"); return EINVAL; } if (!nr_body_is_user) return 0; hdr->nr_reserved = nr_body_is_user; /* compute the total size of the buffer */ rqsz = nmreq_size_by_type(hdr->nr_reqtype); if (rqsz > NETMAP_REQ_MAXSIZE) { error = EMSGSIZE; goto out_err; } if ((rqsz && hdr->nr_body == (uintptr_t)NULL) || (!rqsz && hdr->nr_body != (uintptr_t)NULL)) { /* Request body expected, but not found; or * request body found but unexpected. */ if (netmap_verbose) nm_prerr("nr_body expected but not found, or vice versa"); error = EINVAL; goto out_err; } bufsz = 2 * sizeof(void *) + rqsz + NETMAP_REQ_OPT_MAX * sizeof(opt_tab); /* compute the size of the buf below the option table. * It must contain a copy of every received option structure. * For every option we also need to store a copy of the user * list pointer. 
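 *
 * Worked example (sizes are illustrative): a NETMAP_REQ_REGISTER
 * request carrying one NETMAP_REQ_OPT_CSB option chained on
 * hdr->nr_options is copied into a buffer of
 *
 *	  2 * sizeof(void *)                   saved user body/options ptrs
 *	+ sizeof(struct nmreq_register)        copy of the body
 *	+ NETMAP_REQ_OPT_MAX * sizeof(void *)  option table
 *	+ sizeof(void *)                       saved user nro_next ptr
 *	+ sizeof(struct nmreq_opt_csb)         copy of the option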
*/ optsz = 0; for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src; src = (struct nmreq_option *)(uintptr_t)buf.nro_next) { error = copyin(src, &buf, sizeof(*src)); if (error) goto out_err; optsz += sizeof(*src); optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size); if (rqsz + optsz > NETMAP_REQ_MAXSIZE) { error = EMSGSIZE; goto out_err; } bufsz += sizeof(void *); } bufsz += optsz; ker = nm_os_malloc(bufsz); if (ker == NULL) { error = ENOMEM; goto out_err; } p = ker; /* write pointer into the buffer */ /* make a copy of the user pointers */ ptrs = (uint64_t*)p; *ptrs++ = hdr->nr_body; *ptrs++ = hdr->nr_options; p = (char *)ptrs; /* copy the body */ error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz); if (error) goto out_restore; /* overwrite the user pointer with the in-kernel one */ hdr->nr_body = (uintptr_t)p; p += rqsz; /* start of the options table */ opt_tab = (struct nmreq_option **)p; p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX; /* copy the options */ next = (struct nmreq_option **)&hdr->nr_options; src = *next; while (src) { struct nmreq_option *opt; /* copy the option header */ ptrs = (uint64_t *)p; opt = (struct nmreq_option *)(ptrs + 1); error = copyin(src, opt, sizeof(*src)); if (error) goto out_restore; /* make a copy of the user next pointer */ *ptrs = opt->nro_next; /* overwrite the user pointer with the in-kernel one */ *next = opt; /* initialize the option as not supported. * Recognized options will update this field. */ opt->nro_status = EOPNOTSUPP; /* check for invalid types */ if (opt->nro_reqtype < 1) { if (netmap_verbose) nm_prinf("invalid option type: %u", opt->nro_reqtype); opt->nro_status = EINVAL; error = EINVAL; goto next; } if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) { /* opt->nro_status is already EOPNOTSUPP */ error = EOPNOTSUPP; goto next; } /* if the type is valid, index the option in the table * unless it is a duplicate. 
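 * For example (illustrative), passing two NETMAP_REQ_OPT_CSB options in
 * the same request marks both copies with nro_status = EINVAL and the
 * whole request fails with EINVAL.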
*/ if (opt_tab[opt->nro_reqtype] != NULL) { if (netmap_verbose) nm_prinf("duplicate option: %u", opt->nro_reqtype); opt->nro_status = EINVAL; opt_tab[opt->nro_reqtype]->nro_status = EINVAL; error = EINVAL; goto next; } opt_tab[opt->nro_reqtype] = opt; p = (char *)(opt + 1); /* copy the option body */ optsz = nmreq_opt_size_by_type(opt->nro_reqtype, opt->nro_size); if (optsz) { /* the option body follows the option header */ error = copyin(src + 1, p, optsz); if (error) goto out_restore; p += optsz; } next: /* move to next option */ next = (struct nmreq_option **)&opt->nro_next; src = *next; } if (error) nmreq_copyout(hdr, error); return error; out_restore: ptrs = (uint64_t *)ker; hdr->nr_body = *ptrs++; hdr->nr_options = *ptrs++; hdr->nr_reserved = 0; nm_os_free(ker); out_err: return error; } static int nmreq_copyout(struct nmreq_header *hdr, int rerror) { struct nmreq_option *src, *dst; void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart; uint64_t *ptrs; size_t bodysz; int error; if (!hdr->nr_reserved) return rerror; /* restore the user pointers in the header */ ptrs = (uint64_t *)ker - 2; bufstart = ptrs; hdr->nr_body = *ptrs++; src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; hdr->nr_options = *ptrs; if (!rerror) { /* copy the body */ bodysz = nmreq_size_by_type(hdr->nr_reqtype); error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz); if (error) { rerror = error; goto out; } } /* copy the options */ dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options; while (src) { size_t optsz; uint64_t next; /* restore the user pointer */ next = src->nro_next; ptrs = (uint64_t *)src - 1; src->nro_next = *ptrs; /* always copy the option header */ error = copyout(src, dst, sizeof(*src)); if (error) { rerror = error; goto out; } /* copy the option body only if there was no error */ if (!rerror && !src->nro_status) { optsz = nmreq_opt_size_by_type(src->nro_reqtype, src->nro_size); if (optsz) { error = copyout(src + 1, dst + 1, optsz); if (error) { rerror = error; goto out; } } } src = (struct nmreq_option *)(uintptr_t)next; dst = (struct nmreq_option *)(uintptr_t)*ptrs; } out: hdr->nr_reserved = 0; nm_os_free(bufstart); return rerror; } struct nmreq_option * nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype) { struct nmreq_option **opt_tab; if (!hdr->nr_options) return NULL; opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) - (NETMAP_REQ_OPT_MAX + 1); return opt_tab[reqtype]; } static int nmreq_checkoptions(struct nmreq_header *hdr) { struct nmreq_option *opt; /* return error if there is still any option * marked as not supported */ for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next) if (opt->nro_status == EOPNOTSUPP) return EOPNOTSUPP; return 0; } /* * select(2) and poll(2) handlers for the "netmap" device. * * Can be called for one or more queues. * Return true the event mask corresponding to ready events. * If there are no ready events (and 'sr' is not NULL), do a * selrecord on either individual selinfo or on the global one. * Device-dependent parts (locking and sync of tx/rx rings) * are done through callbacks. * * On linux, arguments are really pwait, the poll table, and 'td' is struct file * * The first one is remapped to pwait as selrecord() uses the name as an * hidden argument. 
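 *
 * Typical user-space RX loop driving this handler (illustrative
 * sketch; fd/nifp set up as in the NIOCCTRL example above, poll.h
 * assumed, consume() is a placeholder):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *
 *	poll(&pfd, 1, 1000);
 *	while (!nm_ring_empty(ring)) {
 *		uint32_t i = ring->cur;
 *		char *buf = NETMAP_BUF(ring, ring->slot[i].buf_idx);
 *		consume(buf, ring->slot[i].len);
 *		ring->head = ring->cur = nm_ring_next(ring, i);
 *	}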
*/ int netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr) { struct netmap_adapter *na; struct netmap_kring *kring; struct netmap_ring *ring; u_int i, want[NR_TXRX], revents = 0; NM_SELINFO_T *si[NR_TXRX]; #define want_tx want[NR_TX] #define want_rx want[NR_RX] struct mbq q; /* packets from RX hw queues to host stack */ /* * In order to avoid nested locks, we need to "double check" * txsync and rxsync if we decide to do a selrecord(). * retry_tx (and retry_rx, later) prevent looping forever. */ int retry_tx = 1, retry_rx = 1; /* Transparent mode: send_down is 1 if we have found some * packets to forward (host RX ring --> NIC) during the rx * scan and we have not sent them down to the NIC yet. * Transparent mode requires to bind all rings to a single * file descriptor. */ int send_down = 0; int sync_flags = priv->np_sync_flags; mbq_init(&q); if (unlikely(priv->np_nifp == NULL)) { return POLLERR; } mb(); /* make sure following reads are not from cache */ na = priv->np_na; if (unlikely(!nm_netmap_on(na))) return POLLERR; if (unlikely(priv->np_csb_atok_base)) { nm_prerr("Invalid poll in CSB mode"); return POLLERR; } if (netmap_debug & NM_DEBUG_ON) nm_prinf("device %s events 0x%x", na->name, events); want_tx = events & (POLLOUT | POLLWRNORM); want_rx = events & (POLLIN | POLLRDNORM); /* * If the card has more than one queue AND the file descriptor is * bound to all of them, we sleep on the "global" selinfo, otherwise * we sleep on individual selinfo (FreeBSD only allows two selinfo's * per file descriptor). * The interrupt routine in the driver wake one or the other * (or both) depending on which clients are active. * * rxsync() is only called if we run out of buffers on a POLLIN. * txsync() is called if we run out of buffers on POLLOUT, or * there are pending packets to send. The latter can be disabled * passing NETMAP_NO_TX_POLL in the NIOCREG call. */ si[NR_RX] = priv->np_si[NR_RX]; si[NR_TX] = priv->np_si[NR_TX]; #ifdef __FreeBSD__ /* * We start with a lock free round which is cheap if we have * slots available. If this fails, then lock and call the sync * routines. We can't do this on Linux, as the contract says * that we must call nm_os_selrecord() unconditionally. */ if (want_tx) { const enum txrx t = NR_TX; for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (kring->ring->cur != kring->ring->tail) { /* Some unseen TX space is available, so what * we don't need to run txsync. */ revents |= want[t]; want[t] = 0; break; } } } if (want_rx) { const enum txrx t = NR_RX; int rxsync_needed = 0; for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { kring = NMR(na, t)[i]; if (kring->ring->cur == kring->ring->tail || kring->rhead != kring->ring->head) { /* There are no unseen packets on this ring, * or there are some buffers to be returned * to the netmap port. We therefore go ahead * and run rxsync. */ rxsync_needed = 1; break; } } if (!rxsync_needed) { revents |= want_rx; want_rx = 0; } } #endif #ifdef linux /* The selrecord must be unconditional on linux. */ nm_os_selrecord(sr, si[NR_RX]); nm_os_selrecord(sr, si[NR_TX]); #endif /* linux */ /* * If we want to push packets out (priv->np_txpoll) or * want_tx is still set, we must issue txsync calls * (on all rings, to avoid that the tx rings stall). * Fortunately, normal tx mode has np_txpoll set. */ if (priv->np_txpoll || want_tx) { /* * The first round checks if anyone is ready, if not * do a selrecord and another round to handle races. 
* want_tx goes to 0 if any space is found, and is * used to skip rings with no pending transmissions. */ flush_tx: for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) { int found = 0; kring = na->tx_rings[i]; ring = kring->ring; /* * Don't try to txsync this TX ring if we already found some * space in some of the TX rings (want_tx == 0) and there are no * TX slots in this ring that need to be flushed to the NIC * (head == hwcur). */ if (!send_down && !want_tx && ring->head == kring->nr_hwcur) continue; if (nm_kr_tryget(kring, 1, &revents)) continue; if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } else { if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); } /* * If we found new slots, notify potential * listeners on the same ring. * Since we just did a txsync, look at the copies * of cur,tail in the kring. */ found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { /* notify other listeners */ revents |= want_tx; want_tx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } /* if there were any packet to forward we must have handled them by now */ send_down = 0; if (want_tx && retry_tx && sr) { #ifndef linux nm_os_selrecord(sr, si[NR_TX]); #endif /* !linux */ retry_tx = 0; goto flush_tx; } } /* * If want_rx is still set scan receive rings. * Do it on all rings because otherwise we starve. */ if (want_rx) { /* two rounds here for race avoidance */ do_retry_rx: for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) { int found = 0; kring = na->rx_rings[i]; ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &revents))) continue; if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } /* now we can use kring->rcur, rtail */ /* * transparent mode support: collect packets from * hw rxring(s) that have been released by the user */ if (nm_may_forward_up(kring)) { netmap_grab_packets(kring, &q, netmap_fwd); } /* Clear the NR_FORWARD flag anyway, it may be set by * the nm_sync() below only on for the host RX ring (see * netmap_rxsync_from_host()). */ kring->nr_kflags &= ~NR_FORWARD; if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); send_down |= (kring->nr_kflags & NR_FORWARD); ring_timestamp_set(ring); found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { revents |= want_rx; retry_rx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } #ifndef linux if (retry_rx && sr) { nm_os_selrecord(sr, si[NR_RX]); } #endif /* !linux */ if (send_down || retry_rx) { retry_rx = 0; if (send_down) goto flush_tx; /* and retry_rx */ else goto do_retry_rx; } } /* * Transparent mode: released bufs (i.e. between kring->nr_hwcur and * ring->head) marked with NS_FORWARD on hw rx rings are passed up * to the host stack. */ if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } return (revents); #undef want_tx #undef want_rx } int nma_intr_enable(struct netmap_adapter *na, int onoff) { bool changed = false; enum txrx t; int i; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; int on = !(kring->nr_kflags & NKR_NOINTR); if (!!onoff != !!on) { changed = true; } if (onoff) { kring->nr_kflags &= ~NKR_NOINTR; } else { kring->nr_kflags |= NKR_NOINTR; } } } if (!changed) { return 0; /* nothing to do */ } if (!na->nm_intr) { nm_prerr("Cannot %s interrupts for %s", onoff ? 
"enable" : "disable", na->name); return -1; } na->nm_intr(na, onoff); return 0; } /*-------------------- driver support routines -------------------*/ /* default notify callback */ static int netmap_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->notify_na; enum txrx t = kring->tx; nm_os_selwakeup(&kring->si); /* optimization: avoid a wake up on the global * queue if nobody has registered for more * than one ring */ if (na->si_users[t] > 0) nm_os_selwakeup(&na->si[t]); return NM_IRQ_COMPLETED; } /* called by all routines that create netmap_adapters. * provide some defaults and get a reference to the * memory allocator */ int netmap_attach_common(struct netmap_adapter *na) { if (!na->rx_buf_maxsize) { /* Set a conservative default (larger is safer). */ na->rx_buf_maxsize = PAGE_SIZE; } #ifdef __FreeBSD__ if (na->na_flags & NAF_HOST_RINGS && na->ifp) { na->if_input = na->ifp->if_input; /* for netmap_send_up */ } na->pdev = na; /* make sure netmap_mem_map() is called */ #endif /* __FreeBSD__ */ if (na->na_flags & NAF_HOST_RINGS) { if (na->num_host_rx_rings == 0) na->num_host_rx_rings = 1; if (na->num_host_tx_rings == 0) na->num_host_tx_rings = 1; } if (na->nm_krings_create == NULL) { /* we assume that we have been called by a driver, * since other port types all provide their own * nm_krings_create */ na->nm_krings_create = netmap_hw_krings_create; na->nm_krings_delete = netmap_hw_krings_delete; } if (na->nm_notify == NULL) na->nm_notify = netmap_notify; na->active_fds = 0; if (na->nm_mem == NULL) { /* use the global allocator */ na->nm_mem = netmap_mem_get(&nm_mem); } #ifdef WITH_VALE if (na->nm_bdg_attach == NULL) /* no special nm_bdg_attach callback. On VALE * attach, we need to interpose a bwrap */ na->nm_bdg_attach = netmap_default_bdg_attach; #endif return 0; } /* Wrapper for the register callback provided netmap-enabled * hardware drivers. * nm_iszombie(na) means that the driver module has been * unloaded, so we cannot call into it. * nm_os_ifnet_lock() must guarantee mutual exclusion with * module unloading. */ static int netmap_hw_reg(struct netmap_adapter *na, int onoff) { struct netmap_hw_adapter *hwna = (struct netmap_hw_adapter*)na; int error = 0; nm_os_ifnet_lock(); if (nm_iszombie(na)) { if (onoff) { error = ENXIO; } else if (na != NULL) { na->na_flags &= ~NAF_NETMAP_ON; } goto out; } error = hwna->nm_hw_register(na, onoff); out: nm_os_ifnet_unlock(); return error; } static void netmap_hw_dtor(struct netmap_adapter *na) { if (na->ifp == NULL) return; NM_DETACH_NA(na->ifp); } /* * Allocate a netmap_adapter object, and initialize it from the * 'arg' passed by the driver on attach. * We allocate a block of memory of 'size' bytes, which has room * for struct netmap_adapter plus additional room private to * the caller. * Return 0 on success, ENOMEM otherwise. 
*/ int netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg) { struct netmap_hw_adapter *hwna = NULL; struct ifnet *ifp = NULL; if (size < sizeof(struct netmap_hw_adapter)) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("Invalid netmap adapter size %d", (int)size); return EINVAL; } if (arg == NULL || arg->ifp == NULL) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("either arg or arg->ifp is NULL"); return EINVAL; } if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) { if (netmap_debug & NM_DEBUG_ON) nm_prerr("%s: invalid rings tx %d rx %d", arg->name, arg->num_tx_rings, arg->num_rx_rings); return EINVAL; } ifp = arg->ifp; if (NM_NA_CLASH(ifp)) { /* If NA(ifp) is not null but there is no valid netmap * adapter it means that someone else is using the same * pointer (e.g. ax25_ptr on linux). This happens for * instance when also PF_RING is in use. */ nm_prerr("Error: netmap adapter hook is busy"); return EBUSY; } hwna = nm_os_malloc(size); if (hwna == NULL) goto fail; hwna->up = *arg; hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE; strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name)); if (override_reg) { hwna->nm_hw_register = hwna->up.nm_register; hwna->up.nm_register = netmap_hw_reg; } if (netmap_attach_common(&hwna->up)) { nm_os_free(hwna); goto fail; } netmap_adapter_get(&hwna->up); NM_ATTACH_NA(ifp, &hwna->up); nm_os_onattach(ifp); if (arg->nm_dtor == NULL) { hwna->up.nm_dtor = netmap_hw_dtor; } if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n", hwna->up.num_tx_rings, hwna->up.num_tx_desc, hwna->up.num_rx_rings, hwna->up.num_rx_desc); return 0; fail: nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna); return (hwna ? EINVAL : ENOMEM); } int netmap_attach(struct netmap_adapter *arg) { return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter), 1 /* override nm_reg */); } void NM_DBG(netmap_adapter_get)(struct netmap_adapter *na) { if (!na) { return; } refcount_acquire(&na->na_refcount); } /* returns 1 iff the netmap_adapter is destroyed */ int NM_DBG(netmap_adapter_put)(struct netmap_adapter *na) { if (!na) return 1; if (!refcount_release(&na->na_refcount)) return 0; if (na->nm_dtor) na->nm_dtor(na); if (na->tx_rings) { /* XXX should not happen */ if (netmap_debug & NM_DEBUG_ON) nm_prerr("freeing leftover tx_rings"); na->nm_krings_delete(na); } netmap_pipe_dealloc(na); if (na->nm_mem) netmap_mem_put(na->nm_mem); bzero(na, sizeof(*na)); nm_os_free(na); return 1; } /* nm_krings_create callback for all hardware native adapters */ int netmap_hw_krings_create(struct netmap_adapter *na) { int ret = netmap_krings_create(na, 0); if (ret == 0) { /* initialize the mbq for the sw rx ring */ u_int lim = netmap_real_rings(na, NR_RX), i; for (i = na->num_rx_rings; i < lim; i++) { mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue); } nm_prdis("initialized sw rx queue %d", na->num_rx_rings); } return ret; } /* * Called on module unload by the netmap-enabled drivers */ void netmap_detach(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); if (!na) return; NMG_LOCK(); netmap_set_all_rings(na, NM_KR_LOCKED); /* * if the netmap adapter is not native, somebody * changed it, so we can not release it here. * The NAF_ZOMBIE flag will notify the new owner that * the driver is gone. */ if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { na->na_flags |= NAF_ZOMBIE; } /* give active users a chance to notice that NAF_ZOMBIE has been * turned on, so that they can stop and return an error to userspace. 
* Note that this becomes a NOP if there are no active users and, * therefore, the put() above has deleted the na, since now NA(ifp) is * NULL. */ netmap_enable_all_rings(ifp); NMG_UNLOCK(); } /* * Intercept packets from the network stack and pass them * to netmap as incoming packets on the 'software' ring. * * We only store packets in a bounded mbq and then copy them * in the relevant rxsync routine. * * We rely on the OS to make sure that the ifp and na do not go * away (typically the caller checks for IFF_DRV_RUNNING or the like). * In nm_register() or whenever there is a reinitialization, * we make sure to make the mode change visible here. */ int netmap_transmit(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring, *tx_kring; u_int len = MBUF_LEN(m); u_int error = ENOBUFS; unsigned int txr; struct mbq *q; int busy; u_int i; i = MBUF_TXQ(m); if (i >= na->num_host_rx_rings) { i = i % na->num_host_rx_rings; } kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i]; // XXX [Linux] we do not need this lock // if we follow the down/configure/up protocol -gl // mtx_lock(&na->core_lock); if (!nm_netmap_on(na)) { nm_prerr("%s not in netmap mode anymore", na->name); error = ENXIO; goto done; } txr = MBUF_TXQ(m); if (txr >= na->num_tx_rings) { txr %= na->num_tx_rings; } tx_kring = NMR(na, NR_TX)[txr]; if (tx_kring->nr_mode == NKR_NETMAP_OFF) { return MBUF_TRANSMIT(na, ifp, m); } q = &kring->rx_queue; // XXX reconsider long packets if we handle fragments if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */ nm_prerr("%s from_host, drop packet size %d > %d", na->name, len, NETMAP_BUF_SIZE(na)); goto done; } if (!netmap_generic_hwcsum) { if (nm_os_mbuf_has_csum_offld(m)) { nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name); goto done; } } if (nm_os_mbuf_has_seg_offld(m)) { nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name); goto done; } #ifdef __FreeBSD__ ETHER_BPF_MTAP(ifp, m); #endif /* __FreeBSD__ */ /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic() * and maybe other instances of netmap_transmit (the latter * not possible on Linux). * We enqueue the mbuf only if we are sure there is going to be * enough room in the host RX ring, otherwise we drop it. */ mbq_lock(q); busy = kring->nr_hwtail - kring->nr_hwcur; if (busy < 0) busy += kring->nkr_num_slots; if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) { nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name, kring->nr_hwcur, kring->nr_hwtail, mbq_len(q)); } else { mbq_enqueue(q, m); nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q)); /* notify outside the lock */ m = NULL; error = 0; } mbq_unlock(q); done: if (m) m_freem(m); /* unconditionally wake up listeners */ kring->nm_notify(kring, 0); /* this is normally netmap_notify(), but for nics * connected to a bridge it is netmap_bwrap_intr_notify(), * that possibly forwards the frames through the switch */ return (error); } /* - * netmap_reset() is called by the driver routines when reinitializing - * a ring. The driver is in charge of locking to protect the kring. - * If native netmap mode is not set just return NULL. - * If native netmap mode is set, in particular, we have to set nr_mode to - * NKR_NETMAP_ON. + * Reset function to be called by the driver routines when reinitializing + * a hardware ring. The driver is in charge of locking to protect the kring + * while this operation is being performed. 
This is normally achieved by + * calling netmap_disable_all_rings() before triggering a reset. + * If the kring is not in netmap mode, return NULL to inform the caller + * that this is the case. + * If the kring is in netmap mode, set hwofs so that the netmap indices + * seen by userspace (head/cut/tail) do not change, although the internal + * NIC indices have been reset to 0. + * In any case, adjust kring->nr_mode. */ struct netmap_slot * netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur) { struct netmap_kring *kring; - int new_hwofs, lim; + u_int new_hwtail, new_hwofs; if (!nm_native_on(na)) { nm_prdis("interface not in native netmap mode"); return NULL; /* nothing to reinitialize */ } - /* XXX note- in the new scheme, we are not guaranteed to be - * under lock (e.g. when called on a device reset). - * In this case, we should set a flag and do not trust too - * much the values. In practice: TODO - * - set a RESET flag somewhere in the kring - * - do the processing in a conservative way - * - let the *sync() fixup at the end. - */ if (tx == NR_TX) { if (n >= na->num_tx_rings) return NULL; - kring = na->tx_rings[n]; - - if (kring->nr_pending_mode == NKR_NETMAP_OFF) { - kring->nr_mode = NKR_NETMAP_OFF; - return NULL; - } - - // XXX check whether we should use hwcur or rcur - new_hwofs = kring->nr_hwcur - new_cur; + /* + * Set hwofs to rhead, so that slots[rhead] is mapped to + * the NIC internal slot 0, and thus the netmap buffer + * at rhead is the next to be transmitted. Transmissions + * that were pending before the reset are considered as + * sent, so that we can have hwcur = rhead. All the slots + * are now owned by the user, so we can also reinit hwtail. + */ + new_hwofs = kring->rhead; + new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1); } else { if (n >= na->num_rx_rings) return NULL; kring = na->rx_rings[n]; - - if (kring->nr_pending_mode == NKR_NETMAP_OFF) { - kring->nr_mode = NKR_NETMAP_OFF; - return NULL; - } - - new_hwofs = kring->nr_hwtail - new_cur; + /* + * Set hwofs to hwtail, so that slots[hwtail] is mapped to + * the NIC internal slot 0, and thus the netmap buffer + * at hwtail is the next to be given to the NIC. + * Unread slots (the ones in [rhead,hwtail[) are owned by + * the user, and thus the caller cannot give them + * to the NIC right now. + */ + new_hwofs = kring->nr_hwtail; + new_hwtail = kring->nr_hwtail; } - lim = kring->nkr_num_slots - 1; - if (new_hwofs > lim) - new_hwofs -= lim + 1; - - /* Always set the new offset value and realign the ring. */ - if (netmap_debug & NM_DEBUG_ON) - nm_prinf("%s %s%d hwofs %d -> %d, hwtail %d -> %d", - na->name, - tx == NR_TX ? "TX" : "RX", n, - kring->nkr_hwofs, new_hwofs, - kring->nr_hwtail, - tx == NR_TX ? lim : kring->nr_hwtail); - kring->nkr_hwofs = new_hwofs; - if (tx == NR_TX) { - kring->nr_hwtail = kring->nr_hwcur + lim; - if (kring->nr_hwtail > lim) - kring->nr_hwtail -= lim + 1; + if (kring->nr_pending_mode == NKR_NETMAP_OFF) { + kring->nr_mode = NKR_NETMAP_OFF; + return NULL; } + if (netmap_verbose) { + nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name, + kring->nr_hwcur, kring->rhead, + kring->nr_hwtail, new_hwtail, + kring->nkr_hwofs, new_hwofs); + } + kring->nr_hwcur = kring->rhead; + kring->nr_hwtail = new_hwtail; + kring->nkr_hwofs = new_hwofs; /* * Wakeup on the individual and global selwait * We do the wakeup here, but the ring is not yet reconfigured. * However, we are under lock so there are no races. 
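 *
 * Worked example (illustrative): on a 512-slot TX kring with
 * rhead == rcur == 100 at reset time, the assignments above give
 * nkr_hwofs = 100, so netmap slot 100 now maps to NIC slot 0 and is
 * the next to be transmitted; hwcur becomes 100 and hwtail becomes
 * nm_prev(100, 511) == 99, i.e. every slot is owned by userspace.
 * For an RX kring with nr_hwtail == 300, hwofs and hwtail both become
 * 300, and the unread slots in [rhead, 300) stay with the user.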
*/ kring->nr_mode = NKR_NETMAP_ON; kring->nm_notify(kring, 0); return kring->ring->slot; } /* * Dispatch rx/tx interrupts to the netmap rings. * * "work_done" is non-null on the RX path, NULL for the TX path. * We rely on the OS to make sure that there is only one active * instance per queue, and that there is appropriate locking. * * The 'notify' routine depends on what the ring is attached to. * - for a netmap file descriptor, do a selwakeup on the individual * waitqueue, plus one on the global one if needed * (see netmap_notify) * - for a nic connected to a switch, call the proper forwarding routine * (see netmap_bwrap_intr_notify) */ int netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done) { struct netmap_kring *kring; enum txrx t = (work_done ? NR_RX : NR_TX); q &= NETMAP_RING_MASK; if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) { nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q); } if (q >= nma_get_nrings(na, t)) return NM_IRQ_PASS; // not a physical queue kring = NMR(na, t)[q]; if (kring->nr_mode == NKR_NETMAP_OFF) { return NM_IRQ_PASS; } if (t == NR_RX) { kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? *work_done = 1; /* do not fire napi again */ } return kring->nm_notify(kring, 0); } /* * Default functions to handle rx/tx interrupts from a physical device. * "work_done" is non-null on the RX path, NULL for the TX path. * * If the card is not in netmap mode, simply return NM_IRQ_PASS, * so that the caller proceeds with regular processing. * Otherwise call netmap_common_irq(). * * If the card is connected to a netmap file descriptor, * do a selwakeup on the individual queue, plus one on the global one * if needed (multiqueue card _and_ there are multiqueue listeners), * and return NR_IRQ_COMPLETED. * * Finally, if called on rx from an interface connected to a switch, * calls the proper forwarding routine. */ int netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done) { struct netmap_adapter *na = NA(ifp); /* * XXX emulated netmap mode sets NAF_SKIP_INTR so * we still use the regular driver even though the previous * check fails. It is unclear whether we should use * nm_native_on() here. */ if (!nm_netmap_on(na)) return NM_IRQ_PASS; if (na->na_flags & NAF_SKIP_INTR) { nm_prdis("use regular interrupt"); return NM_IRQ_PASS; } return netmap_common_irq(na, q, work_done); } /* set/clear native flags and if_transmit/netdev_ops */ void nm_set_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We do the setup for intercepting packets only if we are the - * first user of this adapapter. */ + * first user of this adapter. */ if (na->active_fds > 0) { return; } na->na_flags |= NAF_NETMAP_ON; nm_os_onenter(ifp); nm_update_hostrings_mode(na); } void nm_clear_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We undo the setup for intercepting packets only if we are the * last user of this adapter. */ if (na->active_fds > 0) { return; } nm_update_hostrings_mode(na); nm_os_onexit(ifp); na->na_flags &= ~NAF_NETMAP_ON; } void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff) { enum txrx t; for_rx_tx(t) { int i; for (i = 0; i < netmap_real_rings(na, t); i++) { struct netmap_kring *kring = NMR(na, t)[i]; if (onoff && nm_kring_pending_on(kring)) kring->nr_mode = NKR_NETMAP_ON; else if (!onoff && nm_kring_pending_off(kring)) kring->nr_mode = NKR_NETMAP_OFF; } } } /* * Module loader and unloader * * netmap_init() creates the /dev/netmap device and initializes * all global variables. 
Returns 0 on success, errno on failure * (but there is no chance) * * netmap_fini() destroys everything. */ static struct cdev *netmap_dev; /* /dev/netmap character device. */ extern struct cdevsw netmap_cdevsw; void netmap_fini(void) { if (netmap_dev) destroy_dev(netmap_dev); /* we assume that there are no longer netmap users */ nm_os_ifnet_fini(); netmap_uninit_bridges(); netmap_mem_fini(); NMG_LOCK_DESTROY(); nm_prinf("netmap: unloaded module."); } int netmap_init(void) { int error; NMG_LOCK_INIT(); error = netmap_mem_init(); if (error != 0) goto fail; /* * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls * when the module is compiled in. * XXX could use make_dev_credv() to get error number */ netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "netmap"); if (!netmap_dev) goto fail; error = netmap_init_bridges(); if (error) goto fail; #ifdef __FreeBSD__ nm_os_vi_init_index(); #endif error = nm_os_ifnet_init(); if (error) goto fail; nm_prinf("netmap: loaded module"); return (0); fail: netmap_fini(); return (EINVAL); /* may be incorrect */ } diff --git a/sys/net/iflib.c b/sys/net/iflib.c index 3e115e992e4a..1653fd31a7f7 100644 --- a/sys/net/iflib.c +++ b/sys/net/iflib.c @@ -1,6888 +1,6919 @@ /*- * Copyright (c) 2014-2018, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #ifdef PCI_IOV #include #endif #include /* * enable accounting of every mbuf as it comes in to and goes out of * iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. 
The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. This will of course vary based on the workload: * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which * is quite expensive, thus suggesting very little prefetch. * - small packet forwarding which is just returning a single mbuf to * UMA will typically be very fast vis a vis the cost of a memory * access. */ /* * File organization: * - private structures * - iflib private utility functions * - ifnet functions * - vlan registry and other exported functions * - iflib public core functions * * */ MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library"); #define IFLIB_RXEOF_MORE (1U << 0) #define IFLIB_RXEOF_EMPTY (2U << 0) struct iflib_txq; typedef struct iflib_txq *iflib_txq_t; struct iflib_rxq; typedef struct iflib_rxq *iflib_rxq_t; struct iflib_fl; typedef struct iflib_fl *iflib_fl_t; struct iflib_ctx; static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); static void iflib_timer(void *arg); static void iflib_tqg_detach(if_ctx_t ctx); typedef struct iflib_filter_info { driver_filter_t *ifi_filter; void *ifi_filter_arg; struct grouptask *ifi_task; void *ifi_ctx; } *iflib_filter_info_t; struct iflib_ctx { KOBJ_FIELDS; /* * Pointer to hardware driver's softc */ void *ifc_softc; device_t ifc_dev; if_t ifc_ifp; cpuset_t ifc_cpus; if_shared_ctx_t ifc_sctx; struct if_softc_ctx ifc_softc_ctx; struct sx ifc_ctx_sx; struct mtx ifc_state_mtx; iflib_txq_t ifc_txqs; iflib_rxq_t ifc_rxqs; uint32_t ifc_if_flags; uint32_t ifc_flags; uint32_t ifc_max_fl_buf_size; uint32_t ifc_rx_mbuf_sz; int ifc_link_state; int ifc_watchdog_events; struct cdev *ifc_led_dev; struct resource *ifc_msix_mem; struct if_irq ifc_legacy_irq; struct grouptask ifc_admin_task; struct grouptask ifc_vflr_task; struct iflib_filter_info ifc_filter_info; struct ifmedia ifc_media; struct sysctl_oid *ifc_sysctl_node; uint16_t ifc_sysctl_ntxqs; uint16_t ifc_sysctl_nrxqs; uint16_t ifc_sysctl_qs_eq_override; uint16_t ifc_sysctl_rx_budget; uint16_t ifc_sysctl_tx_abdicate; uint16_t ifc_sysctl_core_offset; #define CORE_OFFSET_UNSPECIFIED 0xffff uint8_t ifc_sysctl_separate_txrx; qidx_t ifc_sysctl_ntxds[8]; qidx_t ifc_sysctl_nrxds[8]; struct if_txrx ifc_txrx; #define isc_txd_encap ifc_txrx.ift_txd_encap #define isc_txd_flush ifc_txrx.ift_txd_flush #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update #define isc_rxd_available ifc_txrx.ift_rxd_available #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_flush ifc_txrx.ift_rxd_flush #define isc_legacy_intr ifc_txrx.ift_legacy_intr eventhandler_tag ifc_vlan_attach_event; eventhandler_tag ifc_vlan_detach_event; uint8_t ifc_mac[ETHER_ADDR_LEN]; }; void * iflib_get_softc(if_ctx_t ctx) { return (ctx->ifc_softc); } device_t iflib_get_dev(if_ctx_t ctx) { return (ctx->ifc_dev); } if_t iflib_get_ifp(if_ctx_t ctx) { return (ctx->ifc_ifp); } struct ifmedia * iflib_get_media(if_ctx_t ctx) { return (&ctx->ifc_media); } uint32_t iflib_get_flags(if_ctx_t ctx) { return (ctx->ifc_flags); } void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]) { bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN); } if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx) { return (&ctx->ifc_softc_ctx); } if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx) { return (ctx->ifc_sctx); } #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2) #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*)) #define CACHE_PTR_NEXT(ptr) 
((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1))) #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP) #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF) typedef struct iflib_sw_rx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ caddr_t *ifsd_cl; /* direct cluster pointer for rx */ bus_addr_t *ifsd_ba; /* bus addr of cluster for rx */ } iflib_rxsd_array_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ bus_dmamap_t *ifsd_tso_map; /* bus_dma maps for TSO packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ } if_txsd_vec_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ #define IFLIB_MIN_DESC_SEC 16384 #define IFLIB_DEFAULT_TX_UPDATE_FREQ 16 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* maximum number of txqs that can share an rx interrupt */ #define IFLIB_MAX_TX_SHARED_INTR 4 /* this should really scale with ring size - this is a fairly arbitrary value */ #define TX_BATCH_SIZE 32 #define IFLIB_RESTART_BUDGET 8 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { qidx_t ift_in_use; qidx_t ift_cidx; qidx_t ift_cidx_processed; qidx_t ift_pidx; uint8_t ift_gen; uint8_t ift_br_offset; uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; /* implicit pad */ uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; uint64_t ift_cleaned_prev; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; uint64_t ift_last_timer_tick; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring *ift_br; struct grouptask ift_task; qidx_t ift_size; uint16_t ift_id; struct callout ift_timer; #ifdef DEV_NETMAP struct callout ift_netmap_timer; #endif /* DEV_NETMAP */ if_txsd_vec_t ift_sds; uint8_t ift_qstatus; uint8_t ift_closed; uint8_t ift_update_freq; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_buf_tag; bus_dma_tag_t ift_tso_buf_tag; iflib_dma_info_t ift_ifdi; #define MTX_NAME_LEN 32 char ift_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { qidx_t ifl_cidx; qidx_t ifl_pidx; qidx_t ifl_credits; uint8_t ifl_gen; uint8_t ifl_rxd_size; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ bitstr_t *ifl_rx_bitmap; qidx_t ifl_fragidx; /* constant */ qidx_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_array_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_buf_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline qidx_t get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen) { qidx_t used; if (pidx > cidx) 
used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 && pidx == cidx) used = size; else panic("bad state"); return (used); } #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen)) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) struct iflib_rxq { if_ctx_t ifr_ctx; iflib_fl_t ifr_fl; uint64_t ifr_rx_irq; /* * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is * the completion queue consumer index. Otherwise it's unused. */ qidx_t ifr_cq_cidx; uint16_t ifr_id; uint8_t ifr_nfl; uint8_t ifr_ntxqirq; uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR]; uint8_t ifr_fl_offset; struct lro_ctrl ifr_lc; struct grouptask ifr_task; struct callout ifr_watchdog; struct iflib_filter_info ifr_filter_info; iflib_dma_info_t ifr_ifdi; /* dynamically allocate if any drivers need a value substantially larger than this */ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ifr_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); typedef struct if_rxsd { caddr_t *ifsd_cl; struct mbuf **ifsd_m; iflib_fl_t ifsd_fl; } *if_rxsd_t; /* multiple of word size */ #ifdef __LP64__ #define PKT_INFO_SIZE 6 #define RXD_INFO_SIZE 5 #define PKT_TYPE uint64_t #else #define PKT_INFO_SIZE 11 #define RXD_INFO_SIZE 8 #define PKT_TYPE uint32_t #endif #define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3) #define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4) typedef struct if_pkt_info_pad { PKT_TYPE pkt_val[PKT_INFO_SIZE]; } *if_pkt_info_pad_t; typedef struct if_rxd_info_pad { PKT_TYPE rxd_val[RXD_INFO_SIZE]; } *if_rxd_info_pad_t; CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info)); CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info)); static inline void pkt_info_zero(if_pkt_info_t pi) { if_pkt_info_pad_t pi_pad; pi_pad = (if_pkt_info_pad_t)pi; pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0; pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0; #ifndef __LP64__ pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0; pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0; #endif } static device_method_t iflib_pseudo_methods[] = { DEVMETHOD(device_attach, noop_attach), DEVMETHOD(device_detach, iflib_pseudo_detach), DEVMETHOD_END }; driver_t iflib_pseudodriver = { "iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx), }; static inline void rxd_info_zero(if_rxd_info_t ri) { if_rxd_info_pad_t ri_pad; int i; ri_pad = (if_rxd_info_pad_t)ri; for (i = 0; i < RXD_LOOP_BOUND; i += 4) { ri_pad->rxd_val[i] = 0; ri_pad->rxd_val[i+1] = 0; ri_pad->rxd_val[i+2] = 0; ri_pad->rxd_val[i+3] = 0; } #ifdef __LP64__ ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0; #endif } /* * Only allow a single packet to take up most 1/nth of the tx ring */ #define MAX_SINGLE_PACKET_FRACTION 12 #define IF_BAD_DMA (bus_addr_t)-1 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING)) #define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock") #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx) #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx) #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx) #define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF) #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx) #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx) 
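/*
 * Illustrative sketch, not part of iflib: a stand-alone restatement of the
 * get_inuse()/TXQ_AVAIL() arithmetic above, with made-up numbers, showing
 * how the wrap-around case and the generation bit interact.  The function
 * name example_ring_in_use() is invented for this example, and the block
 * is kept under "notdef" so it is never compiled.
 */
#ifdef notdef
static u_int
example_ring_in_use(u_int size, u_int cidx, u_int pidx, uint8_t gen)
{

	if (pidx > cidx)
		return (pidx - cidx);		/* no wrap */
	if (pidx < cidx)
		return (size - cidx + pidx);	/* producer wrapped past the end */
	return (gen ? size : 0);		/* pidx == cidx: full vs. empty */
}

/*
 * With size = 1024, cidx = 1000 and pidx = 8 the producer has wrapped, so
 * 1024 - 1000 + 8 = 32 descriptors are in use and TXQ_AVAIL() would report
 * 1024 - 32 = 992 free ones.  When pidx == cidx only the generation bit
 * tells a full ring (gen == 1) apart from an empty one (gen == 0).
 */
#endif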
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx) #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) void iflib_set_detach(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); } /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); static int iflib_no_tx_batch = 0; SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW, &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# TX mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# TX mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# TX mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# TX frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# RX allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_pad_mbuf_fail; static int iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail; SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD, &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int 
iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_if_input; static int iflib_rx_mbuf_null; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# RX intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD, &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_if_input = iflib_rx_mbuf_null = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); #ifdef ALTQ static void iflib_altq_if_start(if_t ifp); static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m); #endif static int iflib_register(if_ctx_t); static void iflib_deregister(if_ctx_t); static void iflib_unregister_vlan_handlers(if_ctx_t ctx); static uint16_t iflib_get_mbuf_size_for(unsigned int size); static void iflib_init_locked(if_ctx_t ctx); static void iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); static void iflib_if_init_locked(if_ctx_t ctx); static void iflib_free_intr_mem(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m); #endif static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets = SLIST_HEAD_INITIALIZER(cpu_offsets); struct cpu_offset { SLIST_ENTRY(cpu_offset) entries; cpuset_t set; unsigned int refcount; uint16_t offset; }; static struct mtx cpu_offset_mtx; 
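/*
 * Illustrative sketch, not iflib's actual queue-placement policy: how a
 * refcounted struct cpu_offset entry for a given CPU set could be looked
 * up under cpu_offset_mtx, using the cpu_offsets list declared above.
 * The helper name example_cpu_offset_lookup() is invented for this
 * example, and the block is kept under "notdef" so it is never compiled.
 */
#ifdef notdef
static uint16_t
example_cpu_offset_lookup(cpuset_t *set)
{
	struct cpu_offset *op;
	uint16_t off = 0;	/* 0 if no entry exists for this set (simplified) */

	mtx_lock(&cpu_offset_mtx);
	SLIST_FOREACH(op, &cpu_offsets, entries) {
		if (CPU_CMP(&op->set, set) == 0) {
			/* share the offset and take a reference */
			op->refcount++;
			off = op->offset;
			break;
		}
	}
	mtx_unlock(&cpu_offset_mtx);
	return (off);
}
#endif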
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock", MTX_DEF); NETDUMP_DEFINE(iflib); static int iflib_num_rx_descs(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; return scctx->isc_nrxd[first_rxq]; } static int iflib_num_tx_descs(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; return scctx->isc_ntxd[first_txq]; } #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init); static void iflib_netmap_timer(void *arg); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { if_t ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; int status; CTX_LOCK(ctx); - IFDI_INTR_DISABLE(ctx); - - /* Tell the stack that the interface is no longer active */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); iflib_stop(ctx); /* * Enable (or disable) netmap flags, and intercept (or restore) * ifp->if_transmit. This is done once the device has been stopped - * to prevent race conditions. + * to prevent race conditions. Also, this must be done after + * calling netmap_disable_all_rings() and before calling + * netmap_enable_all_rings(), so that these two functions see the + * updated state of the NAF_NETMAP_ON bit. */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } iflib_init_locked(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1; if (status) nm_clear_native_flags(na); CTX_UNLOCK(ctx); return (status); } static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; - u_int nm_i = kring->nr_hwcur; struct netmap_ring *ring = kring->ring; bus_dmamap_t *map; struct if_rxd_update iru; if_ctx_t ctx = rxq->ifr_ctx; iflib_fl_t fl = &rxq->ifr_fl[0]; u_int nic_i_first, nic_i; + u_int nm_i; int i, n; #if IFLIB_DEBUG_COUNTERS int rf_count = 0; #endif /* * This function is used both at initialization and in rxsync. 
* At initialization we need to prepare (with isc_rxd_refill()) - * all the (N) netmap buffers in the ring, in such a way to keep - * fl->ifl_pidx and kring->nr_hwcur in sync (except for - * kring->nkr_hwofs); at rxsync time, both indexes point to the - * next buffer to be refilled. + * all the netmap buffers currently owned by the kernel, in + * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync + * (except for kring->nkr_hwofs). These may be less than + * kring->nkr_num_slots if netmap_reset() was called while + * an application using the kring that still owned some + * buffers. + * At rxsync time, both indexes point to the next buffer to be + * refilled. * In any case we publish (with isc_rxd_flush()) up to * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod * pointer to overrun the head/cons pointer, although this is * not necessary for some NICs (e.g. vmx). */ - if (__predict_false(init)) - n = kring->nkr_num_slots; - else { - n = kring->rhead - nm_i; + if (__predict_false(init)) { + n = kring->nkr_num_slots - nm_kr_rxspace(kring); + } else { + n = kring->rhead - kring->nr_hwcur; if (n == 0) return (0); /* Nothing to do. */ if (n < 0) n += kring->nkr_num_slots; } - /* Start to refill from nr_hwcur, publishing n buffers. */ iru_init(&iru, rxq, 0 /* flid */); map = fl->ifl_sds.ifsd_map; nic_i = fl->ifl_pidx; - MPASS(nic_i == netmap_idx_k2n(kring, nm_i)); + nm_i = netmap_idx_n2k(kring, nic_i); + if (__predict_false(init)) { + /* + * On init/reset, nic_i must be 0, and we must + * start to refill from hwtail (see netmap_reset()). + */ + MPASS(nic_i == 0); + MPASS(nm_i == kring->nr_hwtail); + } else + MPASS(nm_i == kring->nr_hwcur); DBG_COUNTER_INC(fl_refills); while (n > 0) { #if IFLIB_DEBUG_COUNTERS if (++rf_count == 9) DBG_COUNTER_INC(fl_refills_large); #endif nic_i_first = nic_i; for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[i]); MPASS(i < IFLIB_MAX_RX_REFRESH); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ return netmap_ring_reinit(kring); fl->ifl_rxd_idxs[i] = nic_i; if (__predict_false(init)) { netmap_load_map(na, fl->ifl_buf_tag, map[nic_i], addr); } else if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_buf_tag, map[nic_i], addr); } bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i], BUS_DMASYNC_PREREAD); slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } iru.iru_pidx = nic_i_first; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); } fl->ifl_pidx = nic_i; - MPASS(!init || nm_i == 0); + /* + * At the end of the loop we must have refilled everything + * we could possibly refill. + */ MPASS(nm_i == kring->rhead); kring->nr_hwcur = nm_i; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nm_prev(nic_i, lim)); DBG_COUNTER_INC(rxd_flush); return (0); } #define NETMAP_TX_TIMER_US 90 /* * Reconcile kernel and user view of the transmit ring. * * All information is in the kring. * Userspace wants to send packets up to the one before kring->rhead, * kernel knows kring->nr_hwcur is the first unsent packet. * * Here we push packets out (as many as possible), and possibly * reclaim buffers from previously completed transmission. * * The caller (netmap) guarantees that there is only one instance * running at any time. 
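 * As a concrete example (made-up numbers, for illustration only): on a
 * 512-slot ring with nr_hwcur = 10 and rhead = 14, slots 10..13 hold new
 * packets, so txsync encapsulates four descriptors, advances nr_hwcur to
 * 14 and flushes the NIC tail up to the corresponding nic_i (obtained
 * with netmap_idx_k2n()).
 *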
Any interference with other driver * methods should be handled by the individual drivers. */ static int iflib_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; if_t ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap kring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct if_pkt_info pi; /* * interrupts on every tx packet are expensive so request * them every half ring, or where NS_REPORT is set */ u_int report_frequency = kring->nkr_num_slots >> 1; /* device-specific */ if_ctx_t ctx = ifp->if_softc; iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id]; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: process new packets to send. * nm_i is the current index in the netmap kring, * nic_i is the corresponding index in the NIC ring. * * If we have packets to send (nm_i != head) * iterate over the netmap ring, fetch length and update * the corresponding slot in the NIC ring. Some drivers also * need to update the buffer's physical address in the NIC slot * even NS_BUF_CHANGED is not set (PNMB computes the addresses). * * The netmap_reload_map() calls is especially expensive, * even when (as in this case) the tag is 0, so do only * when the buffer has actually changed. * * If possible do not set the report/intr bit on all slots, * but only a few times per ring or when NS_REPORT is set. * * Finally, on 10G and faster drivers, it might be useful * to prefetch the next slot and txr entry. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ pkt_info_zero(&pi); pi.ipi_segs = txq->ift_segs; pi.ipi_qsidx = kring->ring_id; nic_i = netmap_idx_k2n(kring, nm_i); __builtin_prefetch(&ring->slot[nm_i]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int flags = (slot->flags & NS_REPORT || nic_i == 0 || nic_i == report_frequency) ? IPI_TX_INTR : 0; /* device-specific */ pi.ipi_len = len; pi.ipi_segs[0].ds_addr = paddr; pi.ipi_segs[0].ds_len = len; pi.ipi_nsegs = 1; pi.ipi_ndescs = 0; pi.ipi_pidx = nic_i; pi.ipi_flags = flags; /* Fill the slot in the NIC ring. */ ctx->isc_txd_encap(ctx->ifc_softc, &pi); DBG_COUNTER_INC(tx_encap); /* prefetch for next round */ __builtin_prefetch(&ring->slot[nm_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]); NM_CHECK_ADDR_LEN(na, addr, len); if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[nic_i], addr); } /* make sure changes to the buffer are synced */ bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[nic_i], BUS_DMASYNC_PREWRITE); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = nm_i; /* synchronize the NIC ring */ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the tx unit up to slot nic_i (excluded) */ ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i); } /* * Second part: reclaim buffers for completed transmissions. * * If there are unclaimed buffers, attempt to reclaim them. 
* If we don't manage to reclaim them all, and TX IRQs are not in use, * trigger a per-tx-queue timer to try again later. */ if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { if (iflib_tx_credits_update(ctx, txq)) { /* some tx completed, increment avail */ nic_i = txq->ift_cidx_processed; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } } if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { callout_reset_sbt_on(&txq->ift_netmap_timer, NETMAP_TX_TIMER_US * SBT_1US, SBT_1US, iflib_netmap_timer, txq, txq->ift_netmap_timer.c_cpu, 0); } return (0); } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient. * The caller guarantees a single invocations, but races against * the rest of the driver should be handled here. * * On call, kring->rhead is the first packet that userspace wants * to keep, and kring->rcur is the wakeup point. * The kernel has previously reported packets up to kring->rtail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int iflib_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; if_t ifp = na->ifp; uint32_t nm_i; /* index into the netmap ring */ uint32_t nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; if_ctx_t ctx = ifp->if_softc; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; iflib_fl_t fl = &rxq->ifr_fl[0]; struct if_rxd_info ri; qidx_t *cidxp; /* * netmap only uses free list 0, to avoid out of order consumption * of receive buffers */ bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: import newly received packets. * * nm_i is the index of the next free slot in the netmap ring, * nic_i is the index of the next received packet in the NIC ring * (or in the free list 0 if IFLIB_HAS_RXCQ is set), and they may * differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * nic_i = fl->ifl_cidx; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * fl->ifl_cidx is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim); bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ; int crclen = iflib_crcstrip ? 0 : 4; int error, avail; /* * For the free list consumer index, we use the same * logic as in iflib_rxeof(). */ if (have_rxcq) cidxp = &rxq->ifr_cq_cidx; else cidxp = &fl->ifl_cidx; avail = ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, *cidxp, USHRT_MAX); nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); MPASS(nm_i == kring->nr_hwtail); for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) { rxd_info_zero(&ri); ri.iri_frags = rxq->ifr_frags; ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; ri.iri_cidx = *cidxp; error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); ring->slot[nm_i].len = error ? 
0 : ri.iri_len - crclen; ring->slot[nm_i].flags = 0; if (have_rxcq) { *cidxp = ri.iri_cidx; while (*cidxp >= scctx->isc_nrxd[0]) *cidxp -= scctx->isc_nrxd[0]; } bus_dmamap_sync(fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); fl->ifl_cidx = nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss ++; iflib_rx_miss_bufs += n; } kring->nr_hwtail = nm_i; } kring->nr_kflags &= ~NKR_PENDINTR; } /* * Second part: skip past packets that userspace has released. * (kring->nr_hwcur to head excluded), * and make the buffers available for reception. * As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ netmap_fl_refill(rxq, kring, false); return (0); } static void iflib_netmap_intr(struct netmap_adapter *na, int onoff) { if_ctx_t ctx = na->ifp->if_softc; CTX_LOCK(ctx); if (onoff) { IFDI_INTR_ENABLE(ctx); } else { IFDI_INTR_DISABLE(ctx); } CTX_UNLOCK(ctx); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = iflib_num_tx_descs(ctx); na.num_rx_desc = iflib_num_rx_descs(ctx); na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.nm_intr = iflib_netmap_intr; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static int iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == NULL) return (0); for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } return (1); } static int iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_kring *kring; struct netmap_slot *slot; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == NULL) return (0); kring = na->rx_rings[rxq->ifr_id]; netmap_fl_refill(rxq, kring, true); return (1); } static void iflib_netmap_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; /* * Wake up the netmap application, to give it a chance to * call txsync and reclaim more completed TX buffers. 
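 * This callout is armed by iflib_netmap_txsync() above: when a txsync
 * leaves TX buffers still to be reclaimed and TX interrupts are not in
 * use (IFC_NETMAP_TX_IRQ clear), it fires NETMAP_TX_TIMER_US (90 us)
 * later so the application gets another chance to reclaim.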
*/ netmap_tx_irq(ctx->ifc_ifp, txq->ift_id); } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) (0) #define iflib_netmap_rxq_init(ctx, rxq) (0) #define iflib_netmap_detach(ifp) +#define netmap_enable_all_rings(ifp) +#define netmap_disable_all_rings(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } static __inline void prefetch2cachelines(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); #if (CACHE_LINE_SIZE < 128) __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long))))); #endif } #else #define prefetch(x) #define prefetch2cachelines(x) #endif static void iflib_gen_mac(if_ctx_t ctx) { struct thread *td; MD5_CTX mdctx; char uuid[HOSTUUIDLEN+1]; char buf[HOSTUUIDLEN+16]; uint8_t *mac; unsigned char digest[16]; td = curthread; mac = ctx->ifc_mac; uuid[HOSTUUIDLEN] = 0; bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN); snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev)); /* * Generate a pseudo-random, deterministic MAC * address based on the UUID and unit number. * The FreeBSD Foundation OUI of 58-9C-FC is used. */ MD5Init(&mdctx); MD5Update(&mdctx, buf, strlen(buf)); MD5Final(digest, &mdctx); mac[0] = 0x58; mac[1] = 0x9C; mac[2] = 0xFC; mac[3] = digest[0]; mac[4] = digest[1]; mac[5] = digest[2]; } static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) { iflib_fl_t fl; fl = &rxq->ifr_fl[flid]; iru->iru_paddrs = fl->ifl_bus_addrs; iru->iru_idxs = fl->ifl_rxd_idxs; iru->iru_qsidx = rxq->ifr_id; iru->iru_buf_size = fl->ifl_buf_size; iru->iru_flidx = fl->ifl_id; } static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags) { int err; device_t dev = ctx->ifc_dev; err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); } int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) { if_shared_ctx_t sctx = ctx->ifc_sctx; KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); return (iflib_dma_alloc_align(ctx, size, 
sctx->isc_q_align, dma, mapflags)); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } #ifdef EARLY_AP_STARTUP static const int iflib_started = 1; #else /* * We used to abuse the smp_started flag to decide if the queues have been * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). * That gave bad races, since the SYSINIT() runs strictly after smp_started * is set. Run a SYSINIT() strictly after that to just set a usable * completion flag. */ static int iflib_started; static void iflib_record_started(void *arg) { iflib_started = 1; } SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, iflib_record_started, NULL); #endif static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; int result; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int iflib_fast_intr_rxtx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if_ctx_t ctx; iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; iflib_txq_t txq; void *sc; int i, cidx, result; qidx_t txqid; bool intr_enable, intr_legacy; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } ctx = rxq->ifr_ctx; sc = ctx->ifc_softc; intr_enable = false; intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY); MPASS(rxq->ifr_ntxqirq); for (i = 0; i < rxq->ifr_ntxqirq; i++) { txqid = rxq->ifr_txqid[i]; txq = &ctx->ifc_txqs[txqid]; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (!ctx->isc_txd_credits_update(sc, txqid, false)) { if (intr_legacy) intr_enable = true; else IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); continue; } GROUPTASK_ENQUEUE(&txq->ift_task); } if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) cidx = rxq->ifr_cq_cidx; else cidx = rxq->ifr_fl[0].ifl_cidx; if (iflib_rxd_avail(ctx, rxq, cidx, 1)) GROUPTASK_ENQUEUE(gtask); else { if (intr_legacy) intr_enable = true; else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); DBG_COUNTER_INC(rx_intr_enables); } if (intr_enable) IFDI_INTR_ENABLE(ctx); return (FILTER_HANDLED); } static int iflib_fast_intr_ctx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; int result; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if 
(info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, const char *name) { int rc, flags; struct resource *res; void *tag = NULL; device_t dev = ctx->ifc_dev; flags = RF_ACTIVE; if (ctx->ifc_flags & IFC_LEGACY) flags |= RF_SHAREABLE; MPASS(rid < 512); irq->ii_rid = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate DMA resources for TX buffers as well as memory for the TX * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a * iflib_sw_tx_desc_array structure, storing all the information that * is needed to transmit a packet on the wire. This is called only * once at attach, setup is done every reset. * **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; bus_size_t tsomaxsize; int err, nsegments, ntsosegments; bool tso; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; tsomaxsize = scctx->isc_tx_tso_size_max; if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU) tsomaxsize += sizeof(struct ether_vlan_header); MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) { MPASS(ntsosegments > 0); MPASS(sctx->isc_tso_maxsize >= tsomaxsize); } /* * Set up DMA tags for TX buffers. */ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_buf_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); goto fail; } tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0; if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tsomaxsize, /* maxsize */ ntsosegments, /* nsegments */ sctx->isc_tso_maxsegsize,/* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_buf_tag))) { device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n", err); goto fail; } /* Allocate memory for the TX mbuf map. 
*/ if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX mbuf map memory\n"); err = ENOMEM; goto fail; } /* * Create the DMA maps for TX buffers. */ if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TX buffer DMA map memory\n"); err = ENOMEM; goto fail; } if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TSO TX buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_buf_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } if (!tso) continue; err = bus_dmamap_create(txq->ift_tso_buf_tag, 0, &txq->ift_sds.ifsd_tso_map[i]); if (err != 0) { device_printf(dev, "Unable to create TSO TX DMA map\n"); goto fail; } } return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; if (txq->ift_sds.ifsd_map != NULL) { map = txq->ift_sds.ifsd_map[i]; bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, map); bus_dmamap_destroy(txq->ift_buf_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } if (txq->ift_sds.ifsd_tso_map != NULL) { map = txq->ift_sds.ifsd_tso_map[i]; bus_dmamap_sync(txq->ift_tso_buf_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, map); bus_dmamap_destroy(txq->ift_tso_buf_tag, map); txq->ift_sds.ifsd_tso_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_br != NULL) { ifmp_ring_free(txq->ift_br); txq->ift_br = NULL; } mtx_destroy(&txq->ift_mtx); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_tso_map != NULL) { free(txq->ift_sds.ifsd_tso_map, M_IFLIB); txq->ift_sds.ifsd_tso_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_buf_tag != NULL) { bus_dma_tag_destroy(txq->ift_buf_tag); txq->ift_buf_tag = NULL; } if (txq->ift_tso_buf_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_buf_tag); txq->ift_tso_buf_tag = NULL; } if (txq->ift_ifdi != NULL) { free(txq->ift_ifdi, M_IFLIB); } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]); } if (txq->ift_sds.ifsd_tso_map != NULL) { bus_dmamap_sync(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[i]); } m_freem(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; 
iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* XXX make configurable */ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; /* Reset indices */ txq->ift_cidx_processed = 0; txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate DMA resources for RX buffers as well as memory for the RX * mbuf map, direct RX cluster pointer map and RX cluster bus address * map. RX DMA map, RX mbuf map, direct RX cluster pointer map and * RX cluster map are kept in a iflib_sw_rx_desc_array structure. * Since we use use one entry in iflib_sw_rx_desc_array per received * packet, the maximum number of entries we'll need is equal to the * number of hardware receive descriptors that we've allocated. * **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ /* Set up DMA tag for RX buffers. */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_buf_tag); if (err) { device_printf(dev, "Unable to allocate RX DMA tag: %d\n", err); goto fail; } /* Allocate memory for the RX mbuf map. */ if (!(fl->ifl_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX mbuf map memory\n"); err = ENOMEM; goto fail; } /* Allocate memory for the direct RX cluster pointer map. */ if (!(fl->ifl_sds.ifsd_cl = (caddr_t *) malloc(sizeof(caddr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX cluster map memory\n"); err = ENOMEM; goto fail; } /* Allocate memory for the RX cluster bus address map. */ if (!(fl->ifl_sds.ifsd_ba = (bus_addr_t *) malloc(sizeof(bus_addr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX bus address map memory\n"); err = ENOMEM; goto fail; } /* * Create the DMA maps for RX buffers. 
*/ if (!(fl->ifl_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer DMA map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { err = bus_dmamap_create(fl->ifl_buf_tag, 0, &fl->ifl_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create RX buffer DMA map\n"); goto fail; } } } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } /** * iflib_fl_refill - refill an rxq free-buffer list * @ctx: the iflib context * @fl: the free list to refill * @count: the number of new buffers to allocate * * (Re)populate an rxq free-buffer list with up to @count new packet buffers. * The caller must assure that @count does not exceed the queue's capacity * minus one (since we always leave a descriptor unavailable). */ static uint8_t iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct if_rxd_update iru; struct rxq_refill_cb_arg cb_arg; struct mbuf *m; caddr_t cl, *sd_cl; struct mbuf **sd_m; bus_dmamap_t *sd_map; bus_addr_t bus_addr, *sd_ba; int err, frag_idx, i, idx, n, pidx; qidx_t credits; MPASS(count <= fl->ifl_size - fl->ifl_credits - 1); sd_m = fl->ifl_sds.ifsd_m; sd_map = fl->ifl_sds.ifsd_map; sd_cl = fl->ifl_sds.ifsd_cl; sd_ba = fl->ifl_sds.ifsd_ba; pidx = fl->ifl_pidx; idx = pidx; frag_idx = fl->ifl_fragidx; credits = fl->ifl_credits; i = 0; n = count; MPASS(n > 0); MPASS(credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); iru_init(&iru, fl->ifl_rxq, fl->ifl_id); while (n-- > 0) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. 
* * If the cluster is still set then we know a minimum sized * packet was received */ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); if (frag_idx < 0) bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); MPASS(frag_idx >= 0); if ((cl = sd_cl[frag_idx]) == NULL) { cl = uma_zalloc(fl->ifl_zone, M_NOWAIT); if (__predict_false(cl == NULL)) break; cb_arg.error = 0; MPASS(sd_map != NULL); err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx], cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, BUS_DMA_NOWAIT); if (__predict_false(err != 0 || cb_arg.error)) { uma_zfree(fl->ifl_zone, cl); break; } sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr; sd_cl[frag_idx] = cl; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } else { bus_addr = sd_ba[frag_idx]; } bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx], BUS_DMASYNC_PREREAD); MPASS(sd_m[frag_idx] == NULL); m = m_gethdr(M_NOWAIT, MT_NOINIT); if (__predict_false(m == NULL)) break; sd_m[frag_idx] = m; bit_set(fl->ifl_rx_bitmap, frag_idx); #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); fl->ifl_rxd_idxs[i] = frag_idx; fl->ifl_bus_addrs[i] = bus_addr; credits++; i++; MPASS(credits <= fl->ifl_size); if (++idx == fl->ifl_size) { #ifdef INVARIANTS fl->ifl_gen = 1; #endif idx = 0; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); fl->ifl_pidx = idx; fl->ifl_credits = credits; pidx = idx; i = 0; } } if (n < count - 1) { if (i != 0) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); fl->ifl_pidx = idx; fl->ifl_credits = credits; } DBG_COUNTER_INC(rxd_flush); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, fl->ifl_pidx); if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) { fl->ifl_fragidx = frag_idx + 1; if (fl->ifl_fragidx == fl->ifl_size) fl->ifl_fragidx = 0; } else { fl->ifl_fragidx = frag_idx; } } return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY); } static inline uint8_t iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl) { /* * We leave an unused descriptor to avoid pidx to catch up with cidx. * This is important as it confuses most NICs. For instance, * Intel NICs have (per receive ring) RDH and RDT registers, where * RDH points to the next receive descriptor to be used by the NIC, * and RDT for the next receive descriptor to be published by the * driver to the NIC (RDT - 1 is thus the last valid one). * The condition RDH == RDT means no descriptors are available to * the NIC, and thus it would be ambiguous if it also meant that * all the descriptors are available to the NIC. 
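 * As a worked example (made-up numbers): with ifl_size = 1024 and
 * ifl_credits = 1000, the code below refills at most
 * 1024 - 1000 - 1 = 23 descriptors, so the producer index always stops
 * one slot short of the consumer index and RDH == RDT keeps its single,
 * unambiguous meaning of "no descriptors available to the NIC".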
*/ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) return (iflib_fl_refill(ctx, fl, reclaimable)); return (0); } uint8_t iflib_in_detach(if_ctx_t ctx) { bool in_detach; STATE_LOCK(ctx); in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); STATE_UNLOCK(ctx); return (in_detach); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; bus_dmamap_t sd_map; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; if (*sd_cl != NULL) { sd_map = fl->ifl_sds.ifsd_map[i]; bus_dmamap_sync(fl->ifl_buf_tag, sd_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fl->ifl_buf_tag, sd_map); uma_zfree(fl->ifl_zone, *sd_cl); *sd_cl = NULL; if (*sd_m != NULL) { m_init(*sd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, *sd_m); *sd_m = NULL; } } else { MPASS(*sd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif } #ifdef INVARIANTS for (i = 0; i < fl->ifl_size; i++) { MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); MPASS(fl->ifl_sds.ifsd_m[i] == NULL); } #endif /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a free list and its buffers. * **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int qidx; bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); qidx = rxq->ifr_fl_offset + fl->ifl_id; if (scctx->isc_rxd_buf_size[qidx] != 0) fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx]; else fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; /* * ifl_buf_size may be a driver-supplied value, so pull it up * to the selected mbuf size. */ fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size); if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* * Avoid pre-allocating zillions of clusters to an idle card * potentially speeding up attach. In any case make sure * to leave a descriptor unavailable. See the comment in * iflib_fl_refill_all(). 
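 * As an example (made-up size): a free list with 2048 descriptors is
 * seeded with only min(128, 2048 - 1) = 128 clusters below; the rest
 * are expected to be allocated on demand via iflib_fl_refill_all().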
*/ MPASS(fl->ifl_size > 0); (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1)); if (min(128, fl->ifl_size - 1) != fl->ifl_credits) return (ENOBUFS); /* * handle failure */ MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i, j; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_buf_tag != NULL) { if (fl->ifl_sds.ifsd_map != NULL) { for (j = 0; j < fl->ifl_size; j++) { bus_dmamap_sync( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j], BUS_DMASYNC_POSTREAD); bus_dmamap_unload( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j]); bus_dmamap_destroy( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j]); } } bus_dma_tag_destroy(fl->ifl_buf_tag); fl->ifl_buf_tag = NULL; } free(fl->ifl_sds.ifsd_m, M_IFLIB); free(fl->ifl_sds.ifsd_cl, M_IFLIB); free(fl->ifl_sds.ifsd_ba, M_IFLIB); free(fl->ifl_sds.ifsd_map, M_IFLIB); free(fl->ifl_rx_bitmap, M_IFLIB); fl->ifl_sds.ifsd_m = NULL; fl->ifl_sds.ifsd_cl = NULL; fl->ifl_sds.ifsd_ba = NULL; fl->ifl_sds.ifsd_map = NULL; fl->ifl_rx_bitmap = NULL; } free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; free(rxq->ifr_ifdi, M_IFLIB); rxq->ifr_ifdi = NULL; rxq->ifr_cq_cidx = 0; } } /* * Timer routine */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; uint64_t this_tick = ticks; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. 
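** To spell out the sequence below: the check runs at most once every hz/2 ticks; a queue whose mp_ring is stalled is first marked IFLIB_QUEUE_HUNG, and a later pass that still finds it hung (with no descriptors cleaned since, or no pause frames to explain the stall) escalates to the watchdog reset.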
*/ if (this_tick - txq->ift_last_timer_tick >= hz / 2) { txq->ift_last_timer_tick = this_tick; IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && ((txq->ift_cleaned_prev == txq->ift_cleaned) || (sctx->isc_pause_frames == 0))) goto hung; if (txq->ift_qstatus != IFLIB_QUEUE_IDLE && ifmp_ring_is_stalled(txq->ift_br)) { KASSERT(ctx->ifc_link_state == LINK_STATE_UP, ("queue can't be marked as hung if interface is down")); txq->ift_qstatus = IFLIB_QUEUE_HUNG; } txq->ift_cleaned_prev = txq->ift_cleaned; } /* handle any laggards */ if (txq->ift_db_pending) GROUPTASK_ENQUEUE(&txq->ift_task); sctx->isc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, hz / 2, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: device_printf(ctx->ifc_dev, "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); STATE_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); } static uint16_t iflib_get_mbuf_size_for(unsigned int size) { if (size <= MCLBYTES) return (MCLBYTES); else return (MJUMPAGESIZE); } static void iflib_calc_rx_mbuf_sz(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ ctx->ifc_rx_mbuf_sz = iflib_get_mbuf_size_for(sctx->isc_max_frame_size); } uint32_t iflib_get_rx_mbuf_sz(if_ctx_t ctx) { return (ctx->ifc_rx_mbuf_sz); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); + /* + * See iflib_stop(). Useful in case iflib_init_locked() is + * called without first calling iflib_stop(). + */ + netmap_disable_all_rings(ifp); + tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); #ifdef DEV_NETMAP callout_stop(&txq->ift_netmap_timer); #endif /* DEV_NETMAP */ CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } /* * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so * that drivers can use the value when setting up the hardware receive * buffers. */ iflib_calc_rx_mbuf_sz(ctx); #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { if (iflib_netmap_rxq_init(ctx, rxq) > 0) { /* This rxq is in netmap mode. Skip normal init. 
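* Buffers on a netmap-mode ring are managed by netmap itself, so the mbuf cluster setup done by iflib_fl_setup() below is not wanted here.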
*/ continue; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "setting up free list %d failed - " "check cluster settings\n", j); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); + + /* Re-enable txsync/rxsync. */ + netmap_enable_all_rings(ifp); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(1000); IFDI_STOP(ctx); DELAY(1000); + /* + * Stop any pending txsync/rxsync and prevent new ones + * from starting. Processes blocked in poll() will get + * POLLERR. + */ + netmap_disable_all_rings(ctx->ifc_ifp); + iflib_debug_reset(); /* Wait for current tx queue users to exit to disarm watchdog timer. */ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); #ifdef DEV_NETMAP callout_stop(&txq->ift_netmap_timer); #endif /* DEV_NETMAP */ CALLOUT_UNLOCK(txq); /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br); for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all receivers have completed before proceeding XXX */ rxq->ifr_cq_cidx = 0; for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static inline caddr_t calc_next_rxd(iflib_fl_t fl, int cidx) { qidx_t size; int nrxd; caddr_t start, end, cur, next; nrxd = fl->ifl_size; size = fl->ifl_rxd_size; start = fl->ifl_ifdi->idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*nrxd; next = CACHE_PTR_NEXT(cur); return (next < end ?
next : start); } static inline void prefetch_pkts(iflib_fl_t fl, int cidx) { int nextptr; int nrxd = fl->ifl_size; caddr_t next_rxd; nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); prefetch(&fl->ifl_sds.ifsd_m[nextptr]); prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); next_rxd = calc_next_rxd(fl, cidx); prefetch(next_rxd); prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); } static void rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) { int flid, cidx; bus_dmamap_t map; iflib_fl_t fl; int next; map = NULL; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; sd->ifsd_fl = fl; sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; #endif if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) prefetch_pkts(fl, cidx); next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_map[next]); map = fl->ifl_sds.ifsd_map[cidx]; bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD); if (unload && irf->irf_len != 0) bus_dmamap_unload(fl->ifl_buf_tag, map); fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); if (__predict_false(fl->ifl_cidx == 0)) fl->ifl_gen = 0; bit_clear(fl->ifl_rx_bitmap, cidx); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) { int i, padlen , flags; struct mbuf *m, *mh, *mt; caddr_t cl; i = 0; mh = NULL; do { rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); MPASS(*sd->ifsd_cl != NULL); MPASS(*sd->ifsd_m != NULL); /* Don't include zero-length frags */ if (ri->iri_frags[i].irf_len == 0) { /* XXX we can save the cluster here, but not the mbuf */ m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); m_free(*sd->ifsd_m); *sd->ifsd_m = NULL; continue; } m = *sd->ifsd_m; *sd->ifsd_m = NULL; if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } cl = *sd->ifsd_cl; *sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct if_rxsd sd; struct mbuf *m; /* should I merge this back in now that the two paths are basically duplicated? 
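* For the record, the two paths are: the copy path below, taken when the packet is a single fragment no longer than MIN(IFLIB_RX_COPY_THRESH, MHLEN), which copies the data into the mbuf and leaves the cluster attached to the free list for reuse; and assemble_segments() above, which detaches the cluster(s) and hands them to the mbuf chain zero-copy.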
*/ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len != 0 && ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); m = *sd.ifsd_m; *sd.ifsd_m = NULL; m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m)) m->m_data += 2; #endif memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); m->m_len = ri->iri_frags[0].irf_len; } else { m = assemble_segments(rxq, ri, &sd); if (m == NULL) return (NULL); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } #if defined(INET6) || defined(INET) static void iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) { CURVNET_SET(lc->ifp->if_vnet); #if defined(INET6) *v6 = V_ip6_forwarding; #endif #if defined(INET) *v4 = V_ipforwarding; #endif CURVNET_RESTORE(); } /* * Returns true if it's possible this packet could be LROed. * if it returns false, it is guaranteed that tcp_lro_rx() * would not return zero. */ static bool iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) { struct ether_header *eh; eh = mtod(m, struct ether_header *); switch (eh->ether_type) { #if defined(INET6) case htons(ETHERTYPE_IPV6): return (!v6_forwarding); #endif #if defined (INET) case htons(ETHERTYPE_IP): return (!v4_forwarding); #endif } return false; } #else static void iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) { } #endif static void _task_fn_rx_watchdog(void *context) { iflib_rxq_t rxq = context; GROUPTASK_ENQUEUE(&rxq->ifr_task); } static uint8_t iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) { if_t ifp; if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int avail, i; qidx_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; int lro_enabled; bool v4_forwarding, v6_forwarding, lro_possible; uint8_t retval = 0; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt, *mf; lro_possible = v4_forwarding = v6_forwarding = false; ifp = ctx->ifc_ifp; mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) retval |= iflib_fl_refill_all(ctx, fl); DBG_COUNTER_INC(rx_unavail); return (retval); } for (budget_left = budget; budget_left > 0 && avail > 0;) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ rxd_info_zero(&ri); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ifp; ri.iri_frags = rxq->ifr_frags; err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (err) goto err; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ /* XXX NB: shurd - check if this is still safe */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; /* was this only a completion queue message? 
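* (i.e. the driver consumed a completion entry that described no packet data; in that case iri_nfrags is 0 and there is nothing to pass up, so just move on)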
*/ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); avail--; budget_left--; if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) { DBG_COUNTER_INC(rx_mbuf_null); continue; } /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) retval |= iflib_fl_refill_all(ctx, fl); lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); if (lro_enabled) iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); mt = mf = NULL; while (mh != NULL) { m = mh; mh = mh->m_nextpkt; m->m_nextpkt = NULL; #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) continue; #endif rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled) { if (!lro_possible) { lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); if (lro_possible && mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); mt = mf = NULL; } } if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == (CSUM_L4_CALC|CSUM_L4_VALID)) { if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) continue; } } #endif if (lro_possible) { ifp->if_input(ifp, m); DBG_COUNTER_INC(rx_if_input); continue; } if (mf == NULL) mf = m; if (mt != NULL) mt->m_nextpkt = m; mt = m; } if (mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0) retval |= IFLIB_RXEOF_MORE; return (retval); err: STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); return (0); } #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) static inline qidx_t txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (in_use > 4*minthresh) return (notify_count); if (in_use > 2*minthresh) return (notify_count >> 1); if (in_use > minthresh) return (notify_count >> 3); return (0); } static inline qidx_t txq_max_rs_deferred(iflib_txq_t txq) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (txq->ift_in_use > 4*minthresh) return (notify_count); if (txq->ift_in_use > 2*minthresh) return (notify_count >> 1); if (txq->ift_in_use > minthresh) return (notify_count >> 2); return (2); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) /* XXX 
we should be setting this to something other than zero */ #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ (ctx)->ifc_softc_ctx.isc_tx_nsegments) static inline bool iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) { qidx_t dbval, max; bool rang; rang = false; max = TXQ_MAX_DB_DEFERRED(txq, in_use); if (ring || txq->ift_db_pending >= max) { dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; rang = true; } return (rang); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m; m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); DBG_COUNTER_INC(tx_frees); *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
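* For example, an 802.1Q-tagged frame yields ipi_ehdrlen == ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN (14 + 4 == 18 bytes) with ipi_etype taken from evl_proto, while an untagged frame yields ipi_ehdrlen == 14 with ipi_etype from evl_encap_proto.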
*/ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct mbuf *n; struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD4(pi)) { if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { if (__predict_false(th == NULL)) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO4(pi)) { if (__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } } if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) ip->ip_sum = 0; break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. 
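* (the code assumes the TCP header immediately follows the fixed 40-byte IPv6 header, so any extension header, e.g. hop-by-hop options, leaves both ipi_ipproto and th pointing at the wrong data)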
*/ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD6(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO6(pi)) { if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= CSUM_IP6_TCP; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, pidx; struct mbuf *m, **ifsd_m; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx = txq->ift_pidx & (ntxd - 1); ifsd_m = txq->ift_sds.ifsd_m; m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]); if (txq->ift_sds.ifsd_tso_map != NULL) bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[pidx]); #if MEMORY_LOGGING txq->ift_dequeued++; #endif return (m); } static inline caddr_t calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) { qidx_t size; int ntxd; caddr_t start, end, cur, next; ntxd = txq->ift_size; size = txq->ift_txd_size[qid]; start = txq->ift_ifdi[qid].idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*ntxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } /* * Pad an mbuf to ensure a minimum ethernet frame size. 
* min_frame_size is the frame size (less CRC) to pad the mbuf to */ static __noinline int iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) { /* * 18 is enough bytes to pad an ARP packet to 46 bytes, and * an ARP message is the smallest common payload I can think of */ static char pad[18]; /* just zeros */ int n; struct mbuf *new_head; if (!M_WRITABLE(*m_head)) { new_head = m_dup(*m_head, M_NOWAIT); if (new_head == NULL) { m_freem(*m_head); device_printf(dev, "cannot pad short frame, m_dup() failed\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return ENOMEM; } m_freem(*m_head); *m_head = new_head; } for (n = min_frame_size - (*m_head)->m_pkthdr.len; n > 0; n -= sizeof(pad)) if (!m_append(*m_head, min(n, sizeof(pad)), pad)) break; if (n > 0) { m_freem(*m_head); device_printf(dev, "cannot pad short frame\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } return 0; } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_tag_t buf_tag; bus_dma_segment_t *segs; struct mbuf *m_head, **ifsd_m; void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; if (ctx->ifc_flags & IFC_PREFETCH) { next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { next_txd = calc_next_txd(txq, cidx, 0); prefetch(next_txd); } /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); prefetch(&txq->ift_sds.ifsd_map[next]); next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); } map = txq->ift_sds.ifsd_map[pidx]; ifsd_m = txq->ift_sds.ifsd_m; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { buf_tag = txq->ift_tso_buf_tag; max_segs = scctx->isc_tx_tso_segments_max; map = txq->ift_sds.ifsd_tso_map[pidx]; MPASS(buf_tag != NULL); MPASS(max_segs > 0); } else { buf_tag = txq->ift_buf_tag; max_segs = scctx->isc_tx_nsegments; map = txq->ift_sds.ifsd_map[pidx]; } if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); if (err) { DBG_COUNTER_INC(encap_txd_encap_fail); return err; } } m_head = *m_headp; pkt_info_zero(&pi); pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = M_HAS_VLANTAG(m_head) ?
m_head->m_pkthdr.ether_vtag : 0; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } m_head = *m_headp; } retry: err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) { m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); /* try defrag if collapsing fails */ if (m_head == NULL) remap++; } if (remap == 1) { txq->ift_mbuf_defrag++; m_head = m_defrag(*m_headp, M_NOWAIT); } /* * remap should never be >1 unless bus_dmamap_load_mbuf_sg * failed to map an mbuf that was run through m_defrag */ MPASS(remap <= 1); if (__predict_false(m_head == NULL || remap > 1)) goto defrag_failed; remap++; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } ifsd_m[pidx] = m_head; /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; bus_dmamap_unload(buf_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); DBG_COUNTER_INC(encap_txd_encap_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } /* * On Intel cards we can greatly reduce the number of TX interrupts * we see by only setting report status on every Nth descriptor. * However, this also means that the driver will need to keep track * of the descriptors that RS was set on to check them for the DD bit. */ txq->ift_rs_pending += nsegs + 1; if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { pi.ipi_flags |= IPI_TX_INTR; txq->ift_rs_pending = 0; } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else { *m_headp = m_head = iflib_remove_mbuf(txq); if (err == EFBIG) { txq->ift_txd_encap_efbig++; if (remap < 2) { remap = 1; goto defrag; } } goto defrag_failed; } /* * err can't possibly be non-zero here, so we don't neet to test it * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail). 
*/ return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; DBG_COUNTER_INC(encap_txd_encap_fail); return (ENOMEM); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; ifsd_m = txq->ift_sds.ifsd_m; do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n-- > 0) { if (do_prefetch) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); } if ((m = ifsd_m[cidx]) != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); if (m->m_pkthdr.csum_flags & CSUM_TSO) { bus_dmamap_sync(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[cidx]); } else { bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[cidx]); } /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); m_freem(m); ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) { int next, size; struct mbuf **items; size = r->size; next = (cidx + CACHE_PTR_INCREMENT) & (size-1); items = __DEVOLATILE(struct mbuf **, &r->items[0]); prefetch(items[(cidx + offset) & (size-1)]); if (remaining > 1) { prefetch2cachelines(&items[next]); prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); } return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br, budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) return (1); bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; if_t ifp = ctx->ifc_ifp; struct mbuf *m, **mp; int avail, bytes_sent, consumed, count, err, i, in_use_prev; int mcast_sent, pkt_sent, reclaimed, 
txq_avail; bool do_prefetch, rang, ring; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); avail = IDXDIFF(pidx, cidx, r->size); if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) m_freem(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } if (reclaimed) txq->ift_qstatus = IFLIB_QUEUE_IDLE; consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); txq_avail = TXQ_AVAIL(txq); err = 0; for (i = 0; i < count && txq_avail > MAX_TX_DESC(ctx) + 2; i++) { int rem = do_prefetch ? count - i : 0; mp = _ring_peek_one(r, cidx, i, rem); MPASS(mp != NULL && *mp != NULL); if (__predict_false(*mp == (struct mbuf *)txq)) { consumed++; continue; } in_use_prev = txq->ift_in_use; err = iflib_encap(txq, mp); if (__predict_false(err)) { /* no room - bail out */ if (err == ENOBUFS) break; consumed++; /* we can't send this packet - skip it */ continue; } consumed++; pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; mcast_sent += !!(m->m_flags & M_MCAST); txq_avail = TXQ_AVAIL(txq); txq->ift_db_pending += (txq->ift_in_use - in_use_prev); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) break; rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ ring = rang ? 
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i, avail - i); if (__predict_false(*mp == (struct mbuf *)txq)) continue; m_freem(*mp); DBG_COUNTER_INC(tx_frees); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; if_t ifp = ctx->ifc_ifp; int abdicate = ctx->ifc_sysctl_tx_abdicate; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) return; #ifdef DEV_NETMAP if ((if_getcapenable(ifp) & IFCAP_NETMAP) && netmap_tx_irq(ifp, txq->ift_id)) goto skip_ifmp; #endif #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) iflib_altq_if_start(ifp); #endif if (txq->ift_db_pending) ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); else if (!abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); /* * When abdicating, we always need to check drainage, not just when we don't enqueue */ if (abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); #ifdef DEV_NETMAP skip_ifmp: #endif if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); } static void _task_fn_rx(void *context) { iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; uint8_t more; uint16_t budget; #ifdef DEV_NETMAP u_int work = 0; int nmirq; #endif #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; #ifdef DEV_NETMAP nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work); if (nmirq != NM_IRQ_PASS) { more = (nmirq == NM_IRQ_RESCHED) ? 
IFLIB_RXEOF_MORE : 0; goto skip_rxeof; } #endif budget = ctx->ifc_sysctl_rx_budget; if (budget == 0) budget = 16; /* XXX */ more = iflib_rxeof(rxq, budget); #ifdef DEV_NETMAP skip_rxeof: #endif if ((more & IFLIB_RXEOF_MORE) == 0) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); DBG_COUNTER_INC(rx_intr_enables); } if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more & IFLIB_RXEOF_MORE) GROUPTASK_ENQUEUE(&rxq->ifr_task); else if (more & IFLIB_RXEOF_EMPTY) callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; bool oactive, running, do_reset, do_watchdog, in_detach; STATE_LOCK(ctx); running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); do_reset = (ctx->ifc_flags & IFC_DO_RESET); do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); in_detach = (ctx->ifc_flags & IFC_IN_DETACH); ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); STATE_UNLOCK(ctx); if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; if (in_detach) return; CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } if (do_watchdog) { ctx->ifc_watchdog_events++; IFDI_WATCHDOG_RESET(ctx); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { callout_reset_on(&txq->ift_timer, hz / 2, iflib_timer, txq, txq->ift_timer.c_cpu); } IFDI_LINK_INTR_ENABLE(ctx); if (do_reset) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; int abdicate = ctx->ifc_sysctl_tx_abdicate; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENETDOWN); } MPASS(m->m_nextpkt == NULL); /* ALTQ-enabled interfaces always use queue 0. */ qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) 
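* As currently written, QIDX() masks the flowid with isc_rss_table_mask and takes the result modulo the number of TX queue sets; e.g. (hypothetical values) with the default 64-entry RSS table (mask 63) and 8 TX queue sets, flowid 0x1234 selects ((0x1234 & 63) % 8) == (52 % 8) == queue 4.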
*/ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); DBG_COUNTER_INC(tx_frees); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); if (abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); if (err) { if (!abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); m_freem(m); DBG_COUNTER_INC(tx_frees); } return (err); } #ifdef ALTQ /* * The overall approach to integrating iflib with ALTQ is to continue to use * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring * is redundant/unnecessary, but doing so minimizes the amount of * ALTQ-specific code required in iflib. It is assumed that the overhead of * redundantly queueing to an intermediate mp_ring is swamped by the * performance limitations inherent in using ALTQ. * * When ALTQ support is compiled in, all iflib drivers will use a transmit * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the * given interface. If ALTQ is enabled for an interface, then all * transmitted packets for that interface will be submitted to the ALTQ * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit() * implementation because it uses IFQ_HANDOFF(), which will duplicatively * update stats that the iflib machinery handles, and which is sensitve to * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start() * will be installed as the start routine for use by ALTQ facilities that * need to trigger queue drains on a scheduled basis. * */ static void iflib_altq_if_start(if_t ifp) { struct ifaltq *ifq = &ifp->if_snd; struct mbuf *m; IFQ_LOCK(ifq); IFQ_DEQUEUE_NOLOCK(ifq, m); while (m != NULL) { iflib_if_transmit(ifp, m); IFQ_DEQUEUE_NOLOCK(ifq, m); } IFQ_UNLOCK(ifq); } static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m) { int err; if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_ENQUEUE(&ifp->if_snd, m, err); if (err == 0) iflib_altq_if_start(ifp); } else err = iflib_if_transmit(ifp, m); return (err); } #endif /* ALTQ */ static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; STATE_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; STATE_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) iflib_txq_check_drain(txq, 0); STATE_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; STATE_UNLOCK(ctx); /* * When ALTQ is enabled, this will also take care of purging the * ALTQ queue(s). 
*/ if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \ IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = false; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = true; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = true; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP,0); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) reinit = 1; #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: CTX_LOCK(ctx); if (ifr->ifr_mtu == if_getmtu(ifp)) { CTX_UNLOCK(ctx); break; } bits = if_getdrvflags(ifp); /* stop the driver and free any clusters before proceeding */ iflib_stop(ctx); if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { STATE_LOCK(ctx); if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) ctx->ifc_flags |= IFC_MULTISEG; else ctx->ifc_flags &= ~IFC_MULTISEG; STATE_UNLOCK(ctx); err = if_setmtu(ifp, ifr->ifr_mtu); } iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); break; case SIOCSIFFLAGS: CTX_LOCK(ctx); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); } } else reinit = 1; } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { iflib_stop(ctx); } ctx->ifc_if_flags = if_getflags(ifp); CTX_UNLOCK(ctx); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); IFDI_MULTI_SET(ctx); IFDI_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); } break; case SIOCSIFMEDIA: CTX_LOCK(ctx); IFDI_MEDIA_SET(ctx); CTX_UNLOCK(ctx); /* FALLTHROUGH */ case SIOCGIFMEDIA: case SIOCGIFXMEDIA: err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); break; case SIOCGI2C: { struct ifi2creq i2c; err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (err != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { err = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { err = EINVAL; break; } if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) err = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } case SIOCSIFCAP: { int mask, setmask, oldmask; oldmask = if_getcapenable(ifp); mask = ifr->ifr_reqcap ^ oldmask; mask &= ctx->ifc_softc_ctx.isc_capabilities; setmask = 0; #ifdef TCP_OFFLOAD setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); #endif setmask |= (mask & IFCAP_FLAGS); setmask |= (mask & IFCAP_WOL); /* * If any RX csum has changed, change all the ones that * are supported by the driver. 
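* e.g. if the hardware supports both IFCAP_RXCSUM and IFCAP_RXCSUM_IPV6, toggling either one toggles both, so the IPv4 and IPv6 receive checksum settings never diverge.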
*/ if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { setmask |= ctx->ifc_softc_ctx.isc_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); } /* * want to ensure that traffic has stopped before we change any of the flags */ if (setmask) { CTX_LOCK(ctx); bits = if_getdrvflags(ifp); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_stop(ctx); STATE_LOCK(ctx); if_togglecapenable(ifp, setmask); STATE_UNLOCK(ctx); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); } if_vlancap(ifp); break; } case SIOCGPRIVATE_0: case SIOCSDRVSPEC: case SIOCGDRVSPEC: CTX_LOCK(ctx); err = IFDI_PRIV_IOCTL(ctx, command, data); CTX_UNLOCK(ctx); break; default: err = ether_ioctl(ifp, command, data); break; } if (reinit) iflib_if_init(ctx); return (err); } static uint64_t iflib_if_get_counter(if_t ifp, ift_counter cnt) { if_ctx_t ctx = if_getsoftc(ifp); return (IFDI_GET_COUNTER(ctx, cnt)); } /********************************************************************* * * OTHER FUNCTIONS EXPORTED TO THE STACK * **********************************************************************/ static void iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; if (iflib_in_detach(ctx)) return; CTX_LOCK(ctx); /* Driver may need all untagged packets to be flushed */ if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) iflib_stop(ctx); IFDI_VLAN_REGISTER(ctx, vtag); /* Re-init to load the changes, if required */ if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) iflib_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); /* Driver may need all tagged packets to be flushed */ if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) iflib_stop(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes, if required */ if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) iflib_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { pci_vendor_info_t *ent; uint16_t pci_vendor_id, pci_device_id; uint16_t pci_subvendor_id, pci_subdevice_id; uint16_t pci_rev_id; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, 
ent->pvi_name); /* this needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values over written by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } int iflib_device_probe_vendor(device_t dev) { int probe; probe = iflib_device_probe(dev); if (probe == BUS_PROBE_DEFAULT) return (BUS_PROBE_VENDOR); else return (probe); } static void iflib_reset_qvalues(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; int i; scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES; scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH; if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } if (!powerof2(scctx->isc_nrxd[i])) { device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } } } static uint16_t get_ctx_core_offset(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; struct cpu_offset *op; uint16_t qc; uint16_t ret = ctx->ifc_sysctl_core_offset; if (ret != CORE_OFFSET_UNSPECIFIED) return (ret); if (ctx->ifc_sysctl_separate_txrx) qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets; else qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets); mtx_lock(&cpu_offset_mtx); SLIST_FOREACH(op, &cpu_offsets, entries) { if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { ret = op->offset; op->offset += qc; MPASS(op->refcount < UINT_MAX); op->refcount++; break; } } if (ret == CORE_OFFSET_UNSPECIFIED) { ret = 0; op = malloc(sizeof(struct cpu_offset), M_IFLIB, M_NOWAIT | M_ZERO); if (op == NULL) { device_printf(ctx->ifc_dev, "allocation for cpu offset failed.\n"); } else { op->offset = qc; op->refcount = 1; CPU_COPY(&ctx->ifc_cpus, &op->set); SLIST_INSERT_HEAD(&cpu_offsets, op, entries); } } 
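/* Illustrative example (hypothetical numbers): with 4 TX and 4 RX queue sets and ifc_sysctl_separate_txrx set, qc == 8, so queue-to-CPU assignment for this context starts at core offset 'ret' and the next device sharing this CPU set starts at ret + 8. */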
mtx_unlock(&cpu_offset_mtx); return (ret); } static void unref_ctx_core_offset(if_ctx_t ctx) { struct cpu_offset *op, *top; mtx_lock(&cpu_offset_mtx); SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) { if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { MPASS(op->refcount > 0); op->refcount--; if (op->refcount == 0) { SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries); free(op, M_IFLIB); } break; } } mtx_unlock(&cpu_offset_mtx); } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; kobjop_desc_t kobj_desc; kobj_method_t *kobj_method; int err, msix, rid; int num_txd, num_rxd; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_unlock; } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; #ifdef INVARIANTS if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; num_txd = iflib_num_tx_descs(ctx); num_rxd = iflib_num_rx_descs(ctx); /* XXX change for per-queue sizes */ device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", num_txd, num_rxd); if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, num_txd / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > num_txd / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, num_txd / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertize a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). */ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* Set up cpu set. If it fails, use the set of all CPUs. 
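	   The resulting ifc_cpus set is what get_ctx_core_offset() and the
	   interrupt-affinity code (find_nth()) consult later when spreading
	   queues across cores.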
*/ if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { device_printf(dev, "Unable to fetch CPU list\n"); CPU_COPY(&all_cpus, &ctx->ifc_cpus); } MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); /* ** Now set up MSI or MSI-X, should return us the number of supported ** vectors (will be 1 for a legacy interrupt and MSI). */ if (sctx->isc_flags & IFLIB_SKIP_MSIX) { msix = scctx->isc_vectors; } else if (scctx->isc_msix_bar != 0) /* * The simple fact that isc_msix_bar is not 0 does not mean we * we have a good value there that is known to work. */ msix = iflib_msix_init(ctx); else { scctx->isc_vectors = 1; scctx->isc_ntxqsets = 1; scctx->isc_nrxqsets = 1; scctx->isc_intr = IFLIB_INTR_LEGACY; msix = 0; } /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_intr_free; } if ((err = iflib_qset_structures_setup(ctx))) goto fail_queues; /* * Now that we know how many queues there are, get the core offset. */ ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx); /* * Group taskqueues aren't properly set up until SMP is started, * so we disable interrupts until we can handle them post * SI_SUB_SMP. * * XXX: disabling interrupts doesn't actually work, at least for * the non-MSI case. When they occur before SI_SUB_SMP completes, * we do null handling and depend on this not causing too large an * interrupt storm. */ IFDI_INTR_DISABLE(ctx); if (msix > 1) { /* * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable * aren't the default NULL implementation. */ kobj_desc = &ifdi_rx_queue_intr_enable_desc; kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) { device_printf(dev, "MSI-X requires ifdi_rx_queue_intr_enable method"); err = EOPNOTSUPP; goto fail_queues; } kobj_desc = &ifdi_tx_queue_intr_enable_desc; kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) { device_printf(dev, "MSI-X requires ifdi_tx_queue_intr_enable method"); err = EOPNOTSUPP; goto fail_queues; } /* * Assign the MSI-X vectors. * Note that the default NULL ifdi_msix_intr_assign method will * fail here, too. */ err = IFDI_MSIX_INTR_ASSIGN(ctx, msix); if (err != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); goto fail_queues; } } else if (scctx->isc_intr != IFLIB_INTR_MSIX) { rid = 0; if (scctx->isc_intr == IFLIB_INTR_MSI) { MPASS(msix == 1); rid = 1; } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); goto fail_queues; } } else { device_printf(dev, "Cannot use iflib with only 1 MSI-X interrupt!\n"); err = ENODEV; goto fail_intr_free; } ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. 
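	 * Advertising the larger header length is what allows full-sized
	 * frames to be carried over VLANs without shrinking the MTU.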
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if ((err = iflib_netmap_attach(ctx))) { device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); goto fail_detach; } *ctxp = ctx; NETDUMP_SET(ctx->ifc_ifp, iflib); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: iflib_free_intr_mem(ctx); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_tqg_detach(ctx); IFDI_DETACH(ctx); fail_unlock: CTX_UNLOCK(ctx); iflib_deregister(ctx); fail_ctx_free: device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, struct iflib_cloneattach_ctx *clctx) { int num_txd, num_rxd; int err; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; void *sc; ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); ctx->ifc_flags |= IFC_SC_ALLOCATED; if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) ctx->ifc_flags |= IFC_PSEUDO; ctx->ifc_sctx = sctx; ctx->ifc_softc = sc; ctx->ifc_dev = dev; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_unlock; } if (sctx->isc_flags & IFLIB_GEN_MAC) iflib_gen_mac(ctx); if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, clctx->cc_params)) != 0) { device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); goto fail_unlock; } #ifdef INVARIANTS if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); ifp->if_flags |= IFF_NOGROUP; if (sctx->isc_flags & IFLIB_PSEUDO) { ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO); if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) { ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); } else { if_attach(ctx->ifc_ifp); bpfattach(ctx->ifc_ifp, DLT_NULL, sizeof(u_int32_t)); } if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } *ctxp = ctx; /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. 
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); } ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO); _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; num_txd = iflib_num_tx_descs(ctx); num_rxd = iflib_num_rx_descs(ctx); /* XXX change for per-queue sizes */ device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", num_txd, num_rxd); if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, num_txd / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > num_txd / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, num_txd / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertize a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). */ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* XXX --- can support > 1 -- but keep it simple for now */ scctx->isc_intr = IFLIB_INTR_LEGACY; /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_iflib_detach; } if ((err = iflib_qset_structures_setup(ctx))) { device_printf(dev, "qset structure setup failed %d\n", err); goto fail_queues; } /* * XXX What if anything do we want to do about interrupts? */ ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. 
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); /* XXX handle more than one queue */ for (i = 0; i < scctx->isc_nrxqsets; i++) IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); *ctxp = ctx; if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_tqg_detach(ctx); fail_iflib_detach: IFDI_DETACH(ctx); fail_unlock: CTX_UNLOCK(ctx); iflib_deregister(ctx); fail_ctx_free: free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; if_shared_ctx_t sctx = ctx->ifc_sctx; /* Unregister VLAN event handlers early */ iflib_unregister_vlan_handlers(ctx); if ((sctx->isc_flags & IFLIB_PSEUDO) && (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) { bpfdetach(ifp); if_detach(ifp); } else { ether_ifdetach(ifp); } iflib_tqg_detach(ctx); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_deregister(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_attach(device_t dev) { if_ctx_t ctx; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_enable_busmaster(dev); return (iflib_device_register(dev, NULL, sctx, &ctx)); } int iflib_device_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; device_t dev = ctx->ifc_dev; /* Make sure VLANS are not using driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev, "Vlan in use, detach first\n"); return (EBUSY); } #ifdef PCI_IOV if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (EBUSY); } #endif STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); /* Unregister VLAN handlers before calling iflib_stop() */ iflib_unregister_vlan_handlers(ctx); iflib_netmap_detach(ifp); ether_ifdetach(ifp); CTX_LOCK(ctx); iflib_stop(ctx); CTX_UNLOCK(ctx); if (ctx->ifc_led_dev != NULL) led_destroy(ctx->ifc_led_dev); iflib_tqg_detach(ctx); CTX_LOCK(ctx); IFDI_DETACH(ctx); CTX_UNLOCK(ctx); /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ iflib_free_intr_mem(ctx); bus_generic_detach(dev); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_deregister(ctx); device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); unref_ctx_core_offset(ctx); free(ctx, M_IFLIB); return (0); } static void iflib_tqg_detach(if_ctx_t ctx) { iflib_txq_t txq; iflib_rxq_t rxq; int i; struct taskqgroup *tqg; /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); #ifdef DEV_NETMAP callout_drain(&txq->ift_netmap_timer); #endif /* DEV_NETMAP */ if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); } static void iflib_free_intr_mem(if_ctx_t ctx) { if (ctx->ifc_softc_ctx.isc_intr != 
IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(ctx->ifc_dev); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } } int iflib_device_detach(device_t dev) { if_ctx_t ctx = device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { int i; MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize); MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8); for (i = 0; i < sctx->isc_nrxqs; i++) { MPASS(sctx->isc_nrxd_min[i]); MPASS(powerof2(sctx->isc_nrxd_min[i])); MPASS(sctx->isc_nrxd_max[i]); MPASS(powerof2(sctx->isc_nrxd_max[i])); MPASS(sctx->isc_nrxd_default[i]); MPASS(powerof2(sctx->isc_nrxd_default[i])); } MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8); for (i = 0; i < sctx->isc_ntxqs; i++) { MPASS(sctx->isc_ntxd_min[i]); MPASS(powerof2(sctx->isc_ntxd_min[i])); MPASS(sctx->isc_ntxd_max[i]); MPASS(powerof2(sctx->isc_ntxd_max[i])); MPASS(sctx->isc_ntxd_default[i]); MPASS(powerof2(sctx->isc_ntxd_default[i])); } } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); MPASS(scctx->isc_txrx->ift_rxd_available); 
MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; u_char type; int iflags; if ((sctx->isc_flags & IFLIB_PSEUDO) == 0) _iflib_assert(sctx); CTX_LOCK_INIT(ctx); STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); if (sctx->isc_flags & IFLIB_PSEUDO) { if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) type = IFT_ETHER; else type = IFT_PPP; } else type = IFT_ETHER; ifp = ctx->ifc_ifp = if_alloc(type); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); #ifdef ALTQ if_setstartfn(ifp, iflib_altq_if_start); if_settransmitfn(ifp, iflib_altq_if_transmit); if_setsendqready(ifp); #else if_settransmitfn(ifp, iflib_if_transmit); #endif if_setqflushfn(ifp, iflib_if_qflush); iflags = IFF_MULTICAST; if ((sctx->isc_flags & IFLIB_PSEUDO) && (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) iflags |= IFF_POINTOPOINT; else iflags |= IFF_BROADCAST | IFF_SIMPLEX; if_setflags(ifp, iflags); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); ifmedia_init(&ctx->ifc_media, IFM_IMASK, iflib_media_change, iflib_media_status); return (0); } static void iflib_unregister_vlan_handlers(if_ctx_t ctx) { /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); ctx->ifc_vlan_attach_event = NULL; } if (ctx->ifc_vlan_detach_event != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); ctx->ifc_vlan_detach_event = NULL; } } static void iflib_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; /* Remove all media */ ifmedia_removeall(&ctx->ifc_media); /* Ensure that VLAN event handlers are unregistered */ iflib_unregister_vlan_handlers(ctx); /* Release kobject reference */ kobj_delete((kobj_t) ctx, NULL); /* Free the ifnet structure */ if_free(ifp); STATE_LOCK_DESTROY(ctx); /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ CTX_LOCK_DESTROY(ctx); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1; caddr_t *vaddrs; uint64_t *paddrs; KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); /* Allocate the TX ring struct memory */ if (!(ctx->ifc_txqs = (iflib_txq_t) malloc(sizeof(struct iflib_txq) * ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); err = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(ctx->ifc_rxqs = (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); err = ENOMEM; goto rx_fail; } txq = ctx->ifc_txqs; rxq = ctx->ifc_rxqs; /* * XXX handle allocation failure */ for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TX DMA info memory\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_ifdi = ifdip; for (j = 0; j < ntxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) { device_printf(dev, "Unable to allocate TX descriptors\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_txd_size[j] = scctx->isc_txd_size[j]; bzero((void *)ifdip->idi_vaddr, txqsizes[j]); } txq->ift_ctx = ctx; txq->ift_id = i; if (sctx->isc_flags & IFLIB_HAS_TXCQ) { txq->ift_br_offset = 1; } else { txq->ift_br_offset = 0; } if (iflib_txsd_alloc(txq)) { device_printf(dev, "Critical Failure setting up TX buffers\n"); err = ENOMEM; goto err_tx_desc; } /* Initialize the TX lock */ snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout", device_get_nameunit(dev), txq->ift_id); mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); txq->ift_timer.c_cpu = cpu; #ifdef DEV_NETMAP callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0); txq->ift_netmap_timer.c_cpu = cpu; #endif /* DEV_NETMAP */ err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, iflib_txq_can_drain, M_IFLIB, M_WAITOK); if (err) { /* XXX free any allocated rings */ device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { /* Set up some basics */ callout_init(&rxq->ifr_watchdog, 1); if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate RX DMA info memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_ifdi = ifdip; /* XXX this needs to be changed if #rx queues != #tx queues */ rxq->ifr_ntxqirq = 1; rxq->ifr_txqid[0] = i; for (j = 0; j < nrxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) { device_printf(dev, "Unable to allocate RX descriptors\n"); err = ENOMEM; goto err_tx_desc; } bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); } rxq->ifr_ctx = ctx; rxq->ifr_id = i; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { rxq->ifr_fl_offset = 1; } else { rxq->ifr_fl_offset = 0; } rxq->ifr_nfl = nfree_lists; if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate free list memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { fl[j].ifl_rxq = rxq; fl[j].ifl_id = j; fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; } /* Allocate receive buffers for the ring */ if 
(iflib_rxsd_alloc(rxq)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK); } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "Unable to allocate device TX queue\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "Unable to allocate device RX queue\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: rx_fail: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; if_shared_ctx_t sctx = ctx->ifc_sctx; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { for (j = 0; j < sctx->isc_ntxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); iflib_txq_destroy(txq); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int err, i; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) { err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset])); if (err != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } } #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free LRO resources allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so its the terminus. */ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) tcp_lro_free(&rxq->ifr_lc); } return (err); #endif } /********************************************************************* * * Free all receive rings. 
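 *  Called from the attach failure paths and on device deregistration.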
* **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; if_shared_ctx_t sctx = ctx->ifc_sctx; int i, j; for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { for (j = 0; j < sctx->isc_nrxqs; j++) iflib_dma_free(&rxq->ifr_ifdi[j]); iflib_rx_sds_free(rxq); #if defined(INET6) || defined(INET) if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) tcp_lro_free(&rxq->ifr_lc); #endif } free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; /* * It is expected that the caller takes care of freeing queues if this * fails. */ if ((err = iflib_tx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); return (err); } if ((err = iflib_rx_structures_setup(ctx)) != 0) device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } #ifdef SMP static int find_nth(if_ctx_t ctx, int qid) { cpuset_t cpus; int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, &cpus); count = CPU_COUNT(&cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, &cpus); } cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); return (cpuid-1); } #ifdef SCHED_ULE extern struct cpu_group *cpu_top; /* CPU topology */ static int find_child_with_core(int cpu, struct cpu_group *grp) { int i; if (grp->cg_children == 0) return -1; MPASS(grp->cg_child); for (i = 0; i < grp->cg_children; i++) { if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) return i; } return -1; } /* * Find the nth "close" core to the specified core * "close" is defined as the deepest level that shares * at least an L2 cache. With threads, this will be * threads on the same core. If the shared cache is L3 * or higher, simply returns the same core. */ static int find_close_core(int cpu, int core_offset) { struct cpu_group *grp; int i; int fcpu; cpuset_t cs; grp = cpu_top; if (grp == NULL) return cpu; i = 0; while ((i = find_child_with_core(cpu, grp)) != -1) { /* If the child only has one cpu, don't descend */ if (grp->cg_child[i].cg_count <= 1) break; grp = &grp->cg_child[i]; } /* If they don't share at least an L2 cache, use the same CPU */ if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) return cpu; /* Now pick one */ CPU_COPY(&grp->cg_mask, &cs); /* Add the selected CPU offset to core offset. 
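	   The first loop finds the position of 'cpu' within the group mask and
	   adds it to core_offset; the second loop then clears core_offset
	   (modulo the group size) CPUs from the mask and returns the first
	   one left.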
*/ for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) { if (fcpu - 1 == cpu) break; CPU_CLR(fcpu - 1, &cs); } MPASS(fcpu); core_offset += i; CPU_COPY(&grp->cg_mask, &cs); for (i = core_offset % grp->cg_count; i > 0; i--) { MPASS(CPU_FFS(&cs)); CPU_CLR(CPU_FFS(&cs) - 1, &cs); } MPASS(CPU_FFS(&cs)); return CPU_FFS(&cs) - 1; } #else static int find_close_core(int cpu, int core_offset __unused) { return cpu; } #endif static int get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid) { switch (type) { case IFLIB_INTR_TX: /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */ /* XXX handle multiple RX threads per core and more than two core per L2 group */ return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; case IFLIB_INTR_RX: case IFLIB_INTR_RXTX: /* RX queues get the specified core */ return qid / CPU_COUNT(&ctx->ifc_cpus); default: return -1; } } #else #define get_core_offset(ctx, type, qid) CPU_FIRST() #define find_close_core(cpuid, tid) CPU_FIRST() #define find_nth(ctx, gid) CPU_FIRST() #endif /* Just to avoid copy/paste */ static inline int iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name) { device_t dev; int co, cpuid, err, tid; dev = ctx->ifc_dev; co = ctx->ifc_sysctl_core_offset; if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX) co += ctx->ifc_softc_ctx.isc_nrxqsets; cpuid = find_nth(ctx, qid + co); tid = get_core_offset(ctx, type, qid); if (tid < 0) { device_printf(dev, "get_core_offset failed\n"); return (EOPNOTSUPP); } cpuid = find_close_core(cpuid, tid); err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, rman_get_start(irq->ii_res), name); if (err) { device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err); return (err); } #ifdef notyet if (cpuid > ctx->ifc_cpuid_highest) ctx->ifc_cpuid_highest = cpuid; #endif return (0); } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, const char *name) { device_t dev; struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; gtask_fn_t *fn; int tqrid, err; driver_filter_t *intr_fast; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap? 
*/ case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; info = &ctx->ifc_txqs[qid].ift_filter_info; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RXTX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr_rxtx; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_ADMIN: q = ctx; tqrid = -1; info = &ctx->ifc_filter_info; gtask = &ctx->ifc_admin_task; tqg = qgroup_if_config_tqg; fn = _task_fn_admin; intr_fast = iflib_fast_intr_ctx; break; default: device_printf(ctx->ifc_dev, "%s: unknown net intr type\n", __func__); return (EINVAL); } info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = q; dev = ctx->ifc_dev; err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); if (err != 0) { device_printf(dev, "_iflib_irq_alloc failed %d\n", err); return (err); } if (type == IFLIB_INTR_ADMIN) return (0); if (tqrid != -1) { err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name); if (err) return (err); } else { taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); } return (0); } void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) { struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int err; switch (type) { case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; break; case IFLIB_INTR_IOV: q = ctx; gtask = &ctx->ifc_vflr_task; tqg = qgroup_if_config_tqg; fn = _task_fn_iov; break; default: panic("unknown net intr type"); } GROUPTASK_INIT(gtask, 0, fn, q); if (irq != NULL) { err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name); if (err) taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); } else { taskqgroup_attach(tqg, gtask, q, -1, name); } } void iflib_irq_free(if_ctx_t ctx, if_irq_t irq) { if (irq->ii_tag) bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); if (irq->ii_res) bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, rman_get_rid(irq->ii_res), irq->ii_res); } static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_irq_t irq = &ctx->ifc_legacy_irq; iflib_filter_info_t info; struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; int tqrid; void *q; int err; bool rx_only; q = &ctx->ifc_rxqs[0]; info = &rxq[0].ifr_filter_info; gtask = &rxq[0].ifr_task; tqg = qgroup_if_io_tqg; tqrid = irq->ii_rid = *rid; fn = _task_fn_rx; rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0; ctx->ifc_flags |= IFC_LEGACY; info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = rx_only ? ctx : q; /* We allocate a single interrupt resource */ err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? 
iflib_fast_intr_ctx : iflib_fast_intr_rxtx, NULL, info, name); if (err != 0) return (err); GROUPTASK_INIT(gtask, 0, fn, q); taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx"); return (0); } void iflib_led_create(if_ctx_t ctx) { ctx->ifc_led_dev = led_create(iflib_led_func, ctx, device_get_nameunit(ctx->ifc_dev)); } void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) { GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) { GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); } void iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); if (baudrate >= IF_Gbps(10)) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_PREFETCH; STATE_UNLOCK(ctx); } /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) { iflib_fl_t fl; u_int i; for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct sx * iflib_ctx_lock_get(if_ctx_t ctx) { return (&ctx->ifc_ctx_sx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = 
ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues; int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors; iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; if (bootverbose) device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); /* Override by tuneable */ if (scctx->isc_disable_msix) goto msi; /* First try MSI-X */ if ((msgs = pci_msix_count(dev)) == 0) { if (bootverbose) device_printf(dev, "MSI-X not supported or disabled\n"); goto msi; } bar = ctx->ifc_softc_ctx.isc_msix_bar; /* * bar == -1 => "trust me I know what I'm doing" * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use MSI-X in this framework. */ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { device_printf(dev, "Unable to map MSI-X table\n"); goto msi; } } admincnt = sctx->isc_admin_intrcnt; #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); if (bootverbose) device_printf(dev, "intr CPUs: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; if (rx_queues > scctx->isc_nrxqsets) rx_queues = scctx->isc_nrxqsets; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (tx_queues > scctx->isc_ntxqsets) tx_queues = scctx->isc_ntxqsets; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } vectors = rx_queues + admincnt; if (msgs < vectors) { device_printf(dev, "insufficient number of MSI-X vectors " "(supported %d, need %d)\n", msgs, vectors); goto msi; } device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues, tx_queues); msgs = vectors; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { if (vectors != msgs) { device_printf(dev, "Unable to allocate sufficient MSI-X vectors " "(got %d, need %d)\n", vectors, msgs); pci_release_msi(dev); if (bar != -1) { bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } goto msi; } device_printf(dev, "Using MSI-X interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d MSI-X vectors, err: %d\n", vectors, err); if (bar != -1) { bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; 
scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { scctx->isc_vectors = 1; device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; const char *ring_state = "UNKNOWN"; /* XXX needed ? */ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; qidx_t *ndesc; char *p, *next; int nqs, rc, i; nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; default: printf("%s: unhandled type\n", __func__); return (EINVAL); } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, "disable MSI-X (default 0)"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate", CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0, "cause TX to abdicate instead of running to completion"); ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED; SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset", CTLFLAG_RDTUN, 
&ctx->ifc_sysctl_core_offset, 0, "offset to start using cores at"); SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx", CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0, "use separate cores for TX and RX"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of TX descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of RX descriptors to use, 0 = use default #"); } static void iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt = "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times DMA map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors procesed for clean"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); 
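		/* Soft (mp_ring) state and statistics for this TX queue. */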
SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications, "# of consumer abdications in the mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size", CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } void iflib_request_reset(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; STATE_UNLOCK(ctx); } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m) { struct mbuf *n; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } return (n); } #endif #ifdef NETDUMP static void iflib_netdump_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { if_ctx_t ctx; 
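	/*
	 * Report the RX ring count and the cluster geometry of the first
	 * free list so that netdump can preallocate matching buffers.
	 */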
	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(if_t ifp, enum netdump_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case NETDUMP_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				fl = rxq->ifr_fl;
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_netdump_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
	return (error);
}

static int
iflib_netdump_poll(if_t ifp, int count)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	return (0);
}
#endif /* NETDUMP */