Index: head/sys/dev/sfxge/sfxge.h =================================================================== --- head/sys/dev/sfxge/sfxge.h (revision 310626) +++ head/sys/dev/sfxge/sfxge.h (revision 310627) @@ -1,475 +1,475 @@ /*- * Copyright (c) 2010-2016 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SFXGE_H #define _SFXGE_H #include #include #include #include #include #include #include #include #include #include #include #include "sfxge_ioc.h" /* * Debugging */ #if 0 #define DBGPRINT(dev, fmt, args...) \ device_printf(dev, "%s: " fmt "\n", __func__, ## args) #else #define DBGPRINT(dev, fmt, args...) #endif /* * Backward-compatibility */ #ifndef CACHE_LINE_SIZE /* This should be right on most machines the driver will be used on, and * we needn't care too much about wasting a few KB per interface. 
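 * CACHE_LINE_SIZE is used here only for __aligned() padding of the
 * per-queue structures and for receive buffer alignment, so on machines
 * with smaller cache lines a conservative 128-byte fallback merely
 * wastes a little memory per interface.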
*/ #define CACHE_LINE_SIZE 128 #endif #ifndef IFCAP_LINKSTATE #define IFCAP_LINKSTATE 0 #endif #ifndef IFCAP_VLAN_HWTSO #define IFCAP_VLAN_HWTSO 0 #endif #ifndef IFM_10G_T #define IFM_10G_T IFM_UNKNOWN #endif #ifndef IFM_10G_KX4 #define IFM_10G_KX4 IFM_10G_CX4 #endif #ifndef IFM_40G_CR4 #define IFM_40G_CR4 IFM_UNKNOWN #endif #if (__FreeBSD_version >= 800501 && __FreeBSD_version < 900000) || \ __FreeBSD_version >= 900003 #define SFXGE_HAVE_DESCRIBE_INTR #endif #ifdef IFM_ETH_RXPAUSE #define SFXGE_HAVE_PAUSE_MEDIAOPTS #endif #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #endif #include "sfxge_rx.h" #include "sfxge_tx.h" #define ROUNDUP_POW_OF_TWO(_n) (1ULL << flsl((_n) - 1)) #define SFXGE_IP_ALIGN 2 #define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */ #define SFXGE_MAGIC_RESERVED 0x8000 #define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6 #define SFXGE_MAGIC_DMAQ_LABEL_MASK \ ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1) enum sfxge_sw_ev { SFXGE_SW_EV_RX_QFLUSH_DONE = 1, SFXGE_SW_EV_RX_QFLUSH_FAILED, SFXGE_SW_EV_RX_QREFILL, SFXGE_SW_EV_TX_QFLUSH_DONE, }; #define SFXGE_SW_EV_MAGIC(_sw_ev) \ (SFXGE_MAGIC_RESERVED | ((_sw_ev) << SFXGE_MAGIC_DMAQ_LABEL_WIDTH)) static inline uint16_t sfxge_sw_ev_mk_magic(enum sfxge_sw_ev sw_ev, unsigned int label) { KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label, ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label")); return SFXGE_SW_EV_MAGIC(sw_ev) | label; } static inline uint16_t sfxge_sw_ev_rxq_magic(enum sfxge_sw_ev sw_ev, struct sfxge_rxq *rxq) { return sfxge_sw_ev_mk_magic(sw_ev, 0); } static inline uint16_t sfxge_sw_ev_txq_magic(enum sfxge_sw_ev sw_ev, struct sfxge_txq *txq) { return sfxge_sw_ev_mk_magic(sw_ev, txq->type); } enum sfxge_evq_state { SFXGE_EVQ_UNINITIALIZED = 0, SFXGE_EVQ_INITIALIZED, SFXGE_EVQ_STARTING, SFXGE_EVQ_STARTED }; #define SFXGE_EV_BATCH 16384 struct sfxge_evq { /* Structure members below are sorted by usage order */ struct sfxge_softc *sc; struct mtx lock; unsigned int index; enum sfxge_evq_state init_state; efsys_mem_t mem; efx_evq_t *common; unsigned int read_ptr; boolean_t exception; unsigned int rx_done; unsigned int tx_done; /* Linked list of TX queues with completions to process */ struct sfxge_txq *txq; struct sfxge_txq **txqs; /* Structure members not used on event processing path */ unsigned int buf_base_id; unsigned int entries; char lock_name[SFXGE_LOCK_NAME_MAX]; } __aligned(CACHE_LINE_SIZE); #define SFXGE_NDESCS 1024 #define SFXGE_MODERATION 30 enum sfxge_intr_state { SFXGE_INTR_UNINITIALIZED = 0, SFXGE_INTR_INITIALIZED, SFXGE_INTR_TESTING, SFXGE_INTR_STARTED }; struct sfxge_intr_hdl { int eih_rid; void *eih_tag; struct resource *eih_res; }; struct sfxge_intr { enum sfxge_intr_state state; struct resource *msix_res; struct sfxge_intr_hdl *table; int n_alloc; int type; efsys_mem_t status; uint32_t zero_count; }; enum sfxge_mcdi_state { SFXGE_MCDI_UNINITIALIZED = 0, SFXGE_MCDI_INITIALIZED, SFXGE_MCDI_BUSY, SFXGE_MCDI_COMPLETED }; struct sfxge_mcdi { struct mtx lock; efsys_mem_t mem; enum sfxge_mcdi_state state; efx_mcdi_transport_t transport; /* Only used in debugging output */ char lock_name[SFXGE_LOCK_NAME_MAX]; }; struct sfxge_hw_stats { clock_t update_time; efsys_mem_t dma_buf; void *decode_buf; }; enum sfxge_port_state { SFXGE_PORT_UNINITIALIZED = 0, SFXGE_PORT_INITIALIZED, SFXGE_PORT_STARTED }; struct sfxge_port { struct sfxge_softc *sc; struct mtx lock; enum sfxge_port_state init_state; #ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS unsigned int wanted_fc; #endif struct sfxge_hw_stats phy_stats; struct sfxge_hw_stats 
mac_stats; efx_link_mode_t link_mode; uint8_t mcast_addrs[EFX_MAC_MULTICAST_LIST_MAX * EFX_MAC_ADDR_LEN]; unsigned int mcast_count; /* Only used in debugging output */ char lock_name[SFXGE_LOCK_NAME_MAX]; }; enum sfxge_softc_state { SFXGE_UNINITIALIZED = 0, SFXGE_INITIALIZED, SFXGE_REGISTERED, SFXGE_STARTED }; struct sfxge_softc { device_t dev; struct sx softc_lock; char softc_lock_name[SFXGE_LOCK_NAME_MAX]; enum sfxge_softc_state init_state; struct ifnet *ifnet; unsigned int if_flags; struct sysctl_oid *stats_node; struct sysctl_oid *txqs_node; struct task task_reset; efx_family_t family; caddr_t vpd_data; size_t vpd_size; efx_nic_t *enp; efsys_lock_t enp_lock; unsigned int rxq_entries; unsigned int txq_entries; bus_dma_tag_t parent_dma_tag; efsys_bar_t bar; struct sfxge_intr intr; struct sfxge_mcdi mcdi; struct sfxge_port port; uint32_t buffer_table_next; struct sfxge_evq *evq[SFXGE_RX_SCALE_MAX]; unsigned int ev_moderation; #if EFSYS_OPT_QSTATS clock_t ev_stats_update_time; uint64_t ev_stats[EV_NQSTATS]; #endif unsigned int max_rss_channels; uma_zone_t rxq_cache; struct sfxge_rxq *rxq[SFXGE_RX_SCALE_MAX]; - unsigned int rx_indir_table[SFXGE_RX_SCALE_MAX]; + unsigned int rx_indir_table[EFX_RSS_TBL_SIZE]; struct sfxge_txq *txq[SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX]; struct ifmedia media; size_t rx_prefix_size; size_t rx_buffer_size; size_t rx_buffer_align; int rx_cluster_size; unsigned int evq_max; unsigned int evq_count; unsigned int rxq_count; unsigned int txq_count; unsigned int tso_fw_assisted; #define SFXGE_FATSOV1 (1 << 0) #define SFXGE_FATSOV2 (1 << 1) #if EFSYS_OPT_MCDI_LOGGING int mcdi_logging; #endif }; #define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN) #define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING) #define SFXGE_PARAM(_name) "hw.sfxge." #_name SYSCTL_DECL(_hw_sfxge); /* * From sfxge.c. */ extern void sfxge_schedule_reset(struct sfxge_softc *sc); extern void sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp); /* * From sfxge_dma.c. */ extern int sfxge_dma_init(struct sfxge_softc *sc); extern void sfxge_dma_fini(struct sfxge_softc *sc); extern int sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp); extern void sfxge_dma_free(efsys_mem_t *esmp); extern int sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs); /* * From sfxge_ev.c. */ extern int sfxge_ev_init(struct sfxge_softc *sc); extern void sfxge_ev_fini(struct sfxge_softc *sc); extern int sfxge_ev_start(struct sfxge_softc *sc); extern void sfxge_ev_stop(struct sfxge_softc *sc); extern int sfxge_ev_qpoll(struct sfxge_evq *evq); /* * From sfxge_intr.c. */ extern int sfxge_intr_init(struct sfxge_softc *sc); extern void sfxge_intr_fini(struct sfxge_softc *sc); extern int sfxge_intr_start(struct sfxge_softc *sc); extern void sfxge_intr_stop(struct sfxge_softc *sc); /* * From sfxge_mcdi.c. */ extern int sfxge_mcdi_init(struct sfxge_softc *sc); extern void sfxge_mcdi_fini(struct sfxge_softc *sc); extern int sfxge_mcdi_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ip); /* * From sfxge_nvram.c. */ extern int sfxge_nvram_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ip); /* * From sfxge_port.c. 
*/ extern int sfxge_port_init(struct sfxge_softc *sc); extern void sfxge_port_fini(struct sfxge_softc *sc); extern int sfxge_port_start(struct sfxge_softc *sc); extern void sfxge_port_stop(struct sfxge_softc *sc); extern void sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode); extern int sfxge_mac_filter_set(struct sfxge_softc *sc); extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc); extern uint64_t sfxge_get_counter(struct ifnet *ifp, ift_counter c); #define SFXGE_MAX_MTU (9 * 1024) #define SFXGE_ADAPTER_LOCK_INIT(_sc, _ifname) \ do { \ struct sfxge_softc *__sc = (_sc); \ \ snprintf((__sc)->softc_lock_name, \ sizeof((__sc)->softc_lock_name), \ "%s:softc", (_ifname)); \ sx_init(&(__sc)->softc_lock, (__sc)->softc_lock_name); \ } while (B_FALSE) #define SFXGE_ADAPTER_LOCK_DESTROY(_sc) \ sx_destroy(&(_sc)->softc_lock) #define SFXGE_ADAPTER_LOCK(_sc) \ sx_xlock(&(_sc)->softc_lock) #define SFXGE_ADAPTER_UNLOCK(_sc) \ sx_xunlock(&(_sc)->softc_lock) #define SFXGE_ADAPTER_LOCK_ASSERT_OWNED(_sc) \ sx_assert(&(_sc)->softc_lock, LA_XLOCKED) #define SFXGE_PORT_LOCK_INIT(_port, _ifname) \ do { \ struct sfxge_port *__port = (_port); \ \ snprintf((__port)->lock_name, \ sizeof((__port)->lock_name), \ "%s:port", (_ifname)); \ mtx_init(&(__port)->lock, (__port)->lock_name, \ NULL, MTX_DEF); \ } while (B_FALSE) #define SFXGE_PORT_LOCK_DESTROY(_port) \ mtx_destroy(&(_port)->lock) #define SFXGE_PORT_LOCK(_port) \ mtx_lock(&(_port)->lock) #define SFXGE_PORT_UNLOCK(_port) \ mtx_unlock(&(_port)->lock) #define SFXGE_PORT_LOCK_ASSERT_OWNED(_port) \ mtx_assert(&(_port)->lock, MA_OWNED) #define SFXGE_MCDI_LOCK_INIT(_mcdi, _ifname) \ do { \ struct sfxge_mcdi *__mcdi = (_mcdi); \ \ snprintf((__mcdi)->lock_name, \ sizeof((__mcdi)->lock_name), \ "%s:mcdi", (_ifname)); \ mtx_init(&(__mcdi)->lock, (__mcdi)->lock_name, \ NULL, MTX_DEF); \ } while (B_FALSE) #define SFXGE_MCDI_LOCK_DESTROY(_mcdi) \ mtx_destroy(&(_mcdi)->lock) #define SFXGE_MCDI_LOCK(_mcdi) \ mtx_lock(&(_mcdi)->lock) #define SFXGE_MCDI_UNLOCK(_mcdi) \ mtx_unlock(&(_mcdi)->lock) #define SFXGE_MCDI_LOCK_ASSERT_OWNED(_mcdi) \ mtx_assert(&(_mcdi)->lock, MA_OWNED) #define SFXGE_EVQ_LOCK_INIT(_evq, _ifname, _evq_index) \ do { \ struct sfxge_evq *__evq = (_evq); \ \ snprintf((__evq)->lock_name, \ sizeof((__evq)->lock_name), \ "%s:evq%u", (_ifname), (_evq_index)); \ mtx_init(&(__evq)->lock, (__evq)->lock_name, \ NULL, MTX_DEF); \ } while (B_FALSE) #define SFXGE_EVQ_LOCK_DESTROY(_evq) \ mtx_destroy(&(_evq)->lock) #define SFXGE_EVQ_LOCK(_evq) \ mtx_lock(&(_evq)->lock) #define SFXGE_EVQ_UNLOCK(_evq) \ mtx_unlock(&(_evq)->lock) #define SFXGE_EVQ_LOCK_ASSERT_OWNED(_evq) \ mtx_assert(&(_evq)->lock, MA_OWNED) #endif /* _SFXGE_H */ Index: head/sys/dev/sfxge/sfxge_rx.c =================================================================== --- head/sys/dev/sfxge/sfxge_rx.c (revision 310626) +++ head/sys/dev/sfxge/sfxge_rx.c (revision 310627) @@ -1,1420 +1,1420 @@ /*- * Copyright (c) 2010-2016 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include "common/efx.h" #include "sfxge.h" #include "sfxge_rx.h" #define RX_REFILL_THRESHOLD(_entries) (EFX_RXQ_LIMIT(_entries) * 9 / 10) #ifdef SFXGE_LRO SYSCTL_NODE(_hw_sfxge, OID_AUTO, lro, CTLFLAG_RD, NULL, "Large receive offload (LRO) parameters"); #define SFXGE_LRO_PARAM(_param) SFXGE_PARAM(lro._param) /* Size of the LRO hash table. Must be a power of 2. A larger table * means we can accelerate a larger number of streams. */ static unsigned lro_table_size = 128; TUNABLE_INT(SFXGE_LRO_PARAM(table_size), &lro_table_size); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, table_size, CTLFLAG_RDTUN, &lro_table_size, 0, "Size of the LRO hash table (must be a power of 2)"); /* Maximum length of a hash chain. If chains get too long then the lookup * time increases and may exceed the benefit of LRO. */ static unsigned lro_chain_max = 20; TUNABLE_INT(SFXGE_LRO_PARAM(chain_max), &lro_chain_max); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, chain_max, CTLFLAG_RDTUN, &lro_chain_max, 0, "The maximum length of a hash chain"); /* Maximum time (in ticks) that a connection can be idle before it's LRO * state is discarded. */ static unsigned lro_idle_ticks; /* initialised in sfxge_rx_init() */ TUNABLE_INT(SFXGE_LRO_PARAM(idle_ticks), &lro_idle_ticks); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, idle_ticks, CTLFLAG_RDTUN, &lro_idle_ticks, 0, "The maximum time (in ticks) that a connection can be idle " "before it's LRO state is discarded"); /* Number of packets with payload that must arrive in-order before a * connection is eligible for LRO. The idea is we should avoid coalescing * segments when the sender is in slow-start because reducing the ACK rate * can damage performance. */ static int lro_slow_start_packets = 2000; TUNABLE_INT(SFXGE_LRO_PARAM(slow_start_packets), &lro_slow_start_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, slow_start_packets, CTLFLAG_RDTUN, &lro_slow_start_packets, 0, "Number of packets with payload that must arrive in-order before " "a connection is eligible for LRO"); /* Number of packets with payload that must arrive in-order following loss * before a connection is eligible for LRO. 
The idea is we should avoid * coalescing segments when the sender is recovering from loss, because * reducing the ACK rate can damage performance. */ static int lro_loss_packets = 20; TUNABLE_INT(SFXGE_LRO_PARAM(loss_packets), &lro_loss_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, loss_packets, CTLFLAG_RDTUN, &lro_loss_packets, 0, "Number of packets with payload that must arrive in-order " "following loss before a connection is eligible for LRO"); /* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */ #define SFXGE_LRO_L2_ID_VLAN 0x4000 #define SFXGE_LRO_L2_ID_IPV6 0x8000 #define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN) #define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6)) /* Compare IPv6 addresses, avoiding conditional branches */ static unsigned long ipv6_addr_cmp(const struct in6_addr *left, const struct in6_addr *right) { #if LONG_BIT == 64 const uint64_t *left64 = (const uint64_t *)left; const uint64_t *right64 = (const uint64_t *)right; return (left64[0] - right64[0]) | (left64[1] - right64[1]); #else return (left->s6_addr32[0] - right->s6_addr32[0]) | (left->s6_addr32[1] - right->s6_addr32[1]) | (left->s6_addr32[2] - right->s6_addr32[2]) | (left->s6_addr32[3] - right->s6_addr32[3]); #endif } #endif /* SFXGE_LRO */ void sfxge_rx_qflush_done(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_DONE; } void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_FAILED; } #ifdef RSS static uint8_t toep_key[RSS_KEYSIZE]; #else static uint8_t toep_key[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; #endif static void sfxge_rx_post_refill(void *arg) { struct sfxge_rxq *rxq = arg; struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; uint16_t magic; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq); /* This is guaranteed due to the start/stop order of rx and ev */ KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq not started")); KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); efx_ev_qpost(evq->common, magic); } static void sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying) { /* Initially retry after 100 ms, but back off in case of * repeated failures as we probably have to wait for the * administrator to raise the pool limit. 
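 * In other words the delay doubles on each failed refill attempt,
 * roughly 100 ms, 200 ms, 400 ms, ..., and is capped at 10 seconds
 * (10 * hz ticks).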
*/ if (retrying) rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz); else rxq->refill_delay = hz / 10; callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay, sfxge_rx_post_refill, rxq); } #define SFXGE_REFILL_BATCH 64 static void sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying) { struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; unsigned int batch; unsigned int rxfill; unsigned int mblksize; int ntodo; efsys_dma_addr_t addr[SFXGE_REFILL_BATCH]; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; prefetch_read_many(sc->enp); prefetch_read_many(rxq->common); SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; rxfill = rxq->added - rxq->completed; KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries), ("rxfill > EFX_RXQ_LIMIT(rxq->entries)")); ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target); KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries), ("ntodo > EFX_RQX_LIMIT(rxq->entries)")); if (ntodo == 0) return; batch = 0; mblksize = sc->rx_buffer_size - sc->rx_buffer_align; while (ntodo-- > 0) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; bus_dma_segment_t seg; struct mbuf *m; id = (rxq->added + batch) & rxq->ptr_mask; rx_desc = &rxq->queue[id]; KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL")); rx_desc->flags = EFX_DISCARD; m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_cluster_size); if (m == NULL) break; /* m_len specifies length of area to be mapped for DMA */ m->m_len = mblksize; m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE); m->m_data += sc->rx_buffer_align; sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg); addr[batch++] = seg.ds_addr; if (batch == SFXGE_REFILL_BATCH) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; batch = 0; } } if (ntodo != 0) sfxge_rx_schedule_refill(rxq, retrying); if (batch != 0) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; } /* Make the descriptors visible to the hardware */ bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map, BUS_DMASYNC_PREWRITE); efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed); /* The queue could still be empty if no descriptors were actually * pushed, in which case there will be no event to cause the next * refill, so we must schedule a refill ourselves. */ if(rxq->pushed == rxq->completed) { sfxge_rx_schedule_refill(rxq, retrying); } } void sfxge_rx_qrefill(struct sfxge_rxq *rxq) { if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; /* Make sure the queue is full */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE); } static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m) { struct ifnet *ifp = sc->ifnet; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_data = 0xffff; ifp->if_input(ifp, m); } static void sfxge_rx_deliver(struct sfxge_softc *sc, struct sfxge_rx_sw_desc *rx_desc) { struct mbuf *m = rx_desc->mbuf; int flags = rx_desc->flags; int csum_flags; /* Convert checksum flags */ csum_flags = (flags & EFX_CKSUM_IPV4) ? (CSUM_IP_CHECKED | CSUM_IP_VALID) : 0; if (flags & EFX_CKSUM_TCPUDP) csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { m->m_pkthdr.flowid = efx_psuedo_hdr_hash_get(sc->enp, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); /* The hash covers a 4-tuple for TCP only */ M_HASHTYPE_SET(m, (flags & EFX_PKT_IPV4) ? ((flags & EFX_PKT_TCP) ? 
M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_IPV4) : ((flags & EFX_PKT_TCP) ? M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_IPV6)); } m->m_data += sc->rx_prefix_size; m->m_len = rx_desc->size - sc->rx_prefix_size; m->m_pkthdr.len = m->m_len; m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, rx_desc->mbuf); rx_desc->flags = EFX_DISCARD; rx_desc->mbuf = NULL; } #ifdef SFXGE_LRO static void sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c) { struct sfxge_softc *sc = st->sc; struct mbuf *m = c->mbuf; struct tcphdr *c_th; int csum_flags; KASSERT(m, ("no mbuf to deliver")); ++st->n_bursts; /* Finish off packet munging and recalculate IP header checksum. */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len = htons(iph->ip_len); iph->ip_sum = 0; iph->ip_sum = in_cksum_hdr(iph); c_th = (struct tcphdr *)(iph + 1); csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen = htons(iph->ip6_plen); c_th = (struct tcphdr *)(iph + 1); csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } c_th->th_win = c->th_last->th_win; c_th->th_ack = c->th_last->th_ack; if (c_th->th_off == c->th_last->th_off) { /* Copy TCP options (take care to avoid going negative). */ int optlen = ((c_th->th_off - 5) & 0xf) << 2u; memcpy(c_th + 1, c->th_last + 1, optlen); } m->m_pkthdr.flowid = c->conn_hash; M_HASHTYPE_SET(m, SFXGE_LRO_CONN_IS_TCPIPV4(c) ? M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_TCP_IPV6); m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, m); c->mbuf = NULL; c->delivered = 1; } /* Drop the given connection, and add it to the free list. */ static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { unsigned bucket; KASSERT(!c->mbuf, ("found orphaned mbuf")); if (c->next_buf.mbuf != NULL) { sfxge_rx_deliver(rxq->sc, &c->next_buf); LIST_REMOVE(c, active_link); } bucket = c->conn_hash & rxq->lro.conns_mask; KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong")); --rxq->lro.conns_n[bucket]; TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link); } /* Stop tracking connections that have gone idle in order to keep hash * chains short. */ static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now) { struct sfxge_lro_conn *c; unsigned i; KASSERT(LIST_EMPTY(&rxq->lro.active_conns), ("found active connections")); rxq->lro.last_purge_ticks = now; for (i = 0; i <= rxq->lro.conns_mask; ++i) { if (TAILQ_EMPTY(&rxq->lro.conns[i])) continue; c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq); if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; sfxge_lro_drop(rxq, c); } } } static void sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, struct tcphdr *th) { struct tcphdr *c_th; /* Tack the new mbuf onto the chain. */ KASSERT(!mbuf->m_next, ("mbuf already chained")); c->mbuf_tail->m_next = mbuf; c->mbuf_tail = mbuf; /* Increase length appropriately */ c->mbuf->m_pkthdr.len += mbuf->m_len; /* Update the connection state flags */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } c_th->th_flags |= (th->th_flags & TH_PUSH); c->th_last = th; ++st->n_merges; /* Pass packet up now if another segment could overflow the IP * length. 
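 * (65536 is the 64 KB limit of the 16-bit IP total-length field; the
 * 9200 byte margin presumably leaves room for one more jumbo-MTU-sized
 * segment before that limit would be exceeded.)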
*/ if (c->mbuf->m_pkthdr.len > 65536 - 9200) sfxge_lro_deliver(st, c); } static void sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, void *nh, struct tcphdr *th) { /* Start the chain */ c->mbuf = mbuf; c->mbuf_tail = c->mbuf; c->nh = nh; c->th_last = th; mbuf->m_pkthdr.len = mbuf->m_len; /* Mangle header fields for later processing */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = nh; iph->ip_len = ntohs(iph->ip_len); } else { struct ip6_hdr *iph = nh; iph->ip6_plen = ntohs(iph->ip6_plen); } } /* Try to merge or otherwise hold or deliver (as appropriate) the * packet buffered for this connection (c->next_buf). Return a flag * indicating whether the connection is still active for LRO purposes. */ static int sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { struct sfxge_rx_sw_desc *rx_buf = &c->next_buf; char *eh = c->next_eh; int data_length, hdr_length, dont_merge; unsigned th_seq, pkt_length; struct tcphdr *th; unsigned now; if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip_len) + (char *) iph - eh; } else { struct ip6_hdr *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip6_plen) + (char *) th - eh; } hdr_length = (char *) th + th->th_off * 4 - eh; data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) - hdr_length); th_seq = ntohl(th->th_seq); dont_merge = ((data_length <= 0) | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN))); /* Check for options other than aligned timestamp. */ if (th->th_off != 5) { const uint32_t *opt_ptr = (const uint32_t *) (th + 1); if (th->th_off == 8 && opt_ptr[0] == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { /* timestamp option -- okay */ } else { dont_merge = 1; } } if (__predict_false(th_seq != c->next_seq)) { /* Out-of-order, so start counting again. */ if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); c->n_in_order_pkts -= lro_loss_packets; c->next_seq = th_seq + data_length; ++rxq->lro.n_misorder; goto deliver_buf_out; } c->next_seq = th_seq + data_length; now = ticks; if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); sfxge_lro_drop(rxq, c); return (0); } c->last_pkt_ticks = ticks; if (c->n_in_order_pkts < lro_slow_start_packets) { /* May be in slow-start, so don't merge. 
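 * Each in-order packet seen here counts towards lro_slow_start_packets;
 * only once that threshold is reached does the connection become
 * eligible for merging, so the ACK rate is not reduced while the sender
 * may still be growing its congestion window.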
*/ ++rxq->lro.n_slow_start; ++c->n_in_order_pkts; goto deliver_buf_out; } if (__predict_false(dont_merge)) { if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); if (th->th_flags & (TH_FIN | TH_RST)) { ++rxq->lro.n_drop_closed; sfxge_lro_drop(rxq, c); return (0); } goto deliver_buf_out; } rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size; if (__predict_true(c->mbuf != NULL)) { /* Remove headers and any padding */ rx_buf->mbuf->m_data += hdr_length; rx_buf->mbuf->m_len = data_length; sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th); } else { /* Remove any padding */ rx_buf->mbuf->m_len = pkt_length; sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th); } rx_buf->mbuf = NULL; return (1); deliver_buf_out: sfxge_rx_deliver(rxq->sc, rx_buf); return (1); } static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash, uint16_t l2_id, void *nh, struct tcphdr *th) { unsigned bucket = conn_hash & st->conns_mask; struct sfxge_lro_conn *c; if (st->conns_n[bucket] >= lro_chain_max) { ++st->n_too_many; return; } if (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); } else { c = malloc(sizeof(*c), M_SFXGE, M_NOWAIT); if (c == NULL) return; c->mbuf = NULL; c->next_buf.mbuf = NULL; } /* Create the connection tracking data */ ++st->conns_n[bucket]; TAILQ_INSERT_HEAD(&st->conns[bucket], c, link); c->l2_id = l2_id; c->conn_hash = conn_hash; c->source = th->th_sport; c->dest = th->th_dport; c->n_in_order_pkts = 0; c->last_pkt_ticks = *(volatile int *)&ticks; c->delivered = 0; ++st->n_new_stream; /* NB. We don't initialise c->next_seq, and it doesn't matter what * value it has. Most likely the next packet received for this * connection will not match -- no harm done. */ } /* Process mbuf and decide whether to dispatch it to the stack now or * later. */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { struct sfxge_softc *sc = rxq->sc; struct mbuf *m = rx_buf->mbuf; struct ether_header *eh; struct sfxge_lro_conn *c; uint16_t l2_id; uint16_t l3_proto; void *nh; struct tcphdr *th; uint32_t conn_hash; unsigned bucket; /* Get the hardware hash */ conn_hash = efx_psuedo_hdr_hash_get(sc->enp, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = (struct ether_vlan_header *)eh; l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) | SFXGE_LRO_L2_ID_VLAN; l3_proto = veh->evl_proto; nh = veh + 1; } else { l2_id = 0; l3_proto = eh->ether_type; nh = eh + 1; } /* Check whether this is a suitable packet (unfragmented * TCP/IPv4 or TCP/IPv6). If so, find the TCP header and * length, and compute a hash if necessary. If not, return. 
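 * For IPv4 this means rejecting packets that carry IP options (header
 * length other than the basic 20 bytes) and fragments (MF flag or a
 * non-zero fragment offset); such packets are delivered to the stack
 * unmerged.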
*/ if (l3_proto == htons(ETHERTYPE_IP)) { struct ip *iph = nh; KASSERT(iph->ip_p == IPPROTO_TCP, ("IPv4 protocol is not TCP, but packet marker is set")); if ((iph->ip_hl - (sizeof(*iph) >> 2u)) | (iph->ip_off & htons(IP_MF | IP_OFFMASK))) goto deliver_now; th = (struct tcphdr *)(iph + 1); } else if (l3_proto == htons(ETHERTYPE_IPV6)) { struct ip6_hdr *iph = nh; KASSERT(iph->ip6_nxt == IPPROTO_TCP, ("IPv6 next header is not TCP, but packet marker is set")); l2_id |= SFXGE_LRO_L2_ID_IPV6; th = (struct tcphdr *)(iph + 1); } else { goto deliver_now; } bucket = conn_hash & rxq->lro.conns_mask; TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) { if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash)) continue; if ((c->source - th->th_sport) | (c->dest - th->th_dport)) continue; if (c->mbuf != NULL) { if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *c_iph, *iph = nh; c_iph = c->nh; if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) | (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr)) continue; } else { struct ip6_hdr *c_iph, *iph = nh; c_iph = c->nh; if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) | ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst)) continue; } } /* Re-insert at head of list to reduce lookup time. */ TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link); if (c->next_buf.mbuf != NULL) { if (!sfxge_lro_try_merge(rxq, c)) goto deliver_now; } else { LIST_INSERT_HEAD(&rxq->lro.active_conns, c, active_link); } c->next_buf = *rx_buf; c->next_eh = eh; c->next_nh = nh; rx_buf->mbuf = NULL; rx_buf->flags = EFX_DISCARD; return; } sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th); deliver_now: sfxge_rx_deliver(sc, rx_buf); } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned t; while (!LIST_EMPTY(&st->active_conns)) { c = LIST_FIRST(&st->active_conns); if (!c->delivered && c->mbuf != NULL) sfxge_lro_deliver(st, c); if (sfxge_lro_try_merge(rxq, c)) { if (c->mbuf != NULL) sfxge_lro_deliver(st, c); LIST_REMOVE(c, active_link); } c->delivered = 0; } t = *(volatile int *)&ticks; if (__predict_false(t != st->last_purge_ticks)) sfxge_lro_purge_idle(rxq, t); } #else /* !SFXGE_LRO */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop) { struct sfxge_softc *sc = rxq->sc; int if_capenable = sc->ifnet->if_capenable; int lro_enabled = if_capenable & IFCAP_LRO; unsigned int index; struct sfxge_evq *evq; unsigned int completed; unsigned int level; struct mbuf *m; struct sfxge_rx_sw_desc *prev = NULL; index = rxq->index; evq = sc->evq[index]; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); completed = rxq->completed; while (completed != rxq->pending) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; id = completed++ & rxq->ptr_mask; rx_desc = &rxq->queue[id]; m = rx_desc->mbuf; if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) goto discard; if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD)) goto discard; /* Read the length from the pseudo header if required */ if (rx_desc->flags & EFX_PKT_PREFIX_LEN) { uint16_t tmp_size; int rc; rc = efx_psuedo_hdr_pkt_length_get(sc->enp, mtod(m, uint8_t *), &tmp_size); KASSERT(rc == 0, ("cannot get packet length: %d", rc)); rx_desc->size = (int)tmp_size + sc->rx_prefix_size; } prefetch_read_many(mtod(m, caddr_t)); switch (rx_desc->flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { case 
EFX_PKT_IPV4: if (~if_capenable & IFCAP_RXCSUM) rx_desc->flags &= ~(EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP); break; case EFX_PKT_IPV6: if (~if_capenable & IFCAP_RXCSUM_IPV6) rx_desc->flags &= ~EFX_CKSUM_TCPUDP; break; case 0: /* Check for loopback packets */ { struct ether_header *etherhp; /*LINTED*/ etherhp = mtod(m, struct ether_header *); if (etherhp->ether_type == htons(SFXGE_ETHERTYPE_LOOPBACK)) { EFSYS_PROBE(loopback); rxq->loopback++; goto discard; } } break; default: KASSERT(B_FALSE, ("Rx descriptor with both IPv4 and IPv6 flags")); goto discard; } /* Pass packet up the stack or into LRO (pipelined) */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else sfxge_rx_deliver(sc, prev); } prev = rx_desc; continue; discard: /* Return the packet to the pool */ m_free(m); rx_desc->mbuf = NULL; } rxq->completed = completed; level = rxq->added - rxq->completed; /* Pass last packet up the stack or into LRO */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else sfxge_rx_deliver(sc, prev); } /* * If there are any pending flows and this is the end of the * poll then they must be completed. */ if (eop) sfxge_lro_end_of_burst(rxq); /* Top up the queue if necessary */ if (level < rxq->refill_threshold) sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE); } static void sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; unsigned int count; unsigned int retry = 3; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; evq = sc->evq[index]; SFXGE_EVQ_LOCK(evq); KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); rxq->init_state = SFXGE_RXQ_INITIALIZED; callout_stop(&rxq->refill_callout); while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) { rxq->flush_state = SFXGE_FLUSH_PENDING; SFXGE_EVQ_UNLOCK(evq); /* Flush the receive queue */ if (efx_rx_qflush(rxq->common) != 0) { SFXGE_EVQ_LOCK(evq); rxq->flush_state = SFXGE_FLUSH_FAILED; break; } count = 0; do { /* Spin for 100 ms */ DELAY(100000); if (rxq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); SFXGE_EVQ_LOCK(evq); if (rxq->flush_state == SFXGE_FLUSH_PENDING) { /* Flush timeout - neither done nor failed */ log(LOG_ERR, "%s: Cannot flush Rx queue %u\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } retry--; } if (rxq->flush_state == SFXGE_FLUSH_FAILED) { log(LOG_ERR, "%s: Flushing Rx queue %u failed\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } rxq->pending = rxq->added; sfxge_rx_qcomplete(rxq, B_TRUE); KASSERT(rxq->completed == rxq->pending, ("rxq->completed != rxq->pending")); rxq->added = 0; rxq->pushed = 0; rxq->pending = 0; rxq->completed = 0; rxq->loopback = 0; /* Destroy the common code receive queue. 
*/ efx_rx_qdestroy(rxq->common); efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); SFXGE_EVQ_UNLOCK(evq); } static int sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; efsys_mem_t *esmp; struct sfxge_evq *evq; int rc; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; esmp = &rxq->mem; evq = sc->evq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp, EFX_RXQ_NBUFS(sc->rxq_entries))) != 0) return (rc); /* Create the common code receive queue. */ if ((rc = efx_rx_qcreate(sc->enp, index, 0, EFX_RXQ_TYPE_DEFAULT, esmp, sc->rxq_entries, rxq->buf_base_id, evq->common, &rxq->common)) != 0) goto fail; SFXGE_EVQ_LOCK(evq); /* Enable the receive queue. */ efx_rx_qenable(rxq->common); rxq->init_state = SFXGE_RXQ_STARTED; rxq->flush_state = SFXGE_FLUSH_REQUIRED; /* Try to fill the queue from the pool. */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE); SFXGE_EVQ_UNLOCK(evq); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); return (rc); } void sfxge_rx_stop(struct sfxge_softc *sc) { int index; efx_mac_filter_default_rxq_clear(sc->enp); /* Stop the receive queue(s) */ index = sc->rxq_count; while (--index >= 0) sfxge_rx_qstop(sc, index); sc->rx_prefix_size = 0; sc->rx_buffer_size = 0; efx_rx_fini(sc->enp); } int sfxge_rx_start(struct sfxge_softc *sc) { struct sfxge_intr *intr; const efx_nic_cfg_t *encp; size_t hdrlen, align, reserved; int index; int rc; intr = &sc->intr; /* Initialize the common code receive module. */ if ((rc = efx_rx_init(sc->enp)) != 0) return (rc); encp = efx_nic_cfg_get(sc->enp); sc->rx_buffer_size = EFX_MAC_PDU(sc->ifnet->if_mtu); /* Calculate the receive packet buffer size. */ sc->rx_prefix_size = encp->enc_rx_prefix_size; /* Ensure IP headers are 32bit aligned */ hdrlen = sc->rx_prefix_size + sizeof (struct ether_header); sc->rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen; sc->rx_buffer_size += sc->rx_buffer_align; /* Align end of packet buffer for RX DMA end padding */ align = MAX(1, encp->enc_rx_buf_align_end); EFSYS_ASSERT(ISP2(align)); sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align); /* * Standard mbuf zones only guarantee pointer-size alignment; * we need extra space to align to the cache line */ reserved = sc->rx_buffer_size + CACHE_LINE_SIZE; /* Select zone for packet buffers */ if (reserved <= MCLBYTES) sc->rx_cluster_size = MCLBYTES; else if (reserved <= MJUMPAGESIZE) sc->rx_cluster_size = MJUMPAGESIZE; else if (reserved <= MJUM9BYTES) sc->rx_cluster_size = MJUM9BYTES; else sc->rx_cluster_size = MJUM16BYTES; /* * Set up the scale table. Enable all hash types and hash insertion. 
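 * In the non-RSS case each table entry maps to an RX queue by simple
 * modulo, e.g. with rxq_count == 4 the table reads 0, 1, 2, 3, 0, 1, ...
 * The loop bound and the length passed to efx_rx_scale_tbl_set() are
 * both taken from the array itself via nitems(); the array is now
 * declared with EFX_RSS_TBL_SIZE entries rather than SFXGE_RX_SCALE_MAX.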
*/ - for (index = 0; index < SFXGE_RX_SCALE_MAX; index++) + for (index = 0; index < nitems(sc->rx_indir_table); index++) #ifdef RSS sc->rx_indir_table[index] = rss_get_indirection_to_bucket(index) % sc->rxq_count; #else sc->rx_indir_table[index] = index % sc->rxq_count; #endif if ((rc = efx_rx_scale_tbl_set(sc->enp, sc->rx_indir_table, - SFXGE_RX_SCALE_MAX)) != 0) + nitems(sc->rx_indir_table))) != 0) goto fail; (void)efx_rx_scale_mode_set(sc->enp, EFX_RX_HASHALG_TOEPLITZ, (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) | (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE); #ifdef RSS rss_getkey(toep_key); #endif if ((rc = efx_rx_scale_key_set(sc->enp, toep_key, sizeof(toep_key))) != 0) goto fail; /* Start the receive queue(s). */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qstart(sc, index)) != 0) goto fail2; } rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common, sc->intr.n_alloc > 1); if (rc != 0) goto fail3; return (0); fail3: fail2: while (--index >= 0) sfxge_rx_qstop(sc, index); fail: efx_rx_fini(sc->enp); return (rc); } #ifdef SFXGE_LRO static void sfxge_lro_init(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; unsigned i; st->conns_mask = lro_table_size - 1; KASSERT(!((st->conns_mask + 1) & st->conns_mask), ("lro_table_size must be a power of 2")); st->sc = rxq->sc; st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]), M_SFXGE, M_WAITOK); st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]), M_SFXGE, M_WAITOK); for (i = 0; i <= st->conns_mask; ++i) { TAILQ_INIT(&st->conns[i]); st->conns_n[i] = 0; } LIST_INIT(&st->active_conns); TAILQ_INIT(&st->free_conns); } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned i; /* Return cleanly if sfxge_lro_init() has not been called. */ if (st->conns == NULL) return; KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections")); for (i = 0; i <= st->conns_mask; ++i) { while (!TAILQ_EMPTY(&st->conns[i])) { c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq); sfxge_lro_drop(rxq, c); } } while (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); KASSERT(!c->mbuf, ("found orphaned mbuf")); free(c, M_SFXGE); } free(st->conns_n, M_SFXGE); free(st->conns, M_SFXGE); st->conns = NULL; } #else static void sfxge_lro_init(struct sfxge_rxq *rxq) { } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ static void sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; rxq = sc->rxq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); /* Free the context array and the flow table. */ free(rxq->queue, M_SFXGE); sfxge_lro_fini(rxq); /* Release DMA memory. */ sfxge_dma_free(&rxq->mem); sc->rxq[index] = NULL; free(rxq, M_SFXGE); } static int sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; efsys_mem_t *esmp; int rc; KASSERT(index < sc->rxq_count, ("index >= %d", sc->rxq_count)); rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK); rxq->sc = sc; rxq->index = index; rxq->entries = sc->rxq_entries; rxq->ptr_mask = rxq->entries - 1; rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries); sc->rxq[index] = rxq; esmp = &rxq->mem; evq = sc->evq[index]; /* Allocate and zero DMA space. 
*/ if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0) return (rc); /* Allocate buffer table entries. */ sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries), &rxq->buf_base_id); /* Allocate the context array and the flow table. */ rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries, M_SFXGE, M_WAITOK | M_ZERO); sfxge_lro_init(rxq); callout_init(&rxq->refill_callout, 1); rxq->init_state = SFXGE_RXQ_INITIALIZED; return (0); } static const struct { const char *name; size_t offset; } sfxge_rx_stats[] = { #define SFXGE_RX_STAT(name, member) \ { #name, offsetof(struct sfxge_rxq, member) } #ifdef SFXGE_LRO SFXGE_RX_STAT(lro_merges, lro.n_merges), SFXGE_RX_STAT(lro_bursts, lro.n_bursts), SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start), SFXGE_RX_STAT(lro_misorder, lro.n_misorder), SFXGE_RX_STAT(lro_too_many, lro.n_too_many), SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream), SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle), SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed) #endif }; static int sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned int sum, index; /* Sum across all RX queues */ sum = 0; for (index = 0; index < sc->rxq_count; index++) sum += *(unsigned int *)((caddr_t)sc->rxq[index] + sfxge_rx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_rx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < nitems(sfxge_rx_stats); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_rx_stats[id].name, CTLTYPE_UINT|CTLFLAG_RD, sc, id, sfxge_rx_stat_handler, "IU", ""); } } void sfxge_rx_fini(struct sfxge_softc *sc) { int index; index = sc->rxq_count; while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; } int sfxge_rx_init(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; int rc; #ifdef SFXGE_LRO if (!ISP2(lro_table_size)) { log(LOG_ERR, "%s=%u must be power of 2", SFXGE_LRO_PARAM(table_size), lro_table_size); rc = EINVAL; goto fail_lro_table_size; } if (lro_idle_ticks == 0) lro_idle_ticks = hz / 10 + 1; /* 100 ms */ #endif intr = &sc->intr; sc->rxq_count = intr->n_alloc; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); /* Initialize the receive queue(s) - one per interrupt. */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qinit(sc, index)) != 0) goto fail; } sfxge_rx_stat_init(sc); return (0); fail: /* Tear down the receive queue(s). */ while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; #ifdef SFXGE_LRO fail_lro_table_size: #endif return (rc); } Index: head/sys/dev/sfxge/sfxge_tx.c =================================================================== --- head/sys/dev/sfxge/sfxge_tx.c (revision 310626) +++ head/sys/dev/sfxge/sfxge_tx.c (revision 310627) @@ -1,2006 +1,2007 @@ /*- * Copyright (c) 2010-2016 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ /* Theory of operation: * * Tx queues allocation and mapping * * One Tx queue with enabled checksum offload is allocated per Rx channel * (event queue). Also 2 Tx queues (one without checksum offload and one * with IP checksum offload only) are allocated and bound to event queue 0. * sfxge_txq_type is used as Tx queue label. * * So, event queue plus label mapping to Tx queue index is: * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES) * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1 * See sfxge_get_txq_by_label() sfxge_ev.c */ #include __FBSDID("$FreeBSD$"); #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include "common/efx.h" #include "sfxge.h" #include "sfxge_tx.h" #define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max) static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_get_max, 0, "Maximum number of any packets in deferred packet get-list"); #define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \ SFXGE_PARAM(tx_dpl_get_non_tcp_max) static int sfxge_tx_dpl_get_non_tcp_max = SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_get_non_tcp_max, 0, "Maximum number of non-TCP packets in deferred packet get-list"); #define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max) static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_put_max, 0, "Maximum number of any packets in deferred packet put-list"); #define SFXGE_PARAM_TSO_FW_ASSISTED SFXGE_PARAM(tso_fw_assisted) static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2); TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted); SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN, &sfxge_tso_fw_assisted, 0, "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware"); static const 
struct { const char *name; size_t offset; } sfxge_tx_stats[] = { #define SFXGE_TX_STAT(name, member) \ { #name, offsetof(struct sfxge_txq, member) } SFXGE_TX_STAT(tso_bursts, tso_bursts), SFXGE_TX_STAT(tso_packets, tso_packets), SFXGE_TX_STAT(tso_long_headers, tso_long_headers), SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many), SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc), SFXGE_TX_STAT(tx_collapses, collapses), SFXGE_TX_STAT(tx_drops, drops), SFXGE_TX_STAT(tx_get_overflow, get_overflow), SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow), SFXGE_TX_STAT(tx_put_overflow, put_overflow), SFXGE_TX_STAT(tx_netdown_drops, netdown_drops), }; /* Forward declarations. */ static void sfxge_tx_qdpl_service(struct sfxge_txq *txq); static void sfxge_tx_qlist_post(struct sfxge_txq *txq); static void sfxge_tx_qunblock(struct sfxge_txq *txq); static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg, int vlan_tagged); static int sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf) { uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? mbuf->m_pkthdr.ether_vtag : 0); if (this_tag == txq->hw_vlan_tci) return (0); efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), &txq->pend_desc[0]); txq->n_pend_desc = 1; txq->hw_vlan_tci = this_tag; return (1); } static inline void sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp) { KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); if (__predict_false(*pstmp == &txq->stmp[txq->ptr_mask])) *pstmp = &txq->stmp[0]; else (*pstmp)++; } void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) { unsigned int completed; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); completed = txq->completed; while (completed != txq->pending) { struct sfxge_tx_mapping *stmp; unsigned int id; id = completed++ & txq->ptr_mask; stmp = &txq->stmp[id]; if (stmp->flags & TX_BUF_UNMAP) { bus_dmamap_unload(txq->packet_dma_tag, stmp->map); if (stmp->flags & TX_BUF_MBUF) { struct mbuf *m = stmp->u.mbuf; do m = m_free(m); while (m != NULL); } else { free(stmp->u.heap_buf, M_SFXGE); } stmp->flags = 0; } } txq->completed = completed; /* Check whether we need to unblock the queue. */ mb(); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) sfxge_tx_qunblock(txq); } } static unsigned int sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) { /* Absence of TCP checksum flags does not mean that it is non-TCP * but it should be true if user wants to achieve high throughput. */ return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); } /* * Reorder the put list and append it to the get list. */ static void sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *get_next, **get_tailp; volatile uintptr_t *putp; uintptr_t put; unsigned int count; unsigned int non_tcp_count; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); stdp = &txq->dpl; /* Acquire the put list. */ putp = &stdp->std_put; put = atomic_readandclear_ptr(putp); mbuf = (void *)put; if (mbuf == NULL) return; /* Reverse the put list. */ get_tailp = &mbuf->m_nextpkt; get_next = NULL; count = 0; non_tcp_count = 0; do { struct mbuf *put_next; non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); put_next = mbuf->m_nextpkt; mbuf->m_nextpkt = get_next; get_next = mbuf; mbuf = put_next; count++; } while (mbuf != NULL); if (count > stdp->std_put_hiwat) stdp->std_put_hiwat = count; /* Append the reversed put list to the get list. 
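 * Producers prepend to std_put with an atomic compare-and-set, which
 * yields LIFO order; reversing the chain above restores the original
 * submission order before it is spliced onto the tail of the get list.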
*/ KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); *stdp->std_getp = get_next; stdp->std_getp = get_tailp; stdp->std_get_count += count; stdp->std_get_non_tcp_count += non_tcp_count; } static void sfxge_tx_qreap(struct sfxge_txq *txq) { SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); txq->reaped = txq->completed; } static void sfxge_tx_qlist_post(struct sfxge_txq *txq) { unsigned int old_added; unsigned int block_level; unsigned int level; int rc; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, ("txq->n_pend_desc too large")); KASSERT(!txq->blocked, ("txq->blocked")); old_added = txq->added; /* Post the fragment list. */ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, txq->reaped, &txq->added); KASSERT(rc == 0, ("efx_tx_qdesc_post() failed")); /* If efx_tx_qdesc_post() had to refragment, our information about * buffers to free may be associated with the wrong * descriptors. */ KASSERT(txq->added - old_added == txq->n_pend_desc, ("efx_tx_qdesc_post() refragmented descriptors")); level = txq->added - txq->reaped; KASSERT(level <= txq->entries, ("overfilled TX queue")); /* Clear the fragment list. */ txq->n_pend_desc = 0; /* * Set the block level to ensure there is space to generate a * large number of descriptors for TSO. */ block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; /* Have we reached the block level? */ if (level < block_level) return; /* Reap, and check again */ sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < block_level) return; txq->blocked = 1; /* * Avoid a race with completion interrupt handling that could leave * the queue blocked. */ mb(); sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < block_level) { mb(); txq->blocked = 0; } } static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) { bus_dmamap_t *used_map; bus_dmamap_t map; bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; unsigned int id; struct sfxge_tx_mapping *stmp; efx_desc_t *desc; int n_dma_seg; int rc; int i; int eop; int vlan_tagged; KASSERT(!txq->blocked, ("txq->blocked")); if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) prefetch_read_many(mbuf->m_data); if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { rc = EINTR; goto reject; } /* Load the packet for DMA. */ id = txq->added & txq->ptr_mask; stmp = &txq->stmp[id]; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); if (rc == EFBIG) { /* Try again. */ struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, SFXGE_TX_MAPPING_MAX_SEG); if (new_mbuf == NULL) goto reject; ++txq->collapses; mbuf = new_mbuf; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); } if (rc != 0) goto reject; /* Make the packet visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); used_map = &stmp->map; vlan_tagged = sfxge_tx_maybe_insert_tag(txq, mbuf); if (vlan_tagged) { sfxge_next_stmp(txq, &stmp); } if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, vlan_tagged); if (rc < 0) goto reject_mapped; stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; } else { /* Add the mapping to the fragment list, and set flags * for the buffer. 
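 * One descriptor is created per DMA segment (offset by one slot if a
 * VLAN tag descriptor was already queued), and "eop" marks the final
 * segment as the end of the packet.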
*/ i = 0; for (;;) { desc = &txq->pend_desc[i + vlan_tagged]; eop = (i == n_dma_seg - 1); efx_tx_qdesc_dma_create(txq->common, dma_seg[i].ds_addr, dma_seg[i].ds_len, eop, desc); if (eop) break; i++; sfxge_next_stmp(txq, &stmp); } txq->n_pend_desc = n_dma_seg + vlan_tagged; } /* * If the mapping required more than one descriptor * then we need to associate the DMA map with the last * descriptor, not the first. */ if (used_map != &stmp->map) { map = stmp->map; stmp->map = *used_map; *used_map = map; } stmp->u.mbuf = mbuf; stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; /* Post the fragment list. */ sfxge_tx_qlist_post(txq); return (0); reject_mapped: bus_dmamap_unload(txq->packet_dma_tag, *used_map); reject: /* Drop the packet on the floor. */ m_freem(mbuf); ++txq->drops; return (rc); } /* * Drain the deferred packet list into the transmit queue. */ static void sfxge_tx_qdpl_drain(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *next; unsigned int count; unsigned int non_tcp_count; unsigned int pushed; int rc; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); sc = txq->sc; stdp = &txq->dpl; pushed = txq->added; if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { prefetch_read_many(sc->enp); prefetch_read_many(txq->common); } mbuf = stdp->std_get; count = stdp->std_get_count; non_tcp_count = stdp->std_get_non_tcp_count; if (count > stdp->std_get_hiwat) stdp->std_get_hiwat = count; while (count != 0) { KASSERT(mbuf != NULL, ("mbuf == NULL")); next = mbuf->m_nextpkt; mbuf->m_nextpkt = NULL; ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ if (next != NULL) prefetch_read_many(next); rc = sfxge_tx_queue_mbuf(txq, mbuf); --count; non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); mbuf = next; if (rc != 0) continue; if (txq->blocked) break; /* Push the fragments to the hardware in batches. */ if (txq->added - pushed >= SFXGE_TX_BATCH) { efx_tx_qpush(txq->common, txq->added, pushed); pushed = txq->added; } } if (count == 0) { KASSERT(mbuf == NULL, ("mbuf != NULL")); KASSERT(non_tcp_count == 0, ("inconsistent TCP/non-TCP detection")); stdp->std_get = NULL; stdp->std_get_count = 0; stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; } else { stdp->std_get = mbuf; stdp->std_get_count = count; stdp->std_get_non_tcp_count = non_tcp_count; } if (txq->added != pushed) efx_tx_qpush(txq->common, txq->added, pushed); KASSERT(txq->blocked || stdp->std_get_count == 0, ("queue unblocked but count is non-zero")); } #define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0) /* * Service the deferred packet list. * * NOTE: drops the txq mutex! */ static void sfxge_tx_qdpl_service(struct sfxge_txq *txq) { SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); do { if (SFXGE_TX_QDPL_PENDING(txq)) sfxge_tx_qdpl_swizzle(txq); if (!txq->blocked) sfxge_tx_qdpl_drain(txq); SFXGE_TXQ_UNLOCK(txq); } while (SFXGE_TX_QDPL_PENDING(txq) && SFXGE_TXQ_TRYLOCK(txq)); } /* * Put a packet on the deferred packet get-list. 
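 *
 * Sketch of the deferred packet list (dpl): each TX queue keeps two lists
 * of deferred mbufs.  The "get" list is only touched with the txq lock
 * held and is bounded by std_get_max; the "put" list is a lock-free LIFO
 * bounded by std_put_max, used by threads that cannot take the lock.
 * sfxge_tx_qdpl_swizzle() moves the put list onto the tail of the get
 * list and sfxge_tx_qdpl_drain() feeds the get list to the hardware; the
 * function below appends one mbuf to the get list under the lock:
 *
 *	*stdp->std_getp = mbuf;            (link after the current tail)
 *	stdp->std_getp = &mbuf->m_nextpkt; (advance the tail pointer)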
*/ static int sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf) { struct sfxge_tx_dpl *stdp; stdp = &txq->dpl; KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); if (stdp->std_get_count >= stdp->std_get_max) { txq->get_overflow++; return (ENOBUFS); } if (sfxge_is_mbuf_non_tcp(mbuf)) { if (stdp->std_get_non_tcp_count >= stdp->std_get_non_tcp_max) { txq->get_non_tcp_overflow++; return (ENOBUFS); } stdp->std_get_non_tcp_count++; } *(stdp->std_getp) = mbuf; stdp->std_getp = &mbuf->m_nextpkt; stdp->std_get_count++; return (0); } /* * Put a packet on the deferred packet put-list. * * We overload the csum_data field in the mbuf to keep track of this length * because there is no cheap alternative to avoid races. */ static int sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf) { struct sfxge_tx_dpl *stdp; volatile uintptr_t *putp; uintptr_t old; uintptr_t new; unsigned old_len; KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); stdp = &txq->dpl; putp = &stdp->std_put; new = (uintptr_t)mbuf; do { old = *putp; if (old != 0) { struct mbuf *mp = (struct mbuf *)old; old_len = mp->m_pkthdr.csum_data; } else old_len = 0; if (old_len >= stdp->std_put_max) { atomic_add_long(&txq->put_overflow, 1); return (ENOBUFS); } mbuf->m_pkthdr.csum_data = old_len + 1; mbuf->m_nextpkt = (void *)old; } while (atomic_cmpset_ptr(putp, old, new) == 0); return (0); } /* * Called from if_transmit - will try to grab the txq lock and enqueue to the * put list if it succeeds, otherwise try to push onto the defer list if space. */ static int sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) { int rc; if (!SFXGE_LINK_UP(txq->sc)) { atomic_add_long(&txq->netdown_drops, 1); return (ENETDOWN); } /* * Try to grab the txq lock. If we are able to get the lock, * the packet will be appended to the "get list" of the deferred * packet list. Otherwise, it will be pushed on the "put list". */ if (SFXGE_TXQ_TRYLOCK(txq)) { /* First swizzle put-list to get-list to keep order */ sfxge_tx_qdpl_swizzle(txq); rc = sfxge_tx_qdpl_put_locked(txq, m); /* Try to service the list. */ sfxge_tx_qdpl_service(txq); /* Lock has been dropped. */ } else { rc = sfxge_tx_qdpl_put_unlocked(txq, m); /* * Try to grab the lock again. * * If we are able to get the lock, we need to process * the deferred packet list. If we are not able to get * the lock, another thread is processing the list. */ if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) { sfxge_tx_qdpl_service(txq); /* Lock has been dropped. */ } } SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); return (rc); } static void sfxge_tx_qdpl_flush(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp = &txq->dpl; struct mbuf *mbuf, *next; SFXGE_TXQ_LOCK(txq); sfxge_tx_qdpl_swizzle(txq); for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { next = mbuf->m_nextpkt; m_freem(mbuf); } stdp->std_get = NULL; stdp->std_get_count = 0; stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; SFXGE_TXQ_UNLOCK(txq); } void sfxge_if_qflush(struct ifnet *ifp) { struct sfxge_softc *sc; unsigned int i; sc = ifp->if_softc; for (i = 0; i < sc->txq_count; i++) sfxge_tx_qdpl_flush(sc->txq[i]); } #if SFXGE_TX_PARSE_EARLY /* There is little space for user data in mbuf pkthdr, so we * use l*hlen fields which are not used by the driver otherwise * to store header offsets. * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes. 
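 *
 * Worked example (standard header sizes assumed): for an untagged
 * IPv4/TCP packet with no options the parser below stores
 *
 *	l2hlen = 14 (Ethernet header)
 *	l3hlen = 14 + 20 = 34 (offset of the TCP header)
 *	l4hlen = 34 + 20 = 54 (offset of the payload)
 *
 * and with an 802.1Q tag l2hlen becomes 18 (so 38 and 58 respectively),
 * all comfortably below the 255-byte limit noted above.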
*/ #define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0]) /* We abuse l5hlen here because PH_loc can hold only 64 bits of data */ #define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen) #define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1]) #define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1]) static void sfxge_parse_tx_packet(struct mbuf *mbuf) { struct ether_header *eh = mtod(mbuf, struct ether_header *); const struct tcphdr *th; struct tcphdr th_copy; /* Find network protocol and header */ TSO_MBUF_PROTO(mbuf) = eh->ether_type; if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = mtod(mbuf, struct ether_vlan_header *); TSO_MBUF_PROTO(mbuf) = veh->evl_proto; mbuf->m_pkthdr.l2hlen = sizeof(*veh); } else { mbuf->m_pkthdr.l2hlen = sizeof(*eh); } /* Find TCP header */ if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) { const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); KASSERT(iph->ip_p == IPPROTO_TCP, ("TSO required on non-TCP packet")); mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; TSO_MBUF_PACKETID(mbuf) = iph->ip_id; } else { KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6), ("TSO required on non-IP packet")); KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == IPPROTO_TCP, ("TSO required on non-TCP packet")); mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); TSO_MBUF_PACKETID(mbuf) = 0; } KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, ("network header is fragmented in mbuf")); /* We need TCP header including flags (window is the next) */ if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), (caddr_t)&th_copy); th = &th_copy; } else { th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); } mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); /* These flags must not be duplicated */ /* * RST should not be duplicated as well, but FreeBSD kernel * generates TSO packets with RST flag. So, do not assert * its absence. */ KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), ("incompatible TCP flag 0x%x on TSO packet", th->th_flags & (TH_URG | TH_SYN))); TSO_MBUF_FLAGS(mbuf) = th->th_flags; } #endif /* * TX start -- called by the stack. */ int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) { struct sfxge_softc *sc; struct sfxge_txq *txq; int rc; sc = (struct sfxge_softc *)ifp->if_softc; /* * Transmit may be called when interface is up from the kernel * point of view, but not yet up (in progress) from the driver * point of view. I.e. link aggregation bring up. * Transmit may be called when interface is up from the driver * point of view, but already down from the kernel point of * view. I.e. Rx when interface shutdown is in progress. */ KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP), ("interface not up")); /* Pick the desired transmit queue. 
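 *
 * Sketch of the selection below (names as in the code): for a packet that
 * carries a flow hash, the RX indirection table is reused so that the TX
 * work lands on the same CPU as the matching RX queue:
 *
 *	idx   = hash % nitems(sc->rx_indir_table);
 *	index = sc->rx_indir_table[idx];
 *	txq   = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
 *
 * Taking the modulus over nitems() keeps the lookup tied to the actual
 * table size; this revision switches to that form from the previous
 * SFXGE_RX_SCALE_MAX constant.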
 */
	if (m->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO)) {
		int index = 0;

#ifdef RSS
		uint32_t bucket_id;

		/*
		 * Select a TX queue which matches the corresponding
		 * RX queue for the hash in order to assign both
		 * TX and RX parts of the flow to the same CPU
		 */
		if (rss_m2bucket(m, &bucket_id) == 0)
			index = bucket_id %
			    (sc->txq_count - (SFXGE_TXQ_NTYPES - 1));
#else
		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;
+			uint32_t idx = hash % nitems(sc->rx_indir_table);

-			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
+			index = sc->rx_indir_table[idx];
		}
#endif
#if SFXGE_TX_PARSE_EARLY
		if (m->m_pkthdr.csum_flags & CSUM_TSO)
			sfxge_parse_tx_packet(m);
#endif
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);
	if (rc != 0)
		m_freem(m);

	return (rc);
}

/*
 * Software "TSO". Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */
	unsigned segs_space;	/* Remaining number of DMA segments
				   for the packet (FATSOv2 only) */
	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
	int fw_assisted;	/* Use FW-assisted TSO */
	u_short packet_id;	/* IPv4 packet ID from the original packet */
	uint8_t tcp_flags;	/* TCP flags */
	efx_desc_t header_desc;	/* Precomputed header descriptor for
				 * FW-assisted TSO */
};

#if !SFXGE_TX_PARSE_EARLY
static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
	    ("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}

static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
	    ("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}

static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}
#endif

/* Size of preallocated TSO header buffers. Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE 128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
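 *
 * Worked example of the macros below (a 4 KiB PAGE_SIZE and a 1024-entry
 * TX ring are assumed for illustration):
 *
 *	TSOH_COUNT(1024)      = 1024 / 2         = 512 header buffers
 *	TSOH_PER_PAGE         = 4096 / 128       = 32 buffers per page
 *	TSOH_PAGE_COUNT(1024) = howmany(512, 32) = 16 DMA pages
 *
 * so tso_init() preallocates 16 pages of 128-byte header buffers for such
 * a queue.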
*/ #define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) #define TSOH_PAGE_COUNT(_txq_entries) \ howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE) static int tso_init(struct sfxge_txq *txq) { struct sfxge_softc *sc = txq->sc; unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); int i, rc; /* Allocate TSO header buffers */ txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), M_SFXGE, M_WAITOK); for (i = 0; i < tsoh_page_count; i++) { rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); if (rc != 0) goto fail; } return (0); fail: while (i-- > 0) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); txq->tsoh_buffer = NULL; return (rc); } static void tso_fini(struct sfxge_txq *txq) { int i; if (txq->tsoh_buffer != NULL) { for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); } } static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso, const bus_dma_segment_t *hdr_dma_seg, struct mbuf *mbuf) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); #if !SFXGE_TX_PARSE_EARLY struct ether_header *eh = mtod(mbuf, struct ether_header *); const struct tcphdr *th; struct tcphdr th_copy; #endif tso->fw_assisted = txq->tso_fw_assisted; tso->mbuf = mbuf; /* Find network protocol and header */ #if !SFXGE_TX_PARSE_EARLY tso->protocol = eh->ether_type; if (tso->protocol == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = mtod(mbuf, struct ether_vlan_header *); tso->protocol = veh->evl_proto; tso->nh_off = sizeof(*veh); } else { tso->nh_off = sizeof(*eh); } #else tso->protocol = TSO_MBUF_PROTO(mbuf); tso->nh_off = mbuf->m_pkthdr.l2hlen; tso->tcph_off = mbuf->m_pkthdr.l3hlen; tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf)); #endif #if !SFXGE_TX_PARSE_EARLY /* Find TCP header */ if (tso->protocol == htons(ETHERTYPE_IP)) { KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; tso->packet_id = ntohs(tso_iph(tso)->ip_id); } else { KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), ("TSO required on non-IP packet")); KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); tso->packet_id = 0; } #endif if (tso->fw_assisted && __predict_false(tso->tcph_off > encp->enc_tx_tso_tcp_header_offset_limit)) { tso->fw_assisted = 0; } #if !SFXGE_TX_PARSE_EARLY KASSERT(mbuf->m_len >= tso->tcph_off, ("network header is fragmented in mbuf")); /* We need TCP header including flags (window is the next) */ if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), (caddr_t)&th_copy); th = &th_copy; } else { th = tso_tcph(tso); } tso->header_len = tso->tcph_off + 4 * th->th_off; #else tso->header_len = mbuf->m_pkthdr.l4hlen; #endif tso->seg_size = mbuf->m_pkthdr.tso_segsz; #if !SFXGE_TX_PARSE_EARLY tso->seqnum = ntohl(th->th_seq); /* These flags must not be duplicated */ /* * RST should not be duplicated as well, but FreeBSD kernel * generates TSO packets with RST flag. So, do not assert * its absence. 
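 *
 * Worked example of the segmentation state set up by this function
 * (hypothetical sizes): with 54 bytes of headers (header_len), a
 * 7054-byte frame and an MSS (seg_size) of 1460,
 *
 *	out_len  = 7054 - 54 = 7000 bytes of payload
 *	segments = howmany(7000, 1460) = 5 (four of 1460 bytes, one of 1160)
 *
 * FIN and PUSH are carried only by the last of those segments (see
 * tso_start_new_packet()), and in the software path the sequence number
 * written into each generated header advances by seg_size.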
*/ KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), ("incompatible TCP flag 0x%x on TSO packet", th->th_flags & (TH_URG | TH_SYN))); tso->tcp_flags = th->th_flags; #else tso->seqnum = TSO_MBUF_SEQNUM(mbuf); tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); #endif tso->out_len = mbuf->m_pkthdr.len - tso->header_len; if (tso->fw_assisted) { if (hdr_dma_seg->ds_len >= tso->header_len) efx_tx_qdesc_dma_create(txq->common, hdr_dma_seg->ds_addr, tso->header_len, B_FALSE, &tso->header_desc); else tso->fw_assisted = 0; } } /* * tso_fill_packet_with_fragment - form descriptors for the current fragment * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space. */ static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, struct sfxge_tso_state *tso) { efx_desc_t *desc; int n; uint64_t dma_addr = tso->dma_addr; boolean_t eop; if (tso->in_len == 0 || tso->packet_space == 0) return; KASSERT(tso->in_len > 0, ("TSO input length went negative")); KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); if (tso->fw_assisted & SFXGE_FATSOV2) { n = tso->in_len; tso->out_len -= n; tso->seqnum += n; tso->in_len = 0; if (n < tso->packet_space) { tso->packet_space -= n; tso->segs_space--; } else { tso->packet_space = tso->seg_size - (n - tso->packet_space) % tso->seg_size; tso->segs_space = EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - (tso->packet_space != tso->seg_size); } } else { n = min(tso->in_len, tso->packet_space); tso->packet_space -= n; tso->out_len -= n; tso->dma_addr += n; tso->in_len -= n; } /* * It is OK to use binary OR below to avoid extra branching * since all conditions may always be checked. */ eop = (tso->out_len == 0) | (tso->packet_space == 0) | (tso->segs_space == 0); desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); } /* Callback from bus_dmamap_load() for long TSO headers. */ static void tso_map_long_header(void *dma_addr_ret, bus_dma_segment_t *segs, int nseg, int error) { *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && __predict_true(nseg == 1)) ? segs->ds_addr : 0); } /* * tso_start_new_packet - generate a new header and prepare for the new packet * * Generate a new header and prepare for the new packet. Return 0 on * success, or an error code if failed to alloc header. 
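 *
 * Worked example of the header rewrite done below (standard 20-byte IPv4
 * and TCP headers assumed): with nh_off = 14, header_len = 54 and
 * seg_size = 1460, every segment except the last gets
 *
 *	ip_length = header_len - nh_off + seg_size = 40 + 1460 = 1500
 *
 * written into ip_len (for IPv6, ip_length minus the fixed 40-byte header
 * goes into ip6_plen), while the last segment uses the remaining out_len
 * instead of seg_size.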
*/ static int tso_start_new_packet(struct sfxge_txq *txq, struct sfxge_tso_state *tso, unsigned int *idp) { unsigned int id = *idp; struct tcphdr *tsoh_th; unsigned ip_length; caddr_t header; uint64_t dma_addr; bus_dmamap_t map; efx_desc_t *desc; int rc; if (tso->fw_assisted) { if (tso->fw_assisted & SFXGE_FATSOV2) { /* Add 2 FATSOv2 option descriptors */ desc = &txq->pend_desc[txq->n_pend_desc]; efx_tx_qdesc_tso2_create(txq->common, tso->packet_id, tso->seqnum, tso->seg_size, desc, EFX_TX_FATSOV2_OPT_NDESCS); desc += EFX_TX_FATSOV2_OPT_NDESCS; txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; tso->segs_space = EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; } else { uint8_t tcp_flags = tso->tcp_flags; if (tso->out_len > tso->seg_size) tcp_flags &= ~(TH_FIN | TH_PUSH); /* Add FATSOv1 option descriptor */ desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_tso_create(txq->common, tso->packet_id, tso->seqnum, tcp_flags, desc++); KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; tso->seqnum += tso->seg_size; tso->segs_space = UINT_MAX; } /* Header DMA descriptor */ *desc = tso->header_desc; txq->n_pend_desc++; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; } else { /* Allocate a DMA-mapped header buffer. */ if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { unsigned int page_index = (id / 2) / TSOH_PER_PAGE; unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; header = (txq->tsoh_buffer[page_index].esm_base + buf_index * TSOH_STD_SIZE); dma_addr = (txq->tsoh_buffer[page_index].esm_addr + buf_index * TSOH_STD_SIZE); map = txq->tsoh_buffer[page_index].esm_map; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); } else { struct sfxge_tx_mapping *stmp = &txq->stmp[id]; /* We cannot use bus_dmamem_alloc() as that may sleep */ header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); if (__predict_false(!header)) return (ENOMEM); rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, header, tso->header_len, tso_map_long_header, &dma_addr, BUS_DMA_NOWAIT); if (__predict_false(dma_addr == 0)) { if (rc == 0) { /* Succeeded but got >1 segment */ bus_dmamap_unload(txq->packet_dma_tag, stmp->map); rc = EINVAL; } free(header, M_SFXGE); return (rc); } map = stmp->map; txq->tso_long_headers++; stmp->u.heap_buf = header; stmp->flags = TX_BUF_UNMAP; } tsoh_th = (struct tcphdr *)(header + tso->tcph_off); /* Copy and update the headers. */ m_copydata(tso->mbuf, 0, tso->header_len, header); tsoh_th->th_seq = htonl(tso->seqnum); tso->seqnum += tso->seg_size; if (tso->out_len > tso->seg_size) { /* This packet will not finish the TSO burst. */ ip_length = tso->header_len - tso->nh_off + tso->seg_size; tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); } else { /* This packet will be the last in the TSO burst. */ ip_length = tso->header_len - tso->nh_off + tso->out_len; } if (tso->protocol == htons(ETHERTYPE_IP)) { struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); tsoh_iph->ip_len = htons(ip_length); /* XXX We should increment ip_id, but FreeBSD doesn't * currently allocate extra IDs for multiple segments. */ } else { struct ip6_hdr *tsoh_iph = (struct ip6_hdr *)(header + tso->nh_off); tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); } /* Make the header visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); /* Form a descriptor for this header. 
*/ desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_dma_create(txq->common, dma_addr, tso->header_len, 0, desc); id = (id + 1) & txq->ptr_mask; tso->segs_space = UINT_MAX; } tso->packet_space = tso->seg_size; txq->tso_packets++; *idp = id; return (0); } static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg, int vlan_tagged) { struct sfxge_tso_state tso; unsigned int id; unsigned skipped = 0; tso_start(txq, &tso, dma_seg, mbuf); while (dma_seg->ds_len + skipped <= tso.header_len) { skipped += dma_seg->ds_len; --n_dma_seg; KASSERT(n_dma_seg, ("no payload found in TSO packet")); ++dma_seg; } tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); id = (txq->added + vlan_tagged) & txq->ptr_mask; if (__predict_false(tso_start_new_packet(txq, &tso, &id))) return (-1); while (1) { tso_fill_packet_with_fragment(txq, &tso); /* Exactly one DMA descriptor is added */ KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; /* Move onto the next fragment? */ if (tso.in_len == 0) { --n_dma_seg; if (n_dma_seg == 0) break; ++dma_seg; tso.in_len = dma_seg->ds_len; tso.dma_addr = dma_seg->ds_addr; } /* End of packet? */ if ((tso.packet_space == 0) | (tso.segs_space == 0)) { unsigned int n_fatso_opt_desc = (tso.fw_assisted & SFXGE_FATSOV2) ? EFX_TX_FATSOV2_OPT_NDESCS : (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; /* If the queue is now full due to tiny MSS, * or we can't create another header, discard * the remainder of the input mbuf but do not * roll back the work we have done. */ if (txq->n_pend_desc + n_fatso_opt_desc + 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { txq->tso_pdrop_too_many++; break; } if (__predict_false(tso_start_new_packet(txq, &tso, &id))) { txq->tso_pdrop_no_rsrc++; break; } } } txq->tso_bursts++; return (id); } static void sfxge_tx_qunblock(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_evq *evq; sc = txq->sc; evq = sc->evq[txq->evq_index]; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) return; SFXGE_TXQ_LOCK(txq); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { /* reaped must be in sync with blocked */ sfxge_tx_qreap(txq); txq->blocked = 0; } } sfxge_tx_qdpl_service(txq); /* note: lock has been dropped */ } void sfxge_tx_qflush_done(struct sfxge_txq *txq) { txq->flush_state = SFXGE_FLUSH_DONE; } static void sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; struct sfxge_evq *evq; unsigned int count; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); txq = sc->txq[index]; evq = sc->evq[txq->evq_index]; SFXGE_EVQ_LOCK(evq); SFXGE_TXQ_LOCK(txq); KASSERT(txq->init_state == SFXGE_TXQ_STARTED, ("txq->init_state != SFXGE_TXQ_STARTED")); txq->init_state = SFXGE_TXQ_INITIALIZED; if (txq->flush_state != SFXGE_FLUSH_DONE) { txq->flush_state = SFXGE_FLUSH_PENDING; SFXGE_EVQ_UNLOCK(evq); SFXGE_TXQ_UNLOCK(txq); /* Flush the transmit queue. */ if (efx_tx_qflush(txq->common) != 0) { log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", device_get_nameunit(sc->dev), index); txq->flush_state = SFXGE_FLUSH_DONE; } else { count = 0; do { /* Spin for 100ms. 
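 * Each DELAY(100000) below busy-waits for 100 ms and the loop runs at
 * most 20 times, so an unresponsive flush is abandoned after roughly
 * 20 x 100 ms = 2 s and reported by the timeout path below.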
*/ DELAY(100000); if (txq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); } SFXGE_EVQ_LOCK(evq); SFXGE_TXQ_LOCK(txq); KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, ("txq->flush_state == SFXGE_FLUSH_FAILED")); if (txq->flush_state != SFXGE_FLUSH_DONE) { /* Flush timeout */ log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", device_get_nameunit(sc->dev), index); txq->flush_state = SFXGE_FLUSH_DONE; } } txq->blocked = 0; txq->pending = txq->added; sfxge_tx_qcomplete(txq, evq); KASSERT(txq->completed == txq->added, ("txq->completed != txq->added")); sfxge_tx_qreap(txq); KASSERT(txq->reaped == txq->completed, ("txq->reaped != txq->completed")); txq->added = 0; txq->pending = 0; txq->completed = 0; txq->reaped = 0; /* Destroy the common code transmit queue. */ efx_tx_qdestroy(txq->common); txq->common = NULL; efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); SFXGE_EVQ_UNLOCK(evq); SFXGE_TXQ_UNLOCK(txq); } /* * Estimate maximum number of Tx descriptors required for TSO packet. * With minimum MSS and maximum mbuf length we might need more (even * than a ring-ful of descriptors), but this should not happen in * practice except due to deliberate attack. In that case we will * truncate the output at a packet boundary. */ static unsigned int sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, unsigned int tso_fw_assisted) { /* One descriptor for every input fragment */ unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; unsigned int sw_tso_max_descs; unsigned int fa_tso_v1_max_descs = 0; unsigned int fa_tso_v2_max_descs = 0; /* VLAN tagging Tx option descriptor may be required */ if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) max_descs++; if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { /* * Plus header and payload descriptor for each output segment. * Minus one since header fragment is already counted. * Even if FATSO is used, we should be ready to fallback * to do it in the driver. */ sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; /* FW assisted TSOv1 requires one more descriptor per segment * in comparison to SW TSO */ if (tso_fw_assisted & SFXGE_FATSOV1) fa_tso_v1_max_descs = sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; /* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra * descriptors per superframe limited by number of DMA fetches * per packet. The first packet header is already counted. */ if (tso_fw_assisted & SFXGE_FATSOV2) { fa_tso_v2_max_descs = howmany(SFXGE_TX_MAPPING_MAX_SEG, EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; } max_descs += MAX(sw_tso_max_descs, MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); } return (max_descs); } static int sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; efsys_mem_t *esmp; uint16_t flags; unsigned int tso_fw_assisted; struct sfxge_evq *evq; unsigned int desc_index; int rc; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); txq = sc->txq[index]; esmp = &txq->mem; evq = sc->evq[txq->evq_index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, EFX_TXQ_NBUFS(sc->txq_entries))) != 0) return (rc); /* Determine the kind of queue we are creating. 
*/ tso_fw_assisted = 0; switch (txq->type) { case SFXGE_TXQ_NON_CKSUM: flags = 0; break; case SFXGE_TXQ_IP_CKSUM: flags = EFX_TXQ_CKSUM_IPV4; break; case SFXGE_TXQ_IP_TCP_UDP_CKSUM: flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; tso_fw_assisted = sc->tso_fw_assisted; if (tso_fw_assisted & SFXGE_FATSOV2) flags |= EFX_TXQ_FATSOV2; break; default: KASSERT(0, ("Impossible TX queue")); flags = 0; break; } /* Create the common code transmit queue. */ if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, sc->txq_entries, txq->buf_base_id, flags, evq->common, &txq->common, &desc_index)) != 0) { /* Retry if no FATSOv2 resources, otherwise fail */ if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) goto fail; /* Looks like all FATSOv2 contexts are used */ flags &= ~EFX_TXQ_FATSOV2; tso_fw_assisted &= ~SFXGE_FATSOV2; if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, sc->txq_entries, txq->buf_base_id, flags, evq->common, &txq->common, &desc_index)) != 0) goto fail; } /* Initialise queue descriptor indexes */ txq->added = txq->pending = txq->completed = txq->reaped = desc_index; SFXGE_TXQ_LOCK(txq); /* Enable the transmit queue. */ efx_tx_qenable(txq->common); txq->init_state = SFXGE_TXQ_STARTED; txq->flush_state = SFXGE_FLUSH_REQUIRED; txq->tso_fw_assisted = tso_fw_assisted; txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, tso_fw_assisted); SFXGE_TXQ_UNLOCK(txq); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); return (rc); } void sfxge_tx_stop(struct sfxge_softc *sc) { int index; index = sc->txq_count; while (--index >= 0) sfxge_tx_qstop(sc, index); /* Tear down the transmit module */ efx_tx_fini(sc->enp); } int sfxge_tx_start(struct sfxge_softc *sc) { int index; int rc; /* Initialize the common code transmit module. */ if ((rc = efx_tx_init(sc->enp)) != 0) return (rc); for (index = 0; index < sc->txq_count; index++) { if ((rc = sfxge_tx_qstart(sc, index)) != 0) goto fail; } return (0); fail: while (--index >= 0) sfxge_tx_qstop(sc, index); efx_tx_fini(sc->enp); return (rc); } static int sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); struct sysctl_oid *stat_node; unsigned int id; stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, "stats", CTLFLAG_RD, NULL, "Tx queue statistics"); if (stat_node == NULL) return (ENOMEM); for (id = 0; id < nitems(sfxge_tx_stats); id++) { SYSCTL_ADD_ULONG( ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), ""); } return (0); } /** * Destroy a transmit queue. */ static void sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; unsigned int nmaps; txq = sc->txq[index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) tso_fini(txq); /* Free the context arrays. */ free(txq->pend_desc, M_SFXGE); nmaps = sc->txq_entries; while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); /* Release DMA memory mapping. 
*/ sfxge_dma_free(&txq->mem); sc->txq[index] = NULL; SFXGE_TXQ_LOCK_DESTROY(txq); free(txq, M_SFXGE); } static int sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, enum sfxge_txq_type type, unsigned int evq_index) { char name[16]; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *txq_node; struct sfxge_txq *txq; struct sfxge_evq *evq; struct sfxge_tx_dpl *stdp; struct sysctl_oid *dpl_node; efsys_mem_t *esmp; unsigned int nmaps; int rc; txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); txq->sc = sc; txq->entries = sc->txq_entries; txq->ptr_mask = txq->entries - 1; sc->txq[txq_index] = txq; esmp = &txq->mem; evq = sc->evq[evq_index]; /* Allocate and zero DMA space for the descriptor ring. */ if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) return (rc); /* Allocate buffer table entries. */ sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), &txq->buf_base_id); /* Create a DMA tag for packet mappings. */ if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000, MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL, &txq->packet_dma_tag) != 0) { device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); rc = ENOMEM; goto fail; } /* Allocate pending descriptor array for batching writes. */ txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); /* Allocate and initialise mbuf DMA mapping array. */ txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { rc = bus_dmamap_create(txq->packet_dma_tag, 0, &txq->stmp[nmaps].map); if (rc != 0) goto fail2; } snprintf(name, sizeof(name), "%u", txq_index); txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), OID_AUTO, name, CTLFLAG_RD, NULL, ""); if (txq_node == NULL) { rc = ENOMEM; goto fail_txq_node; } if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && (rc = tso_init(txq)) != 0) goto fail3; if (sfxge_tx_dpl_get_max <= 0) { log(LOG_ERR, "%s=%d must be greater than 0", SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); rc = EINVAL; goto fail_tx_dpl_get_max; } if (sfxge_tx_dpl_get_non_tcp_max <= 0) { log(LOG_ERR, "%s=%d must be greater than 0", SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, sfxge_tx_dpl_get_non_tcp_max); rc = EINVAL; goto fail_tx_dpl_get_max; } if (sfxge_tx_dpl_put_max < 0) { log(LOG_ERR, "%s=%d must be greater or equal to 0", SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); rc = EINVAL; goto fail_tx_dpl_put_max; } /* Initialize the deferred packet list. 
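 *
 * Sketch: the three tunables validated above become the limits applied
 * below and enforced by sfxge_tx_qdpl_put_locked() and
 * sfxge_tx_qdpl_put_unlocked():
 *
 *	std_get_max          caps the locked "get" list (get_overflow drops)
 *	std_get_non_tcp_max  caps non-TCP mbufs on the get list
 *	std_put_max          caps the lock-free "put" list (put_overflow drops)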
*/ stdp = &txq->dpl; stdp->std_put_max = sfxge_tx_dpl_put_max; stdp->std_get_max = sfxge_tx_dpl_get_max; stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; stdp->std_getp = &stdp->std_get; SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, "dpl", CTLFLAG_RD, NULL, "Deferred packet list statistics"); if (dpl_node == NULL) { rc = ENOMEM; goto fail_dpl_node; } SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_count", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_count, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_non_tcp_count, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_hiwat, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_put_hiwat, 0, ""); rc = sfxge_txq_stat_init(txq, txq_node); if (rc != 0) goto fail_txq_stat_init; txq->type = type; txq->evq_index = evq_index; txq->txq_index = txq_index; txq->init_state = SFXGE_TXQ_INITIALIZED; txq->hw_vlan_tci = 0; return (0); fail_txq_stat_init: fail_dpl_node: fail_tx_dpl_put_max: fail_tx_dpl_get_max: fail3: fail_txq_node: free(txq->pend_desc, M_SFXGE); fail2: while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); bus_dma_tag_destroy(txq->packet_dma_tag); fail: sfxge_dma_free(esmp); return (rc); } static int sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned long sum; unsigned int index; /* Sum across all TX queues */ sum = 0; for (index = 0; index < sc->txq_count; index++) sum += *(unsigned long *)((caddr_t)sc->txq[index] + sfxge_tx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_tx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < nitems(sfxge_tx_stats); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_tx_stats[id].name, CTLTYPE_ULONG|CTLFLAG_RD, sc, id, sfxge_tx_stat_handler, "LU", ""); } } uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc) { unsigned int index; uint64_t drops = 0; struct sfxge_txq *txq; /* Sum across all TX queues */ for (index = 0; index < sc->txq_count; index++) { txq = sc->txq[index]; /* * In theory, txq->put_overflow and txq->netdown_drops * should use atomic operation and other should be * obtained under txq lock, but it is just statistics. 
*/ drops += txq->drops + txq->get_overflow + txq->get_non_tcp_overflow + txq->put_overflow + txq->netdown_drops + txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; } return (drops); } void sfxge_tx_fini(struct sfxge_softc *sc) { int index; index = sc->txq_count; while (--index >= 0) sfxge_tx_qfini(sc, index); sc->txq_count = 0; } int sfxge_tx_init(struct sfxge_softc *sc) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); struct sfxge_intr *intr; int index; int rc; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc; sc->tso_fw_assisted = sfxge_tso_fw_assisted; if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || (!encp->enc_fw_assisted_tso_enabled)) sc->tso_fw_assisted &= ~SFXGE_FATSOV1; if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || (!encp->enc_fw_assisted_tso_v2_enabled)) sc->tso_fw_assisted &= ~SFXGE_FATSOV2; sc->txqs_node = SYSCTL_ADD_NODE( device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); if (sc->txqs_node == NULL) { rc = ENOMEM; goto fail_txq_node; } /* Initialize the transmit queues */ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, SFXGE_TXQ_NON_CKSUM, 0)) != 0) goto fail; if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, SFXGE_TXQ_IP_CKSUM, 0)) != 0) goto fail2; for (index = 0; index < sc->txq_count - SFXGE_TXQ_NTYPES + 1; index++) { if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index, SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) goto fail3; } sfxge_tx_stat_init(sc); return (0); fail3: while (--index >= 0) sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); fail2: sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); fail: fail_txq_node: sc->txq_count = 0; return (rc); }
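
/*
 * Worked example for sfxge_tx_init() above (the interrupt count is assumed
 * for illustration): with the driver's three queue types and
 * intr.n_alloc = 4 event queues,
 *
 *	txq_count = SFXGE_TXQ_NTYPES - 1 + 4 = 2 + 4 = 6
 *
 * i.e. one non-checksum queue and one IP-checksum queue (both on event
 * queue 0) plus four TCP/UDP-checksum queues, one per event queue, which
 * matches the sfxge_tx_qinit() calls above.
 */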