Changeset View
Changeset View
Standalone View
Standalone View
head/sys/net/iflib.c
Show First 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | |||||
#include <sys/kobj.h> | #include <sys/kobj.h> | ||||
#include <sys/rman.h> | #include <sys/rman.h> | ||||
#include <sys/sbuf.h> | #include <sys/sbuf.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/socket.h> | #include <sys/socket.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/syslog.h> | #include <sys/syslog.h> | ||||
#include <sys/taskqueue.h> | #include <sys/taskqueue.h> | ||||
#include <sys/limits.h> | |||||
#include <net/if.h> | #include <net/if.h> | ||||
#include <net/if_var.h> | #include <net/if_var.h> | ||||
#include <net/if_types.h> | #include <net/if_types.h> | ||||
#include <net/if_media.h> | #include <net/if_media.h> | ||||
#include <net/bpf.h> | #include <net/bpf.h> | ||||
#include <net/ethernet.h> | #include <net/ethernet.h> | ||||
Show All 36 Lines | |||||
/* | /* | ||||
* enable accounting of every mbuf as it comes in to and goes out of iflib's software descriptor references | * enable accounting of every mbuf as it comes in to and goes out of iflib's software descriptor references | ||||
*/ | */ | ||||
#define MEMORY_LOGGING 0 | #define MEMORY_LOGGING 0 | ||||
/* | /* | ||||
* Enable mbuf vectors for compressing long mbuf chains | * Enable mbuf vectors for compressing long mbuf chains | ||||
*/ | */ | ||||
/* | /* | ||||
* NB: | * NB: | ||||
* - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead | * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead | ||||
* we prefetch needs to be determined by the time spent in m_free vis a vis | * we prefetch needs to be determined by the time spent in m_free vis a vis | ||||
* the cost of a prefetch. This will of course vary based on the workload: | * the cost of a prefetch. This will of course vary based on the workload: | ||||
* - NFLX's m_free path is dominated by vm-based M_EXT manipulation which | * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which | ||||
* is quite expensive, thus suggesting very little prefetch. | * is quite expensive, thus suggesting very little prefetch. | ||||
* - small packet forwarding which is just returning a single mbuf to | * - small packet forwarding which is just returning a single mbuf to | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | struct iflib_ctx { | ||||
struct grouptask ifc_admin_task; | struct grouptask ifc_admin_task; | ||||
struct grouptask ifc_vflr_task; | struct grouptask ifc_vflr_task; | ||||
struct iflib_filter_info ifc_filter_info; | struct iflib_filter_info ifc_filter_info; | ||||
struct ifmedia ifc_media; | struct ifmedia ifc_media; | ||||
struct sysctl_oid *ifc_sysctl_node; | struct sysctl_oid *ifc_sysctl_node; | ||||
uint16_t ifc_sysctl_ntxqs; | uint16_t ifc_sysctl_ntxqs; | ||||
uint16_t ifc_sysctl_nrxqs; | uint16_t ifc_sysctl_nrxqs; | ||||
uint16_t ifc_sysctl_ntxds; | uint16_t ifc_sysctl_qs_eq_override; | ||||
uint16_t ifc_sysctl_nrxds; | |||||
uint16_t ifc_sysctl_ntxds[8]; | |||||
uint16_t ifc_sysctl_nrxds[8]; | |||||
struct if_txrx ifc_txrx; | struct if_txrx ifc_txrx; | ||||
#define isc_txd_encap ifc_txrx.ift_txd_encap | #define isc_txd_encap ifc_txrx.ift_txd_encap | ||||
#define isc_txd_flush ifc_txrx.ift_txd_flush | #define isc_txd_flush ifc_txrx.ift_txd_flush | ||||
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update | #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update | ||||
#define isc_rxd_available ifc_txrx.ift_rxd_available | #define isc_rxd_available ifc_txrx.ift_rxd_available | ||||
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get | #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get | ||||
#define isc_rxd_refill ifc_txrx.ift_rxd_refill | #define isc_rxd_refill ifc_txrx.ift_rxd_refill | ||||
#define isc_rxd_flush ifc_txrx.ift_rxd_flush | #define isc_rxd_flush ifc_txrx.ift_rxd_flush | ||||
▲ Show 20 Lines • Show All 95 Lines • ▼ Show 20 Lines | |||||
#define IFLIB_QUEUE_HUNG 1 | #define IFLIB_QUEUE_HUNG 1 | ||||
#define IFLIB_QUEUE_WORKING 2 | #define IFLIB_QUEUE_WORKING 2 | ||||
/* this should really scale with ring size - 32 is a fairly arbitrary value for this */ | /* this should really scale with ring size - 32 is a fairly arbitrary value for this */ | ||||
#define TX_BATCH_SIZE 16 | #define TX_BATCH_SIZE 16 | ||||
#define IFLIB_RESTART_BUDGET 8 | #define IFLIB_RESTART_BUDGET 8 | ||||
#define IFC_LEGACY 0x1 | #define IFC_LEGACY 0x01 | ||||
#define IFC_QFLUSH 0x2 | #define IFC_QFLUSH 0x02 | ||||
#define IFC_MULTISEG 0x4 | #define IFC_MULTISEG 0x04 | ||||
#define IFC_DMAR 0x8 | #define IFC_DMAR 0x08 | ||||
#define IFC_SC_ALLOCATED 0x10 | |||||
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ | #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ | ||||
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ | CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ | ||||
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) | CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) | ||||
struct iflib_txq { | struct iflib_txq { | ||||
uint16_t ift_in_use; | uint16_t ift_in_use; | ||||
uint16_t ift_cidx; | uint16_t ift_cidx; | ||||
uint16_t ift_cidx_processed; | uint16_t ift_cidx_processed; | ||||
uint16_t ift_pidx; | uint16_t ift_pidx; | ||||
uint8_t ift_gen; | uint8_t ift_gen; | ||||
uint8_t ift_db_pending; | uint8_t ift_db_pending; | ||||
uint8_t ift_db_pending_queued; | uint8_t ift_db_pending_queued; | ||||
uint8_t ift_npending; | uint8_t ift_npending; | ||||
uint8_t ift_br_offset; | |||||
/* implicit pad */ | /* implicit pad */ | ||||
uint64_t ift_processed; | uint64_t ift_processed; | ||||
uint64_t ift_cleaned; | uint64_t ift_cleaned; | ||||
#if MEMORY_LOGGING | #if MEMORY_LOGGING | ||||
uint64_t ift_enqueued; | uint64_t ift_enqueued; | ||||
uint64_t ift_dequeued; | uint64_t ift_dequeued; | ||||
#endif | #endif | ||||
uint64_t ift_no_tx_dma_setup; | uint64_t ift_no_tx_dma_setup; | ||||
▲ Show 20 Lines • Show All 87 Lines • ▼ Show 20 Lines | struct iflib_rxq { | ||||
/* If there is a separate completion queue - | /* If there is a separate completion queue - | ||||
* these are the cq cidx and pidx. Otherwise | * these are the cq cidx and pidx. Otherwise | ||||
* these are unused. | * these are unused. | ||||
*/ | */ | ||||
uint16_t ifr_size; | uint16_t ifr_size; | ||||
uint16_t ifr_cq_cidx; | uint16_t ifr_cq_cidx; | ||||
uint16_t ifr_cq_pidx; | uint16_t ifr_cq_pidx; | ||||
uint8_t ifr_cq_gen; | uint8_t ifr_cq_gen; | ||||
uint8_t ifr_fl_offset; | |||||
if_ctx_t ifr_ctx; | if_ctx_t ifr_ctx; | ||||
iflib_fl_t ifr_fl; | iflib_fl_t ifr_fl; | ||||
uint64_t ifr_rx_irq; | uint64_t ifr_rx_irq; | ||||
uint16_t ifr_id; | uint16_t ifr_id; | ||||
uint8_t ifr_lro_enabled; | uint8_t ifr_lro_enabled; | ||||
uint8_t ifr_nfl; | uint8_t ifr_nfl; | ||||
struct lro_ctrl ifr_lc; | struct lro_ctrl ifr_lc; | ||||
▲ Show 20 Lines • Show All 174 Lines • ▼ Show 20 Lines | |||||
#define IFLIB_DEBUG 0 | #define IFLIB_DEBUG 0 | ||||
static void iflib_tx_structures_free(if_ctx_t ctx); | static void iflib_tx_structures_free(if_ctx_t ctx); | ||||
static void iflib_rx_structures_free(if_ctx_t ctx); | static void iflib_rx_structures_free(if_ctx_t ctx); | ||||
static int iflib_queues_alloc(if_ctx_t ctx); | static int iflib_queues_alloc(if_ctx_t ctx); | ||||
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); | static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); | ||||
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx); | static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget); | ||||
static int iflib_qset_structures_setup(if_ctx_t ctx); | static int iflib_qset_structures_setup(if_ctx_t ctx); | ||||
static int iflib_msix_init(if_ctx_t ctx); | static int iflib_msix_init(if_ctx_t ctx); | ||||
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str); | static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str); | ||||
static void iflib_txq_check_drain(iflib_txq_t txq, int budget); | static void iflib_txq_check_drain(iflib_txq_t txq, int budget); | ||||
static uint32_t iflib_txq_can_drain(struct ifmp_ring *); | static uint32_t iflib_txq_can_drain(struct ifmp_ring *); | ||||
static int iflib_register(if_ctx_t); | static int iflib_register(if_ctx_t); | ||||
static void iflib_init_locked(if_ctx_t ctx); | static void iflib_init_locked(if_ctx_t ctx); | ||||
static void iflib_add_device_sysctl_pre(if_ctx_t ctx); | static void iflib_add_device_sysctl_pre(if_ctx_t ctx); | ||||
▲ Show 20 Lines • Show All 254 Lines • ▼ Show 20 Lines | iflib_netmap_rxsync(struct netmap_kring *kring, int flags) | ||||
if (netmap_no_pendintr || force_update) { | if (netmap_no_pendintr || force_update) { | ||||
int crclen = iflib_crcstrip ? 0 : 4; | int crclen = iflib_crcstrip ? 0 : 4; | ||||
int error, avail; | int error, avail; | ||||
uint16_t slot_flags = kring->nkr_slot_flags; | uint16_t slot_flags = kring->nkr_slot_flags; | ||||
for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) { | for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) { | ||||
nic_i = fl->ifl_cidx; | nic_i = fl->ifl_cidx; | ||||
nm_i = netmap_idx_n2k(kring, nic_i); | nm_i = netmap_idx_n2k(kring, nic_i); | ||||
avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i); | avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i, INT_MAX); | ||||
for (n = 0; avail > 0; n++, avail--) { | for (n = 0; avail > 0; n++, avail--) { | ||||
error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); | error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); | ||||
if (error) | if (error) | ||||
ring->slot[nm_i].len = 0; | ring->slot[nm_i].len = 0; | ||||
else | else | ||||
ring->slot[nm_i].len = ri.iri_len - crclen; | ring->slot[nm_i].len = ri.iri_len - crclen; | ||||
ring->slot[nm_i].flags = slot_flags; | ring->slot[nm_i].flags = slot_flags; | ||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, | bus_dmamap_sync(fl->ifl_ifdi->idi_tag, | ||||
Show All 38 Lines | for (n = 0; nm_i != head; n++) { | ||||
if (slot->flags & NS_BUF_CHANGED) { | if (slot->flags & NS_BUF_CHANGED) { | ||||
/* buffer has changed, reload map */ | /* buffer has changed, reload map */ | ||||
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, addr); | netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, addr); | ||||
slot->flags &= ~NS_BUF_CHANGED; | slot->flags &= ~NS_BUF_CHANGED; | ||||
} | } | ||||
/* | /* | ||||
* XXX we should be batching this operation - TODO | * XXX we should be batching this operation - TODO | ||||
*/ | */ | ||||
ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1); | ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size); | ||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, | bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, | ||||
BUS_DMASYNC_PREREAD); | BUS_DMASYNC_PREREAD); | ||||
nm_i = nm_next(nm_i, lim); | nm_i = nm_next(nm_i, lim); | ||||
nic_i = nm_next(nic_i, lim); | nic_i = nm_next(nic_i, lim); | ||||
} | } | ||||
kring->nr_hwcur = head; | kring->nr_hwcur = head; | ||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, | bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, | ||||
Show All 11 Lines | |||||
ring_reset: | ring_reset: | ||||
return netmap_ring_reinit(kring); | return netmap_ring_reinit(kring); | ||||
} | } | ||||
static int | static int | ||||
iflib_netmap_attach(if_ctx_t ctx) | iflib_netmap_attach(if_ctx_t ctx) | ||||
{ | { | ||||
struct netmap_adapter na; | struct netmap_adapter na; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | |||||
bzero(&na, sizeof(na)); | bzero(&na, sizeof(na)); | ||||
na.ifp = ctx->ifc_ifp; | na.ifp = ctx->ifc_ifp; | ||||
na.na_flags = NAF_BDG_MAYSLEEP; | na.na_flags = NAF_BDG_MAYSLEEP; | ||||
MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); | MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); | ||||
MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); | MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); | ||||
na.num_tx_desc = ctx->ifc_sctx->isc_ntxd; | na.num_tx_desc = scctx->isc_ntxd[0]; | ||||
na.num_rx_desc = ctx->ifc_sctx->isc_ntxd; | na.num_rx_desc = scctx->isc_nrxd[0]; | ||||
na.nm_txsync = iflib_netmap_txsync; | na.nm_txsync = iflib_netmap_txsync; | ||||
na.nm_rxsync = iflib_netmap_rxsync; | na.nm_rxsync = iflib_netmap_rxsync; | ||||
na.nm_register = iflib_netmap_register; | na.nm_register = iflib_netmap_register; | ||||
na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; | na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; | ||||
na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; | na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; | ||||
return (netmap_attach(&na)); | return (netmap_attach(&na)); | ||||
} | } | ||||
static void | static void | ||||
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) | iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) | ||||
{ | { | ||||
struct netmap_adapter *na = NA(ctx->ifc_ifp); | struct netmap_adapter *na = NA(ctx->ifc_ifp); | ||||
struct netmap_slot *slot; | struct netmap_slot *slot; | ||||
slot = netmap_reset(na, NR_TX, txq->ift_id, 0); | slot = netmap_reset(na, NR_TX, txq->ift_id, 0); | ||||
if (slot == 0) | if (slot == 0) | ||||
return; | return; | ||||
for (int i = 0; i < ctx->ifc_sctx->isc_ntxd; i++) { | for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { | ||||
/* | /* | ||||
* In netmap mode, set the map for the packet buffer. | * In netmap mode, set the map for the packet buffer. | ||||
* NOTE: Some drivers (not this one) also need to set | * NOTE: Some drivers (not this one) also need to set | ||||
* the physical buffer address in the NIC ring. | * the physical buffer address in the NIC ring. | ||||
* netmap_idx_n2k() maps a nic index, i, into the corresponding | * netmap_idx_n2k() maps a nic index, i, into the corresponding | ||||
* netmap slot index, si | * netmap slot index, si | ||||
*/ | */ | ||||
int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); | int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); | ||||
netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); | netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) | iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) | ||||
{ | { | ||||
struct netmap_adapter *na = NA(ctx->ifc_ifp); | struct netmap_adapter *na = NA(ctx->ifc_ifp); | ||||
struct netmap_slot *slot; | struct netmap_slot *slot; | ||||
iflib_rxsd_t sd; | iflib_rxsd_t sd; | ||||
int nrxd; | int nrxd; | ||||
slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); | slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); | ||||
if (slot == 0) | if (slot == 0) | ||||
return; | return; | ||||
sd = rxq->ifr_fl[0].ifl_sds; | sd = rxq->ifr_fl[0].ifl_sds; | ||||
nrxd = ctx->ifc_sctx->isc_nrxd; | nrxd = ctx->ifc_softc_ctx.isc_nrxd[0]; | ||||
for (int i = 0; i < nrxd; i++, sd++) { | for (int i = 0; i < nrxd; i++, sd++) { | ||||
int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i); | int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i); | ||||
uint64_t paddr; | uint64_t paddr; | ||||
void *addr; | void *addr; | ||||
caddr_t vaddr; | caddr_t vaddr; | ||||
vaddr = addr = PNMB(na, slot + sj, &paddr); | vaddr = addr = PNMB(na, slot + sj, &paddr); | ||||
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr); | netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr); | ||||
/* Update descriptor and the cached value */ | /* Update descriptor and the cached value */ | ||||
ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1); | ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size); | ||||
} | } | ||||
/* preserve queue */ | /* preserve queue */ | ||||
if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) { | if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) { | ||||
struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; | struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; | ||||
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | ||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, t); | ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, t); | ||||
} else | } else | ||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, nrxd-1); | ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, nrxd-1); | ||||
▲ Show 20 Lines • Show All 198 Lines • ▼ Show 20 Lines | iflib_txsd_alloc(iflib_txq_t txq) | ||||
if_ctx_t ctx = txq->ift_ctx; | if_ctx_t ctx = txq->ift_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | ||||
device_t dev = ctx->ifc_dev; | device_t dev = ctx->ifc_dev; | ||||
int err, nsegments, ntsosegments; | int err, nsegments, ntsosegments; | ||||
nsegments = scctx->isc_tx_nsegments; | nsegments = scctx->isc_tx_nsegments; | ||||
ntsosegments = scctx->isc_tx_tso_segments_max; | ntsosegments = scctx->isc_tx_tso_segments_max; | ||||
MPASS(sctx->isc_ntxd > 0); | MPASS(scctx->isc_ntxd[0] > 0); | ||||
MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); | |||||
MPASS(nsegments > 0); | MPASS(nsegments > 0); | ||||
MPASS(ntsosegments > 0); | MPASS(ntsosegments > 0); | ||||
/* | /* | ||||
* Setup DMA descriptor areas. | * Setup DMA descriptor areas. | ||||
*/ | */ | ||||
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), | if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
sctx->isc_tx_maxsize, /* maxsize */ | sctx->isc_tx_maxsize, /* maxsize */ | ||||
nsegments, /* nsegments */ | nsegments, /* nsegments */ | ||||
sctx->isc_tx_maxsegsize, /* maxsegsize */ | sctx->isc_tx_maxsegsize, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&txq->ift_desc_tag))) { | &txq->ift_desc_tag))) { | ||||
device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); | device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); | ||||
device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n", | device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n", | ||||
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize); | sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef IFLIB_DIAGNOSTICS | ||||
device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n", | device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n", | ||||
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize); | sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize); | ||||
#endif | #endif | ||||
device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n", | |||||
scctx->isc_tx_tso_size_max, ntsosegments, | |||||
scctx->isc_tx_tso_segsize_max); | |||||
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), | if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
scctx->isc_tx_tso_size_max, /* maxsize */ | scctx->isc_tx_tso_size_max, /* maxsize */ | ||||
ntsosegments, /* nsegments */ | ntsosegments, /* nsegments */ | ||||
scctx->isc_tx_tso_segsize_max, /* maxsegsize */ | scctx->isc_tx_tso_segsize_max, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockfuncarg */ | NULL, /* lockfuncarg */ | ||||
&txq->ift_tso_desc_tag))) { | &txq->ift_tso_desc_tag))) { | ||||
device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); | device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef IFLIB_DIAGNOSTICS | ||||
device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n", | device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n", | ||||
scctx->isc_tx_tso_size_max, ntsosegments, | scctx->isc_tx_tso_size_max, ntsosegments, | ||||
scctx->isc_tx_tso_segsize_max); | scctx->isc_tx_tso_segsize_max); | ||||
#endif | #endif | ||||
if (!(txq->ift_sds.ifsd_flags = | if (!(txq->ift_sds.ifsd_flags = | ||||
(uint8_t *) malloc(sizeof(uint8_t) * | (uint8_t *) malloc(sizeof(uint8_t) * | ||||
sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) { | scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate tx_buffer memory\n"); | device_printf(dev, "Unable to allocate tx_buffer memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
if (!(txq->ift_sds.ifsd_m = | if (!(txq->ift_sds.ifsd_m = | ||||
(struct mbuf **) malloc(sizeof(struct mbuf *) * | (struct mbuf **) malloc(sizeof(struct mbuf *) * | ||||
sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) { | scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate tx_buffer memory\n"); | device_printf(dev, "Unable to allocate tx_buffer memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
/* Create the descriptor buffer dma maps */ | /* Create the descriptor buffer dma maps */ | ||||
#if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__))) | #if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__))) | ||||
if ((ctx->ifc_flags & IFC_DMAR) == 0) | if ((ctx->ifc_flags & IFC_DMAR) == 0) | ||||
return (0); | return (0); | ||||
if (!(txq->ift_sds.ifsd_map = | if (!(txq->ift_sds.ifsd_map = | ||||
(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) { | (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate tx_buffer map memory\n"); | device_printf(dev, "Unable to allocate tx_buffer map memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
for (int i = 0; i < sctx->isc_ntxd; i++) { | for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { | ||||
err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); | err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); | ||||
if (err != 0) { | if (err != 0) { | ||||
device_printf(dev, "Unable to create TX DMA map\n"); | device_printf(dev, "Unable to create TX DMA map\n"); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
} | } | ||||
#endif | #endif | ||||
return (0); | return (0); | ||||
Show All 17 Lines | if (map != NULL) { | ||||
txq->ift_sds.ifsd_map[i] = NULL; | txq->ift_sds.ifsd_map[i] = NULL; | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
iflib_txq_destroy(iflib_txq_t txq) | iflib_txq_destroy(iflib_txq_t txq) | ||||
{ | { | ||||
if_ctx_t ctx = txq->ift_ctx; | if_ctx_t ctx = txq->ift_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | |||||
for (int i = 0; i < sctx->isc_ntxd; i++) | for (int i = 0; i < txq->ift_size; i++) | ||||
iflib_txsd_destroy(ctx, txq, i); | iflib_txsd_destroy(ctx, txq, i); | ||||
if (txq->ift_sds.ifsd_map != NULL) { | if (txq->ift_sds.ifsd_map != NULL) { | ||||
free(txq->ift_sds.ifsd_map, M_IFLIB); | free(txq->ift_sds.ifsd_map, M_IFLIB); | ||||
txq->ift_sds.ifsd_map = NULL; | txq->ift_sds.ifsd_map = NULL; | ||||
} | } | ||||
if (txq->ift_sds.ifsd_m != NULL) { | if (txq->ift_sds.ifsd_m != NULL) { | ||||
free(txq->ift_sds.ifsd_m, M_IFLIB); | free(txq->ift_sds.ifsd_m, M_IFLIB); | ||||
txq->ift_sds.ifsd_m = NULL; | txq->ift_sds.ifsd_m = NULL; | ||||
Show All 23 Lines | iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) | ||||
if (txq->ift_sds.ifsd_map != NULL) { | if (txq->ift_sds.ifsd_map != NULL) { | ||||
bus_dmamap_sync(txq->ift_desc_tag, | bus_dmamap_sync(txq->ift_desc_tag, | ||||
txq->ift_sds.ifsd_map[i], | txq->ift_sds.ifsd_map[i], | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(txq->ift_desc_tag, | bus_dmamap_unload(txq->ift_desc_tag, | ||||
txq->ift_sds.ifsd_map[i]); | txq->ift_sds.ifsd_map[i]); | ||||
} | } | ||||
m_freem(*mp); | m_free(*mp); | ||||
DBG_COUNTER_INC(tx_frees); | DBG_COUNTER_INC(tx_frees); | ||||
*mp = NULL; | *mp = NULL; | ||||
} | } | ||||
static int | static int | ||||
iflib_txq_setup(iflib_txq_t txq) | iflib_txq_setup(iflib_txq_t txq) | ||||
{ | { | ||||
if_ctx_t ctx = txq->ift_ctx; | if_ctx_t ctx = txq->ift_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | ||||
iflib_dma_info_t di; | iflib_dma_info_t di; | ||||
int i; | int i; | ||||
/* Set number of descriptors available */ | /* Set number of descriptors available */ | ||||
txq->ift_qstatus = IFLIB_QUEUE_IDLE; | txq->ift_qstatus = IFLIB_QUEUE_IDLE; | ||||
/* Reset indices */ | /* Reset indices */ | ||||
txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; | txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; | ||||
txq->ift_size = sctx->isc_ntxd; | txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; | ||||
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) | for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) | ||||
bzero((void *)di->idi_vaddr, di->idi_size); | bzero((void *)di->idi_vaddr, di->idi_size); | ||||
IFDI_TXQ_SETUP(ctx, txq->ift_id); | IFDI_TXQ_SETUP(ctx, txq->ift_id); | ||||
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) | for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) | ||||
bus_dmamap_sync(di->idi_tag, di->idi_map, | bus_dmamap_sync(di->idi_tag, di->idi_map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
return (0); | return (0); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Allocate memory for rx_buffer structures. Since we use one | * Allocate memory for rx_buffer structures. Since we use one | ||||
* rx_buffer per received packet, the maximum number of rx_buffer's | * rx_buffer per received packet, the maximum number of rx_buffer's | ||||
* that we'll need is equal to the number of receive descriptors | * that we'll need is equal to the number of receive descriptors | ||||
* that we've allocated. | * that we've allocated. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
iflib_rxsd_alloc(iflib_rxq_t rxq) | iflib_rxsd_alloc(iflib_rxq_t rxq) | ||||
{ | { | ||||
if_ctx_t ctx = rxq->ifr_ctx; | if_ctx_t ctx = rxq->ifr_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | |||||
device_t dev = ctx->ifc_dev; | device_t dev = ctx->ifc_dev; | ||||
iflib_fl_t fl; | iflib_fl_t fl; | ||||
iflib_rxsd_t rxsd; | iflib_rxsd_t rxsd; | ||||
int err; | int err; | ||||
MPASS(sctx->isc_nrxd > 0); | MPASS(scctx->isc_nrxd[0] > 0); | ||||
MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); | |||||
fl = rxq->ifr_fl; | fl = rxq->ifr_fl; | ||||
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { | for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { | ||||
fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) * | fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) * | ||||
sctx->isc_nrxd, M_IFLIB, M_WAITOK | M_ZERO); | scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, | ||||
M_WAITOK | M_ZERO); | |||||
if (fl->ifl_sds == NULL) { | if (fl->ifl_sds == NULL) { | ||||
device_printf(dev, "Unable to allocate rx sw desc memory\n"); | device_printf(dev, "Unable to allocate rx sw desc memory\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
fl->ifl_size = sctx->isc_nrxd; /* this isn't necessarily the same */ | fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ | ||||
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ | ||||
1, 0, /* alignment, bounds */ | 1, 0, /* alignment, bounds */ | ||||
BUS_SPACE_MAXADDR, /* lowaddr */ | BUS_SPACE_MAXADDR, /* lowaddr */ | ||||
BUS_SPACE_MAXADDR, /* highaddr */ | BUS_SPACE_MAXADDR, /* highaddr */ | ||||
NULL, NULL, /* filter, filterarg */ | NULL, NULL, /* filter, filterarg */ | ||||
sctx->isc_rx_maxsize, /* maxsize */ | sctx->isc_rx_maxsize, /* maxsize */ | ||||
sctx->isc_rx_nsegments, /* nsegments */ | sctx->isc_rx_nsegments, /* nsegments */ | ||||
sctx->isc_rx_maxsegsize, /* maxsegsize */ | sctx->isc_rx_maxsegsize, /* maxsegsize */ | ||||
0, /* flags */ | 0, /* flags */ | ||||
NULL, /* lockfunc */ | NULL, /* lockfunc */ | ||||
NULL, /* lockarg */ | NULL, /* lockarg */ | ||||
&fl->ifl_desc_tag); | &fl->ifl_desc_tag); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "%s: bus_dma_tag_create failed %d\n", | device_printf(dev, "%s: bus_dma_tag_create failed %d\n", | ||||
__func__, err); | __func__, err); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
rxsd = fl->ifl_sds; | rxsd = fl->ifl_sds; | ||||
for (int i = 0; i < sctx->isc_nrxd; i++, rxsd++) { | for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++, rxsd++) { | ||||
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map); | err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "%s: bus_dmamap_create failed: %d\n", | device_printf(dev, "%s: bus_dmamap_create failed: %d\n", | ||||
__func__, err); | __func__, err); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
▲ Show 20 Lines • Show All 141 Lines • ▼ Show 20 Lines | #endif | ||||
MPASS(fl->ifl_credits <= fl->ifl_size); | MPASS(fl->ifl_credits <= fl->ifl_size); | ||||
if (++fl->ifl_pidx == fl->ifl_size) { | if (++fl->ifl_pidx == fl->ifl_size) { | ||||
fl->ifl_pidx = 0; | fl->ifl_pidx = 0; | ||||
fl->ifl_gen = 1; | fl->ifl_gen = 1; | ||||
rxsd = fl->ifl_sds; | rxsd = fl->ifl_sds; | ||||
} | } | ||||
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { | if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { | ||||
ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx, | ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx, | ||||
fl->ifl_bus_addrs, fl->ifl_vm_addrs, i); | fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size); | ||||
i = 0; | i = 0; | ||||
pidx = fl->ifl_pidx; | pidx = fl->ifl_pidx; | ||||
} | } | ||||
} | } | ||||
done: | done: | ||||
DBG_COUNTER_INC(rxd_flush); | DBG_COUNTER_INC(rxd_flush); | ||||
if (fl->ifl_pidx == 0) | if (fl->ifl_pidx == 0) | ||||
pidx = fl->ifl_size - 1; | pidx = fl->ifl_size - 1; | ||||
▲ Show 20 Lines • Show All 211 Lines • ▼ Show 20 Lines | for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { | ||||
callout_stop(&txq->ift_timer); | callout_stop(&txq->ift_timer); | ||||
callout_stop(&txq->ift_db_check); | callout_stop(&txq->ift_db_check); | ||||
CALLOUT_UNLOCK(txq); | CALLOUT_UNLOCK(txq); | ||||
iflib_netmap_txq_init(ctx, txq); | iflib_netmap_txq_init(ctx, txq); | ||||
} | } | ||||
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { | for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { | ||||
iflib_netmap_rxq_init(ctx, rxq); | iflib_netmap_rxq_init(ctx, rxq); | ||||
} | } | ||||
#ifdef INVARIANTS | |||||
i = if_getdrvflags(ifp); | |||||
#endif | |||||
IFDI_INIT(ctx); | IFDI_INIT(ctx); | ||||
MPASS(if_getdrvflags(ifp) == i); | |||||
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { | for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { | ||||
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { | for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { | ||||
if (iflib_fl_setup(fl)) { | if (iflib_fl_setup(fl)) { | ||||
device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); | device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); | ||||
goto done; | goto done; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
Show All 31 Lines | |||||
} | } | ||||
static void | static void | ||||
iflib_stop(if_ctx_t ctx) | iflib_stop(if_ctx_t ctx) | ||||
{ | { | ||||
iflib_txq_t txq = ctx->ifc_txqs; | iflib_txq_t txq = ctx->ifc_txqs; | ||||
iflib_rxq_t rxq = ctx->ifc_rxqs; | iflib_rxq_t rxq = ctx->ifc_rxqs; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | |||||
iflib_dma_info_t di; | iflib_dma_info_t di; | ||||
iflib_fl_t fl; | iflib_fl_t fl; | ||||
int i, j; | int i, j; | ||||
/* Tell the stack that the interface is no longer active */ | /* Tell the stack that the interface is no longer active */ | ||||
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); | if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); | ||||
IFDI_INTR_DISABLE(ctx); | IFDI_INTR_DISABLE(ctx); | ||||
msleep(ctx, &ctx->ifc_mtx, PUSER, "iflib_init", hz); | msleep(ctx, &ctx->ifc_mtx, PUSER, "iflib_init", hz); | ||||
/* Wait for current tx queue users to exit to disarm watchdog timer. */ | /* Wait for current tx queue users to exit to disarm watchdog timer. */ | ||||
for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { | for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { | ||||
/* make sure all transmitters have completed before proceeding XXX */ | /* make sure all transmitters have completed before proceeding XXX */ | ||||
/* clean any enqueued buffers */ | /* clean any enqueued buffers */ | ||||
iflib_txq_check_drain(txq, 0); | iflib_txq_check_drain(txq, 0); | ||||
/* Free any existing tx buffers. */ | /* Free any existing tx buffers. */ | ||||
for (j = 0; j < sctx->isc_ntxd; j++) { | for (j = 0; j < txq->ift_size; j++) { | ||||
iflib_txsd_free(ctx, txq, j); | iflib_txsd_free(ctx, txq, j); | ||||
} | } | ||||
txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; | txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; | ||||
txq->ift_in_use = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; | txq->ift_in_use = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; | ||||
txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; | txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; | ||||
txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; | txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; | ||||
txq->ift_pullups = 0; | txq->ift_pullups = 0; | ||||
ifmp_ring_reset_stats(txq->ift_br[0]); | ifmp_ring_reset_stats(txq->ift_br[0]); | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | |||||
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri) | assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri) | ||||
{ | { | ||||
int i, padlen , flags, cltype; | int i, padlen , flags, cltype; | ||||
struct mbuf *m, *mh, *mt; | struct mbuf *m, *mh, *mt; | ||||
iflib_rxsd_t sd; | iflib_rxsd_t sd; | ||||
caddr_t cl; | caddr_t cl; | ||||
i = 0; | i = 0; | ||||
mh = NULL; | |||||
do { | do { | ||||
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE); | sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE); | ||||
MPASS(sd->ifsd_cl != NULL); | MPASS(sd->ifsd_cl != NULL); | ||||
MPASS(sd->ifsd_m != NULL); | MPASS(sd->ifsd_m != NULL); | ||||
/* Don't include zero-length frags */ | |||||
if (ri->iri_frags[i].irf_len == 0) { | |||||
/* XXX we can save the cluster here, but not the mbuf */ | |||||
m_init(sd->ifsd_m, M_NOWAIT, MT_DATA, 0); | |||||
m_free(sd->ifsd_m); | |||||
sd->ifsd_m = NULL; | |||||
continue; | |||||
} | |||||
m = sd->ifsd_m; | m = sd->ifsd_m; | ||||
if (i == 0) { | if (mh == NULL) { | ||||
flags = M_PKTHDR|M_EXT; | flags = M_PKTHDR|M_EXT; | ||||
mh = mt = m; | mh = mt = m; | ||||
padlen = ri->iri_pad; | padlen = ri->iri_pad; | ||||
} else { | } else { | ||||
flags = M_EXT; | flags = M_EXT; | ||||
mt->m_next = m; | mt->m_next = m; | ||||
mt = m; | mt = m; | ||||
/* assuming padding is only on the first fragment */ | /* assuming padding is only on the first fragment */ | ||||
padlen = 0; | padlen = 0; | ||||
} | } | ||||
sd->ifsd_m = NULL; | sd->ifsd_m = NULL; | ||||
cl = sd->ifsd_cl; | cl = sd->ifsd_cl; | ||||
sd->ifsd_cl = NULL; | sd->ifsd_cl = NULL; | ||||
/* Can these two be made one ? */ | /* Can these two be made one ? */ | ||||
m_init(m, M_NOWAIT, MT_DATA, flags); | m_init(m, M_NOWAIT, MT_DATA, flags); | ||||
m_cljset(m, cl, cltype); | m_cljset(m, cl, cltype); | ||||
/* | /* | ||||
* These must follow m_init and m_cljset | * These must follow m_init and m_cljset | ||||
*/ | */ | ||||
m->m_data += padlen; | m->m_data += padlen; | ||||
ri->iri_len -= padlen; | ri->iri_len -= padlen; | ||||
m->m_len = ri->iri_len; | m->m_len = ri->iri_frags[i].irf_len; | ||||
} while (++i < ri->iri_nfrags); | } while (++i < ri->iri_nfrags); | ||||
return (mh); | return (mh); | ||||
} | } | ||||
/* | /* | ||||
* Process one software descriptor | * Process one software descriptor | ||||
*/ | */ | ||||
static struct mbuf * | static struct mbuf * | ||||
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) | iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) | ||||
{ | { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
iflib_rxsd_t sd; | iflib_rxsd_t sd; | ||||
/* should I merge this back in now that the two paths are basically duplicated? */ | /* should I merge this back in now that the two paths are basically duplicated? */ | ||||
if (ri->iri_len <= IFLIB_RX_COPY_THRESH) { | if (ri->iri_nfrags == 1 && | ||||
ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) { | |||||
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE); | sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE); | ||||
m = sd->ifsd_m; | m = sd->ifsd_m; | ||||
sd->ifsd_m = NULL; | sd->ifsd_m = NULL; | ||||
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); | m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); | ||||
memcpy(m->m_data, sd->ifsd_cl, ri->iri_len); | memcpy(m->m_data, sd->ifsd_cl, ri->iri_len); | ||||
m->m_len = ri->iri_len; | m->m_len = ri->iri_frags[0].irf_len; | ||||
} else { | } else { | ||||
m = assemble_segments(rxq, ri); | m = assemble_segments(rxq, ri); | ||||
} | } | ||||
m->m_pkthdr.len = ri->iri_len; | m->m_pkthdr.len = ri->iri_len; | ||||
m->m_pkthdr.rcvif = ri->iri_ifp; | m->m_pkthdr.rcvif = ri->iri_ifp; | ||||
m->m_flags |= ri->iri_flags; | m->m_flags |= ri->iri_flags; | ||||
m->m_pkthdr.ether_vtag = ri->iri_vtag; | m->m_pkthdr.ether_vtag = ri->iri_vtag; | ||||
m->m_pkthdr.flowid = ri->iri_flowid; | m->m_pkthdr.flowid = ri->iri_flowid; | ||||
M_HASHTYPE_SET(m, ri->iri_rsstype); | M_HASHTYPE_SET(m, ri->iri_rsstype); | ||||
m->m_pkthdr.csum_flags = ri->iri_csum_flags; | m->m_pkthdr.csum_flags = ri->iri_csum_flags; | ||||
m->m_pkthdr.csum_data = ri->iri_csum_data; | m->m_pkthdr.csum_data = ri->iri_csum_data; | ||||
return (m); | return (m); | ||||
} | } | ||||
static bool | static bool | ||||
iflib_rxeof(iflib_rxq_t rxq, int budget) | iflib_rxeof(iflib_rxq_t rxq, int budget) | ||||
{ | { | ||||
if_ctx_t ctx = rxq->ifr_ctx; | if_ctx_t ctx = rxq->ifr_ctx; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | |||||
int avail, i; | int avail, i; | ||||
uint16_t *cidxp; | uint16_t *cidxp; | ||||
struct if_rxd_info ri; | struct if_rxd_info ri; | ||||
int err, budget_left, rx_bytes, rx_pkts; | int err, budget_left, rx_bytes, rx_pkts; | ||||
iflib_fl_t fl; | iflib_fl_t fl; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
struct lro_entry *queued; | |||||
int lro_enabled; | int lro_enabled; | ||||
/* | /* | ||||
* XXX early demux data packets so that if_input processing only handles | * XXX early demux data packets so that if_input processing only handles | ||||
* acks in interrupt context | * acks in interrupt context | ||||
*/ | */ | ||||
struct mbuf *m, *mh, *mt; | struct mbuf *m, *mh, *mt; | ||||
if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &budget)) { | if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &budget)) { | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
mh = mt = NULL; | mh = mt = NULL; | ||||
MPASS(budget > 0); | MPASS(budget > 0); | ||||
rx_pkts = rx_bytes = 0; | rx_pkts = rx_bytes = 0; | ||||
if (sctx->isc_flags & IFLIB_HAS_CQ) | if (sctx->isc_flags & IFLIB_HAS_RXCQ) | ||||
cidxp = &rxq->ifr_cq_cidx; | cidxp = &rxq->ifr_cq_cidx; | ||||
else | else | ||||
cidxp = &rxq->ifr_fl[0].ifl_cidx; | cidxp = &rxq->ifr_fl[0].ifl_cidx; | ||||
if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp)) == 0) { | if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { | ||||
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) | for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) | ||||
__iflib_fl_refill_lt(ctx, fl, budget + 8); | __iflib_fl_refill_lt(ctx, fl, budget + 8); | ||||
DBG_COUNTER_INC(rx_unavail); | DBG_COUNTER_INC(rx_unavail); | ||||
return (false); | return (false); | ||||
} | } | ||||
for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { | for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { | ||||
if (__predict_false(!CTX_ACTIVE(ctx))) { | if (__predict_false(!CTX_ACTIVE(ctx))) { | ||||
DBG_COUNTER_INC(rx_ctx_inactive); | DBG_COUNTER_INC(rx_ctx_inactive); | ||||
break; | break; | ||||
} | } | ||||
/* | /* | ||||
* Reset client set fields to their default values | * Reset client set fields to their default values | ||||
*/ | */ | ||||
bzero(&ri, sizeof(ri)); | bzero(&ri, sizeof(ri)); | ||||
ri.iri_qsidx = rxq->ifr_id; | ri.iri_qsidx = rxq->ifr_id; | ||||
ri.iri_cidx = *cidxp; | ri.iri_cidx = *cidxp; | ||||
ri.iri_ifp = ctx->ifc_ifp; | ri.iri_ifp = ctx->ifc_ifp; | ||||
ri.iri_frags = rxq->ifr_frags; | ri.iri_frags = rxq->ifr_frags; | ||||
err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); | err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); | ||||
/* in lieu of handling correctly - make sure it isn't being unhandled */ | /* in lieu of handling correctly - make sure it isn't being unhandled */ | ||||
MPASS(err == 0); | MPASS(err == 0); | ||||
if (sctx->isc_flags & IFLIB_HAS_CQ) { | if (sctx->isc_flags & IFLIB_HAS_RXCQ) { | ||||
/* we know we consumed _one_ CQ entry */ | *cidxp = ri.iri_cidx; | ||||
if (++rxq->ifr_cq_cidx == sctx->isc_nrxd) { | /* Update our consumer index */ | ||||
rxq->ifr_cq_cidx = 0; | while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { | ||||
rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; | |||||
rxq->ifr_cq_gen = 0; | rxq->ifr_cq_gen = 0; | ||||
} | } | ||||
/* was this only a completion queue message? */ | /* was this only a completion queue message? */ | ||||
if (__predict_false(ri.iri_nfrags == 0)) | if (__predict_false(ri.iri_nfrags == 0)) | ||||
continue; | continue; | ||||
} | } | ||||
MPASS(ri.iri_nfrags != 0); | MPASS(ri.iri_nfrags != 0); | ||||
MPASS(ri.iri_len != 0); | MPASS(ri.iri_len != 0); | ||||
/* will advance the cidx on the corresponding free lists */ | /* will advance the cidx on the corresponding free lists */ | ||||
m = iflib_rxd_pkt_get(rxq, &ri); | m = iflib_rxd_pkt_get(rxq, &ri); | ||||
if (avail == 0 && budget_left) | if (avail == 0 && budget_left) | ||||
avail = iflib_rxd_avail(ctx, rxq, *cidxp); | avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); | ||||
if (__predict_false(m == NULL)) { | if (__predict_false(m == NULL)) { | ||||
DBG_COUNTER_INC(rx_mbuf_null); | DBG_COUNTER_INC(rx_mbuf_null); | ||||
continue; | continue; | ||||
} | } | ||||
/* imm_pkt: -- cxgb */ | /* imm_pkt: -- cxgb */ | ||||
if (mh == NULL) | if (mh == NULL) | ||||
mh = mt = m; | mh = mt = m; | ||||
else { | else { | ||||
mt->m_nextpkt = m; | mt->m_nextpkt = m; | ||||
mt = m; | mt = m; | ||||
} | } | ||||
} | } | ||||
/* make sure that we can refill faster than drain */ | /* make sure that we can refill faster than drain */ | ||||
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) | for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) | ||||
__iflib_fl_refill_lt(ctx, fl, budget + 8); | __iflib_fl_refill_lt(ctx, fl, budget + 8); | ||||
ifp = ctx->ifc_ifp; | ifp = ctx->ifc_ifp; | ||||
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); | lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); | ||||
while (mh != NULL) { | while (mh != NULL) { | ||||
m = mh; | m = mh; | ||||
mh = mh->m_nextpkt; | mh = mh->m_nextpkt; | ||||
m->m_nextpkt = NULL; | m->m_nextpkt = NULL; | ||||
rx_bytes += m->m_pkthdr.len; | rx_bytes += m->m_pkthdr.len; | ||||
rx_pkts++; | rx_pkts++; | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
if (lro_enabled && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) | if (lro_enabled && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) | ||||
continue; | continue; | ||||
#endif | #endif | ||||
DBG_COUNTER_INC(rx_if_input); | DBG_COUNTER_INC(rx_if_input); | ||||
ifp->if_input(ifp, m); | ifp->if_input(ifp, m); | ||||
} | } | ||||
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); | if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); | ||||
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); | if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); | ||||
/* | /* | ||||
* Flush any outstanding LRO work | * Flush any outstanding LRO work | ||||
*/ | */ | ||||
while ((queued = LIST_FIRST(&rxq->ifr_lc.lro_active)) != NULL) { | |||||
LIST_REMOVE(queued, next); | |||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
tcp_lro_flush(&rxq->ifr_lc, queued); | tcp_lro_flush_all(&rxq->ifr_lc); | ||||
#endif | #endif | ||||
if (avail) | |||||
return true; | |||||
return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); | |||||
} | } | ||||
return (iflib_rxd_avail(ctx, rxq, *cidxp)); | |||||
} | |||||
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) | #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) | ||||
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) | #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) | ||||
#define TXQ_MAX_DB_DEFERRED(ctx) (ctx->ifc_sctx->isc_ntxd >> 5) | #define TXQ_MAX_DB_DEFERRED(size) (size >> 5) | ||||
#define TXQ_MAX_DB_CONSUMED(ctx) (ctx->ifc_sctx->isc_ntxd >> 4) | #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) | ||||
static __inline void | static __inline void | ||||
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring) | iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring) | ||||
{ | { | ||||
uint32_t dbval; | uint32_t dbval; | ||||
if (ring || txq->ift_db_pending >= TXQ_MAX_DB_DEFERRED(ctx)) { | if (ring || txq->ift_db_pending >= | ||||
TXQ_MAX_DB_DEFERRED(txq->ift_size)) { | |||||
/* the lock will only ever be contended in the !min_latency case */ | /* the lock will only ever be contended in the !min_latency case */ | ||||
if (!TXDB_TRYLOCK(txq)) | if (!TXDB_TRYLOCK(txq)) | ||||
return; | return; | ||||
dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; | dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; | ||||
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); | ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); | ||||
txq->ift_db_pending = txq->ift_npending = 0; | txq->ift_db_pending = txq->ift_npending = 0; | ||||
TXDB_UNLOCK(txq); | TXDB_UNLOCK(txq); | ||||
Show All 29 Lines | |||||
#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) | #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) | ||||
#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) | #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) | ||||
static int | static int | ||||
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) | iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) | ||||
{ | { | ||||
struct ether_vlan_header *eh; | struct ether_vlan_header *eh; | ||||
struct mbuf *m; | struct mbuf *m, *n; | ||||
m = *mp; | n = m = *mp; | ||||
/* | /* | ||||
* Determine where frame payload starts. | * Determine where frame payload starts. | ||||
* Jump over vlan headers if already present, | * Jump over vlan headers if already present, | ||||
* helpful for QinQ too. | * helpful for QinQ too. | ||||
*/ | */ | ||||
if (__predict_false(m->m_len < sizeof(*eh))) { | if (__predict_false(m->m_len < sizeof(*eh))) { | ||||
txq->ift_pullups++; | txq->ift_pullups++; | ||||
if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) | if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) | ||||
Show All 9 Lines | iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) | ||||
} | } | ||||
switch (pi->ipi_etype) { | switch (pi->ipi_etype) { | ||||
#ifdef INET | #ifdef INET | ||||
case ETHERTYPE_IP: | case ETHERTYPE_IP: | ||||
{ | { | ||||
struct ip *ip = NULL; | struct ip *ip = NULL; | ||||
struct tcphdr *th = NULL; | struct tcphdr *th = NULL; | ||||
struct mbuf *n; | |||||
int minthlen; | int minthlen; | ||||
minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); | minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); | ||||
if (__predict_false(m->m_len < minthlen)) { | if (__predict_false(m->m_len < minthlen)) { | ||||
/* | /* | ||||
* if this code bloat is causing too much of a hit | * if this code bloat is causing too much of a hit | ||||
* move it to a separate function and mark it noinline | * move it to a separate function and mark it noinline | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 125 Lines • ▼ Show 20 Lines | if ((m_next->m_flags & M_EXT) == 0) { | ||||
m = m_next; | m = m_next; | ||||
m->m_next = tmp; | m->m_next = tmp; | ||||
} | } | ||||
return (m); | return (m); | ||||
} | } | ||||
/* | /* | ||||
* If dodgy hardware rejects the scatter gather chain we've handed it | * If dodgy hardware rejects the scatter gather chain we've handed it | ||||
* we'll need to rebuild the mbuf chain before we can call m_defrag | * we'll need to remove the mbuf chain from ifsg_m[] before we can add the | ||||
* m_defrag'd mbufs | |||||
*/ | */ | ||||
static __noinline struct mbuf * | static __noinline struct mbuf * | ||||
iflib_rebuild_mbuf(iflib_txq_t txq) | iflib_remove_mbuf(iflib_txq_t txq) | ||||
{ | { | ||||
int ntxd, i, pidx; | |||||
int ntxd, mhlen, len, i, pidx; | |||||
struct mbuf *m, *mh, **ifsd_m; | struct mbuf *m, *mh, **ifsd_m; | ||||
if_shared_ctx_t sctx; | |||||
pidx = txq->ift_pidx; | pidx = txq->ift_pidx; | ||||
ifsd_m = txq->ift_sds.ifsd_m; | ifsd_m = txq->ift_sds.ifsd_m; | ||||
sctx = txq->ift_ctx->ifc_sctx; | ntxd = txq->ift_size; | ||||
ntxd = sctx->isc_ntxd; | |||||
mh = m = ifsd_m[pidx]; | mh = m = ifsd_m[pidx]; | ||||
ifsd_m[pidx] = NULL; | ifsd_m[pidx] = NULL; | ||||
#if MEMORY_LOGGING | #if MEMORY_LOGGING | ||||
txq->ift_dequeued++; | txq->ift_dequeued++; | ||||
#endif | #endif | ||||
len = m->m_len; | |||||
mhlen = m->m_pkthdr.len; | |||||
i = 1; | i = 1; | ||||
while (len < mhlen && (m->m_next == NULL)) { | while (m) { | ||||
m->m_next = ifsd_m[(pidx + i) & (ntxd-1)]; | |||||
ifsd_m[(pidx + i) & (ntxd -1)] = NULL; | ifsd_m[(pidx + i) & (ntxd -1)] = NULL; | ||||
#if MEMORY_LOGGING | #if MEMORY_LOGGING | ||||
txq->ift_dequeued++; | txq->ift_dequeued++; | ||||
#endif | #endif | ||||
m = m->m_next; | m = m->m_next; | ||||
len += m->m_len; | |||||
i++; | i++; | ||||
} | } | ||||
return (mh); | return (mh); | ||||
} | } | ||||
static int | static int | ||||
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, | iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, | ||||
struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, | struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, | ||||
int max_segs, int flags) | int max_segs, int flags) | ||||
{ | { | ||||
if_ctx_t ctx; | if_ctx_t ctx; | ||||
if_shared_ctx_t sctx; | if_shared_ctx_t sctx; | ||||
if_softc_ctx_t scctx; | |||||
int i, next, pidx, mask, err, maxsegsz, ntxd, count; | int i, next, pidx, mask, err, maxsegsz, ntxd, count; | ||||
struct mbuf *m, *tmp, **ifsd_m, **mp; | struct mbuf *m, *tmp, **ifsd_m, **mp; | ||||
m = *m0; | m = *m0; | ||||
/* | /* | ||||
* Please don't ever do this | * Please don't ever do this | ||||
*/ | */ | ||||
if (__predict_false(m->m_len == 0)) | if (__predict_false(m->m_len == 0)) | ||||
*m0 = m = collapse_pkthdr(m); | *m0 = m = collapse_pkthdr(m); | ||||
ctx = txq->ift_ctx; | ctx = txq->ift_ctx; | ||||
sctx = ctx->ifc_sctx; | sctx = ctx->ifc_sctx; | ||||
scctx = &ctx->ifc_softc_ctx; | |||||
ifsd_m = txq->ift_sds.ifsd_m; | ifsd_m = txq->ift_sds.ifsd_m; | ||||
ntxd = sctx->isc_ntxd; | ntxd = txq->ift_size; | ||||
pidx = txq->ift_pidx; | pidx = txq->ift_pidx; | ||||
if (map != NULL) { | if (map != NULL) { | ||||
uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; | uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; | ||||
err = bus_dmamap_load_mbuf_sg(tag, map, | err = bus_dmamap_load_mbuf_sg(tag, map, | ||||
*m0, segs, nsegs, BUS_DMA_NOWAIT); | *m0, segs, nsegs, BUS_DMA_NOWAIT); | ||||
if (err) | if (err) | ||||
return (err); | return (err); | ||||
ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; | ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; | ||||
i = 0; | i = 0; | ||||
next = pidx; | next = pidx; | ||||
mask = (sctx->isc_ntxd-1); | mask = (txq->ift_size-1); | ||||
m = *m0; | m = *m0; | ||||
do { | do { | ||||
mp = &ifsd_m[next]; | mp = &ifsd_m[next]; | ||||
*mp = m; | *mp = m; | ||||
m = m->m_next; | m = m->m_next; | ||||
(*mp)->m_next = NULL; | |||||
if (__predict_false((*mp)->m_len == 0)) { | if (__predict_false((*mp)->m_len == 0)) { | ||||
m_free(*mp); | m_free(*mp); | ||||
*mp = NULL; | *mp = NULL; | ||||
} else | } else | ||||
next = (pidx + i) & (ntxd-1); | next = (pidx + i) & (ntxd-1); | ||||
} while (m != NULL); | } while (m != NULL); | ||||
} else { | } else { | ||||
int buflen, sgsize, max_sgsize; | int buflen, sgsize, max_sgsize; | ||||
Show All 34 Lines | #endif | ||||
buflen -= sgsize; | buflen -= sgsize; | ||||
i++; | i++; | ||||
if (i >= max_segs) | if (i >= max_segs) | ||||
goto err; | goto err; | ||||
} | } | ||||
count++; | count++; | ||||
tmp = m; | tmp = m; | ||||
m = m->m_next; | m = m->m_next; | ||||
tmp->m_next = NULL; | |||||
} while (m != NULL); | } while (m != NULL); | ||||
*nsegs = i; | *nsegs = i; | ||||
} | } | ||||
return (0); | return (0); | ||||
err: | err: | ||||
*m0 = iflib_rebuild_mbuf(txq); | *m0 = iflib_remove_mbuf(txq); | ||||
return (EFBIG); | return (EFBIG); | ||||
} | } | ||||
static int | static int | ||||
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) | iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) | ||||
{ | { | ||||
if_ctx_t ctx; | if_ctx_t ctx; | ||||
if_shared_ctx_t sctx; | if_shared_ctx_t sctx; | ||||
if_softc_ctx_t scctx; | if_softc_ctx_t scctx; | ||||
bus_dma_segment_t *segs; | bus_dma_segment_t *segs; | ||||
struct mbuf *m_head; | struct mbuf *m_head; | ||||
bus_dmamap_t map; | bus_dmamap_t map; | ||||
struct if_pkt_info pi; | struct if_pkt_info pi; | ||||
int remap = 0; | int remap = 0; | ||||
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; | int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; | ||||
bus_dma_tag_t desc_tag; | bus_dma_tag_t desc_tag; | ||||
segs = txq->ift_segs; | segs = txq->ift_segs; | ||||
ctx = txq->ift_ctx; | ctx = txq->ift_ctx; | ||||
sctx = ctx->ifc_sctx; | sctx = ctx->ifc_sctx; | ||||
scctx = &ctx->ifc_softc_ctx; | scctx = &ctx->ifc_softc_ctx; | ||||
segs = txq->ift_segs; | segs = txq->ift_segs; | ||||
ntxd = sctx->isc_ntxd; | ntxd = txq->ift_size; | ||||
m_head = *m_headp; | m_head = *m_headp; | ||||
map = NULL; | map = NULL; | ||||
/* | /* | ||||
* If we're doing TSO the next descriptor to clean may be quite far ahead | * If we're doing TSO the next descriptor to clean may be quite far ahead | ||||
*/ | */ | ||||
cidx = txq->ift_cidx; | cidx = txq->ift_cidx; | ||||
pidx = txq->ift_pidx; | pidx = txq->ift_pidx; | ||||
▲ Show 20 Lines • Show All 70 Lines • ▼ Show 20 Lines | defrag: | ||||
* descriptors - this does not hold true on all drivers, e.g. | * descriptors - this does not hold true on all drivers, e.g. | ||||
* cxgb | * cxgb | ||||
*/ | */ | ||||
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { | if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { | ||||
txq->ift_no_desc_avail++; | txq->ift_no_desc_avail++; | ||||
if (map != NULL) | if (map != NULL) | ||||
bus_dmamap_unload(desc_tag, map); | bus_dmamap_unload(desc_tag, map); | ||||
DBG_COUNTER_INC(encap_txq_avail_fail); | DBG_COUNTER_INC(encap_txq_avail_fail); | ||||
if (txq->ift_task.gt_task.ta_pending == 0) | if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) | ||||
GROUPTASK_ENQUEUE(&txq->ift_task); | GROUPTASK_ENQUEUE(&txq->ift_task); | ||||
return (ENOBUFS); | return (ENOBUFS); | ||||
} | } | ||||
pi.ipi_segs = segs; | pi.ipi_segs = segs; | ||||
pi.ipi_nsegs = nsegs; | pi.ipi_nsegs = nsegs; | ||||
MPASS(pidx >= 0 && pidx < sctx->isc_ntxd); | MPASS(pidx >= 0 && pidx < txq->ift_size); | ||||
#ifdef PKT_DEBUG | #ifdef PKT_DEBUG | ||||
print_pkt(&pi); | print_pkt(&pi); | ||||
#endif | #endif | ||||
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { | if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { | ||||
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, | bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, | ||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | ||||
DBG_COUNTER_INC(tx_encap); | DBG_COUNTER_INC(tx_encap); | ||||
MPASS(pi.ipi_new_pidx >= 0 && pi.ipi_new_pidx < sctx->isc_ntxd); | MPASS(pi.ipi_new_pidx >= 0 && | ||||
pi.ipi_new_pidx < txq->ift_size); | |||||
ndesc = pi.ipi_new_pidx - pi.ipi_pidx; | ndesc = pi.ipi_new_pidx - pi.ipi_pidx; | ||||
if (pi.ipi_new_pidx < pi.ipi_pidx) { | if (pi.ipi_new_pidx < pi.ipi_pidx) { | ||||
ndesc += sctx->isc_ntxd; | ndesc += txq->ift_size; | ||||
txq->ift_gen = 1; | txq->ift_gen = 1; | ||||
} | } | ||||
MPASS(pi.ipi_new_pidx != pidx); | MPASS(pi.ipi_new_pidx != pidx); | ||||
MPASS(ndesc > 0); | MPASS(ndesc > 0); | ||||
txq->ift_in_use += ndesc; | txq->ift_in_use += ndesc; | ||||
/* | /* | ||||
* We update the last software descriptor again here because there may | * We update the last software descriptor again here because there may | ||||
* be a sentinel and/or there may be more mbufs than segments | * be a sentinel and/or there may be more mbufs than segments | ||||
*/ | */ | ||||
txq->ift_pidx = pi.ipi_new_pidx; | txq->ift_pidx = pi.ipi_new_pidx; | ||||
txq->ift_npending += pi.ipi_ndescs; | txq->ift_npending += pi.ipi_ndescs; | ||||
} else if (__predict_false(err == EFBIG && remap < 2)) { | } else if (__predict_false(err == EFBIG && remap < 2)) { | ||||
*m_headp = m_head = iflib_rebuild_mbuf(txq); | *m_headp = m_head = iflib_remove_mbuf(txq); | ||||
remap = 1; | remap = 1; | ||||
txq->ift_txd_encap_efbig++; | txq->ift_txd_encap_efbig++; | ||||
goto defrag; | goto defrag; | ||||
} else | } else | ||||
DBG_COUNTER_INC(encap_txd_encap_fail); | DBG_COUNTER_INC(encap_txd_encap_fail); | ||||
return (err); | return (err); | ||||
defrag_failed: | defrag_failed: | ||||
txq->ift_mbuf_defrag_failed++; | txq->ift_mbuf_defrag_failed++; | ||||
txq->ift_map_failed++; | txq->ift_map_failed++; | ||||
m_freem(*m_headp); | m_freem(*m_headp); | ||||
DBG_COUNTER_INC(tx_frees); | DBG_COUNTER_INC(tx_frees); | ||||
*m_headp = NULL; | *m_headp = NULL; | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* forward compatibility for cxgb */ | /* forward compatibility for cxgb */ | ||||
#define FIRST_QSET(ctx) 0 | #define FIRST_QSET(ctx) 0 | ||||
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) | #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) | ||||
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) | #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) | ||||
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NRXQSETS(ctx)) + FIRST_QSET(ctx)) | #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) | ||||
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) | #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) | ||||
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) | #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) | ||||
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) | #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) | ||||
/* if there are more than TXQ_MIN_OCCUPANCY packets pending we consider deferring | /* if there are more than TXQ_MIN_OCCUPANCY packets pending we consider deferring | ||||
* doorbell writes | * doorbell writes | ||||
* | * | ||||
* ORing with 2 assures that min occupancy is never less than 2 without any conditional logic | * ORing with 2 assures that min occupancy is never less than 2 without any conditional logic | ||||
*/ | */ | ||||
#define TXQ_MIN_OCCUPANCY(ctx) ((ctx->ifc_sctx->isc_ntxd >> 6)| 0x2) | #define TXQ_MIN_OCCUPANCY(size) ((size >> 6)| 0x2) | ||||
static inline int | static inline int | ||||
iflib_txq_min_occupancy(iflib_txq_t txq) | iflib_txq_min_occupancy(iflib_txq_t txq) | ||||
{ | { | ||||
if_ctx_t ctx; | if_ctx_t ctx; | ||||
ctx = txq->ift_ctx; | ctx = txq->ift_ctx; | ||||
return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen) < TXQ_MIN_OCCUPANCY(ctx) + MAX_TX_DESC(ctx)); | return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, | ||||
txq->ift_gen) < TXQ_MIN_OCCUPANCY(txq->ift_size) + | |||||
MAX_TX_DESC(ctx)); | |||||
} | } | ||||
static void | static void | ||||
iflib_tx_desc_free(iflib_txq_t txq, int n) | iflib_tx_desc_free(iflib_txq_t txq, int n) | ||||
{ | { | ||||
int hasmap; | int hasmap; | ||||
uint32_t qsize, cidx, mask, gen; | uint32_t qsize, cidx, mask, gen; | ||||
struct mbuf *m, **ifsd_m; | struct mbuf *m, **ifsd_m; | ||||
uint8_t *ifsd_flags; | uint8_t *ifsd_flags; | ||||
bus_dmamap_t *ifsd_map; | bus_dmamap_t *ifsd_map; | ||||
cidx = txq->ift_cidx; | cidx = txq->ift_cidx; | ||||
gen = txq->ift_gen; | gen = txq->ift_gen; | ||||
qsize = txq->ift_ctx->ifc_sctx->isc_ntxd; | qsize = txq->ift_size; | ||||
mask = qsize-1; | mask = qsize-1; | ||||
hasmap = txq->ift_sds.ifsd_map != NULL; | hasmap = txq->ift_sds.ifsd_map != NULL; | ||||
ifsd_flags = txq->ift_sds.ifsd_flags; | ifsd_flags = txq->ift_sds.ifsd_flags; | ||||
ifsd_m = txq->ift_sds.ifsd_m; | ifsd_m = txq->ift_sds.ifsd_m; | ||||
ifsd_map = txq->ift_sds.ifsd_map; | ifsd_map = txq->ift_sds.ifsd_map; | ||||
while (n--) { | while (n--) { | ||||
prefetch(ifsd_m[(cidx + 3) & mask]); | prefetch(ifsd_m[(cidx + 3) & mask]); | ||||
Show All 9 Lines | if (ifsd_m[cidx] != NULL) { | ||||
*/ | */ | ||||
bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); | bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); | ||||
ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; | ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; | ||||
} | } | ||||
if ((m = ifsd_m[cidx]) != NULL) { | if ((m = ifsd_m[cidx]) != NULL) { | ||||
/* XXX we don't support any drivers that batch packets yet */ | /* XXX we don't support any drivers that batch packets yet */ | ||||
MPASS(m->m_nextpkt == NULL); | MPASS(m->m_nextpkt == NULL); | ||||
m_freem(m); | m_free(m); | ||||
ifsd_m[cidx] = NULL; | ifsd_m[cidx] = NULL; | ||||
#if MEMORY_LOGGING | #if MEMORY_LOGGING | ||||
txq->ift_dequeued++; | txq->ift_dequeued++; | ||||
#endif | #endif | ||||
DBG_COUNTER_INC(tx_frees); | DBG_COUNTER_INC(tx_frees); | ||||
} | } | ||||
} | } | ||||
if (__predict_false(++cidx == qsize)) { | if (__predict_false(++cidx == qsize)) { | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || | ||||
DBG_COUNTER_INC(txq_drain_notready); | DBG_COUNTER_INC(txq_drain_notready); | ||||
return (0); | return (0); | ||||
} | } | ||||
avail = IDXDIFF(pidx, cidx, r->size); | avail = IDXDIFF(pidx, cidx, r->size); | ||||
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { | if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { | ||||
DBG_COUNTER_INC(txq_drain_flushing); | DBG_COUNTER_INC(txq_drain_flushing); | ||||
for (i = 0; i < avail; i++) { | for (i = 0; i < avail; i++) { | ||||
m_freem(r->items[(cidx + i) & (r->size-1)]); | m_free(r->items[(cidx + i) & (r->size-1)]); | ||||
r->items[(cidx + i) & (r->size-1)] = NULL; | r->items[(cidx + i) & (r->size-1)] = NULL; | ||||
} | } | ||||
return (avail); | return (avail); | ||||
} | } | ||||
iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); | iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); | ||||
if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { | if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { | ||||
txq->ift_qstatus = IFLIB_QUEUE_IDLE; | txq->ift_qstatus = IFLIB_QUEUE_IDLE; | ||||
CALLOUT_LOCK(txq); | CALLOUT_LOCK(txq); | ||||
Show All 30 Lines | for (desc_used = i = 0; i < count && TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2; i++) { | ||||
txq->ift_db_pending += (txq->ift_in_use - in_use_prev); | txq->ift_db_pending += (txq->ift_in_use - in_use_prev); | ||||
desc_used += (txq->ift_in_use - in_use_prev); | desc_used += (txq->ift_in_use - in_use_prev); | ||||
iflib_txd_db_check(ctx, txq, FALSE); | iflib_txd_db_check(ctx, txq, FALSE); | ||||
ETHER_BPF_MTAP(ifp, m); | ETHER_BPF_MTAP(ifp, m); | ||||
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) | if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) | ||||
break; | break; | ||||
if (desc_used > TXQ_MAX_DB_CONSUMED(ctx)) | if (desc_used > TXQ_MAX_DB_CONSUMED(txq->ift_size)) | ||||
break; | break; | ||||
} | } | ||||
if ((iflib_min_tx_latency || iflib_txq_min_occupancy(txq)) && txq->ift_db_pending) | if ((iflib_min_tx_latency || iflib_txq_min_occupancy(txq)) && txq->ift_db_pending) | ||||
iflib_txd_db_check(ctx, txq, TRUE); | iflib_txd_db_check(ctx, txq, TRUE); | ||||
else if ((txq->ift_db_pending || TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)) && | else if ((txq->ift_db_pending || TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)) && | ||||
(callout_pending(&txq->ift_db_check) == 0)) { | (callout_pending(&txq->ift_db_check) == 0)) { | ||||
txq->ift_db_pending_queued = txq->ift_db_pending; | txq->ift_db_pending_queued = txq->ift_db_pending; | ||||
callout_reset_on(&txq->ift_db_check, 1, iflib_txd_deferred_db_check, | callout_reset_on(&txq->ift_db_check, 1, iflib_txd_deferred_db_check, | ||||
txq, txq->ift_db_check.c_cpu); | txq, txq->ift_db_check.c_cpu); | ||||
} | } | ||||
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); | if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); | ||||
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); | if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); | ||||
if (mcast_sent) | if (mcast_sent) | ||||
if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); | if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); | ||||
return (consumed); | return (consumed); | ||||
} | } | ||||
static void | static void | ||||
_task_fn_tx(void *context, int pending) | _task_fn_tx(void *context) | ||||
{ | { | ||||
iflib_txq_t txq = context; | iflib_txq_t txq = context; | ||||
if_ctx_t ctx = txq->ift_ctx; | if_ctx_t ctx = txq->ift_ctx; | ||||
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | ||||
return; | return; | ||||
ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE); | ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE); | ||||
} | } | ||||
/*
 * Deferred (gtaskqueue) handler for an RX queue: process received
 * descriptors and, once the ring is drained, re-enable the queue's
 * interrupt (per-queue for MSI-X, device-wide for legacy interrupts).
 */
static void
_task_fn_rx(void *context)
{
	iflib_rxq_t rxq = context;
	if_ctx_t ctx = rxq->ifr_ctx;
	bool more;
	/*
	 * NOTE(review): rc is consumed only by the KASSERT below, so on a
	 * kernel built without INVARIANTS it is set-but-unused -- confirm
	 * this does not trip -Wunused-but-set-variable.
	 */
	int rc;

	DBG_COUNTER_INC(task_fn_rxs);
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	/* Process up to 16 packets; 'more' is true if work remains after that. */
	if ((more = iflib_rxeof(rxq, 16 /* XXX */)) == false) {
		if (ctx->ifc_flags & IFC_LEGACY)
			IFDI_INTR_ENABLE(ctx);
		else {
			DBG_COUNTER_INC(rx_intr_enables);
			/* MSI-X drivers must implement per-queue interrupt enable. */
			rc = IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
		}
	}
	/* The interface may have been stopped while we were in iflib_rxeof(). */
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	/* Work remains: reschedule ourselves instead of re-enabling the interrupt. */
	if (more)
		GROUPTASK_ENQUEUE(&rxq->ifr_task);
}
static void | static void | ||||
_task_fn_admin(void *context, int pending) | _task_fn_admin(void *context) | ||||
{ | { | ||||
if_ctx_t ctx = context; | if_ctx_t ctx = context; | ||||
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; | if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; | ||||
iflib_txq_t txq; | iflib_txq_t txq; | ||||
int i; | int i; | ||||
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | ||||
return; | return; | ||||
Show All 13 Lines | _task_fn_admin(void *context) | ||||
if (LINK_ACTIVE(ctx) == 0) | if (LINK_ACTIVE(ctx) == 0) | ||||
return; | return; | ||||
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) | for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) | ||||
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); | iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); | ||||
} | } | ||||
static void | static void | ||||
_task_fn_iov(void *context, int pending) | _task_fn_iov(void *context) | ||||
{ | { | ||||
if_ctx_t ctx = context; | if_ctx_t ctx = context; | ||||
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) | ||||
return; | return; | ||||
CTX_LOCK(ctx); | CTX_LOCK(ctx); | ||||
IFDI_VFLR_HANDLE(ctx); | IFDI_VFLR_HANDLE(ctx); | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
/*
 * ifnet if_transmit entry point.  Selects a TX queue set from the mbuf's
 * flow hash (when multiple qsets exist) and enqueues the single packet on
 * that queue's software mp_ring; actual descriptor encap happens later in
 * the ring's drain callback.
 *
 * Returns 0 on success (the mbuf is owned by the ring from then on) or the
 * ifmp_ring_enqueue() error, in which case the mbuf is freed here.
 */
static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t	ctx = if_getsoftc(ifp);

	iflib_txq_t txq;
	int err, qidx;

	/* Drop immediately if the interface is down or the link is not up. */
	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(tx_frees);
		m_freem(m);
		return (0);
	}

	/* Only single packets are accepted here; chains are not supported. */
	MPASS(m->m_nextpkt == NULL);
	qidx = 0;
	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
		qidx = QIDX(ctx, m);
	/*
	 * XXX calculate buf_ring based on flowid (divvy up bits?)
	 */
	txq = &ctx->ifc_txqs[qidx];

#ifdef DRIVER_BACKPRESSURE
	/*
	 * NOTE(review): this block references 'next', which is no longer
	 * declared in this function -- it would not compile if
	 * DRIVER_BACKPRESSURE were ever defined.  Confirm before enabling.
	 */
	if (txq->ift_closed) {
		while (m != NULL) {
			next = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
			m = next;
		}
		return (ENOBUFS);
	}
#endif
#ifdef notyet
	/*
	 * Dead code kept for a future batched-transmit path; it also uses
	 * locals (count, mp, marr, next, i) that were removed from this
	 * function and so cannot compile as-is.
	 */
	qidx = count = 0;
	mp = marr;
	next = m;
	do {
		count++;
		next = next->m_nextpkt;
	} while (next != NULL);

	if (count > nitems(marr))
		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
			/* XXX check nextpkt */
			m_freem(m);
			/* XXX simplify for now */
			DBG_COUNTER_INC(tx_frees);
			return (ENOBUFS);
		}
	for (next = m, i = 0; next != NULL; i++) {
		mp[i] = next;
		next = next->m_nextpkt;
		mp[i]->m_nextpkt = NULL;
	}
#endif
	DBG_COUNTER_INC(tx_seen);
	err = ifmp_ring_enqueue(txq->ift_br[0], (void **)&m, 1, TX_BATCH_SIZE);

	if (err) {
		/* Enqueue failed: kick the drain task, drop the packet. */
		GROUPTASK_ENQUEUE(&txq->ift_task);
		/* support forthcoming later */
#ifdef DRIVER_BACKPRESSURE
		txq->ift_closed = TRUE;
#endif
		ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
		/* assumes ifmp_ring_enqueue() does not consume m on failure */
		m_freem(m);
	} else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) {
		/* Ring is over half full: schedule a drain proactively. */
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}

	return (err);
}
static void | static void | ||||
iflib_if_qflush(if_t ifp) | iflib_if_qflush(if_t ifp) | ||||
{ | { | ||||
if_ctx_t ctx = if_getsoftc(ifp); | if_ctx_t ctx = if_getsoftc(ifp); | ||||
iflib_txq_t txq = ctx->ifc_txqs; | iflib_txq_t txq = ctx->ifc_txqs; | ||||
int i; | int i; | ||||
CTX_LOCK(ctx); | CTX_LOCK(ctx); | ||||
ctx->ifc_flags |= IFC_QFLUSH; | ctx->ifc_flags |= IFC_QFLUSH; | ||||
CTX_UNLOCK(ctx); | CTX_UNLOCK(ctx); | ||||
for (i = 0; i < NTXQSETS(ctx); i++, txq++) | for (i = 0; i < NTXQSETS(ctx); i++, txq++) | ||||
while (!(ifmp_ring_is_idle(txq->ift_br[0]) || ifmp_ring_is_stalled(txq->ift_br[0]))) | while (!(ifmp_ring_is_idle(txq->ift_br[0]) || ifmp_ring_is_stalled(txq->ift_br[0]))) | ||||
iflib_txq_check_drain(txq, 0); | iflib_txq_check_drain(txq, 0); | ||||
CTX_LOCK(ctx); | CTX_LOCK(ctx); | ||||
ctx->ifc_flags &= ~IFC_QFLUSH; | ctx->ifc_flags &= ~IFC_QFLUSH; | ||||
CTX_UNLOCK(ctx); | CTX_UNLOCK(ctx); | ||||
if_qflush(ifp); | if_qflush(ifp); | ||||
} | } | ||||
/*
 * Capability bits iflib manages via SIOCSIFCAP.  IFCAP_REINIT is
 * presumably the subset whose toggling requires a full reinit -- confirm
 * against iflib_if_ioctl(); it is currently identical to IFCAP_FLAGS.
 */
#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | \
		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)

#define IFCAP_REINIT IFCAP_FLAGS
static int | static int | ||||
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) | iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) | ||||
{ | { | ||||
if_ctx_t ctx = if_getsoftc(ifp); | if_ctx_t ctx = if_getsoftc(ifp); | ||||
struct ifreq *ifr = (struct ifreq *)data; | struct ifreq *ifr = (struct ifreq *)data; | ||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
struct ifaddr *ifa = (struct ifaddr *)data; | struct ifaddr *ifa = (struct ifaddr *)data; | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 267 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) | iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) | ||||
{ | { | ||||
int err, rid, msix, msix_bar; | int err, rid, msix, msix_bar; | ||||
if_ctx_t ctx; | if_ctx_t ctx; | ||||
if_t ifp; | if_t ifp; | ||||
if_softc_ctx_t scctx; | if_softc_ctx_t scctx; | ||||
int i; | |||||
uint16_t main_txq; | |||||
uint16_t main_rxq; | |||||
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); | ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); | sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); | ||||
device_set_softc(dev, ctx); | device_set_softc(dev, ctx); | ||||
ctx->ifc_flags |= IFC_SC_ALLOCATED; | |||||
} | } | ||||
ctx->ifc_sctx = sctx; | ctx->ifc_sctx = sctx; | ||||
ctx->ifc_dev = dev; | ctx->ifc_dev = dev; | ||||
ctx->ifc_txrx = *sctx->isc_txrx; | ctx->ifc_txrx = *sctx->isc_txrx; | ||||
ctx->ifc_softc = sc; | ctx->ifc_softc = sc; | ||||
if ((err = iflib_register(ctx)) != 0) { | if ((err = iflib_register(ctx)) != 0) { | ||||
device_printf(dev, "iflib_register failed %d\n", err); | device_printf(dev, "iflib_register failed %d\n", err); | ||||
return (err); | return (err); | ||||
} | } | ||||
iflib_add_device_sysctl_pre(ctx); | iflib_add_device_sysctl_pre(ctx); | ||||
scctx = &ctx->ifc_softc_ctx; | |||||
/* | |||||
* XXX sanity check that ntxd & nrxd are a power of 2 | |||||
*/ | |||||
if (ctx->ifc_sysctl_ntxqs != 0) | |||||
scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; | |||||
if (ctx->ifc_sysctl_nrxqs != 0) | |||||
scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; | |||||
for (i = 0; i < sctx->isc_ntxqs; i++) { | |||||
if (ctx->ifc_sysctl_ntxds[i] != 0) | |||||
scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; | |||||
else | |||||
scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; | |||||
} | |||||
for (i = 0; i < sctx->isc_nrxqs; i++) { | |||||
if (ctx->ifc_sysctl_nrxds[i] != 0) | |||||
scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; | |||||
else | |||||
scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; | |||||
} | |||||
for (i = 0; i < sctx->isc_nrxqs; i++) { | |||||
if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { | |||||
device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", | |||||
i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); | |||||
scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; | |||||
} | |||||
if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { | |||||
device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", | |||||
i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); | |||||
scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; | |||||
} | |||||
} | |||||
for (i = 0; i < sctx->isc_ntxqs; i++) { | |||||
if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { | |||||
device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", | |||||
i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); | |||||
scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; | |||||
} | |||||
if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { | |||||
device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", | |||||
i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); | |||||
scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; | |||||
} | |||||
} | |||||
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { | if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { | ||||
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); | device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); | ||||
return (err); | return (err); | ||||
} | } | ||||
if (scctx->isc_ntxqsets_max) | |||||
scctx->isc_ntxqsets = min(scctx->isc_ntxqsets, scctx->isc_ntxqsets_max); | |||||
if (scctx->isc_nrxqsets_max) | |||||
scctx->isc_nrxqsets = min(scctx->isc_nrxqsets, scctx->isc_nrxqsets_max); | |||||
#ifdef ACPI_DMAR | #ifdef ACPI_DMAR | ||||
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) | if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) | ||||
ctx->ifc_flags |= IFC_DMAR; | ctx->ifc_flags |= IFC_DMAR; | ||||
#endif | #endif | ||||
scctx = &ctx->ifc_softc_ctx; | |||||
msix_bar = scctx->isc_msix_bar; | msix_bar = scctx->isc_msix_bar; | ||||
if (scctx->isc_tx_nsegments > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION) | |||||
scctx->isc_tx_nsegments = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION); | |||||
if (scctx->isc_tx_tso_segments_max > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION) | |||||
scctx->isc_tx_tso_segments_max = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION); | |||||
ifp = ctx->ifc_ifp; | ifp = ctx->ifc_ifp; | ||||
/* | if(sctx->isc_flags & IFLIB_HAS_TXCQ) | ||||
* XXX sanity check that ntxd & nrxd are a power of 2 | main_txq = 1; | ||||
*/ | else | ||||
main_txq = 0; | |||||
if(sctx->isc_flags & IFLIB_HAS_RXCQ) | |||||
main_rxq = 1; | |||||
else | |||||
main_rxq = 0; | |||||
/* XXX change for per-queue sizes */ | |||||
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", | |||||
scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); | |||||
for (i = 0; i < sctx->isc_nrxqs; i++) { | |||||
if (!powerof2(scctx->isc_nrxd[i])) { | |||||
/* round down instead? */ | |||||
device_printf(dev, "# rx descriptors must be a power of 2\n"); | |||||
err = EINVAL; | |||||
goto fail; | |||||
} | |||||
} | |||||
for (i = 0; i < sctx->isc_ntxqs; i++) { | |||||
if (!powerof2(scctx->isc_ntxd[i])) { | |||||
device_printf(dev, | |||||
"# tx descriptors must be a power of 2"); | |||||
err = EINVAL; | |||||
goto fail; | |||||
} | |||||
} | |||||
if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / | |||||
MAX_SINGLE_PACKET_FRACTION) | |||||
scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / | |||||
MAX_SINGLE_PACKET_FRACTION); | |||||
if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / | |||||
MAX_SINGLE_PACKET_FRACTION) | |||||
scctx->isc_tx_tso_segments_max = max(1, | |||||
scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); | |||||
/* | /* | ||||
* Protect the stack against modern hardware | * Protect the stack against modern hardware | ||||
*/ | */ | ||||
if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) | if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) | ||||
scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; | scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; | ||||
/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ | /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ | ||||
ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; | ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; | ||||
ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; | ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; | ||||
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; | ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; | ||||
if (scctx->isc_rss_table_size == 0) | if (scctx->isc_rss_table_size == 0) | ||||
scctx->isc_rss_table_size = 64; | scctx->isc_rss_table_size = 64; | ||||
scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;; | scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; | ||||
/* | /* | ||||
** Now setup MSI or MSI/X, should | ** Now setup MSI or MSI/X, should | ||||
** return us the number of supported | ** return us the number of supported | ||||
** vectors. (Will be 1 for MSI) | ** vectors. (Will be 1 for MSI) | ||||
*/ | */ | ||||
if (sctx->isc_flags & IFLIB_SKIP_MSIX) { | if (sctx->isc_flags & IFLIB_SKIP_MSIX) { | ||||
msix = scctx->isc_vectors; | msix = scctx->isc_vectors; | ||||
} else if (scctx->isc_msix_bar != 0) | } else if (scctx->isc_msix_bar != 0) | ||||
Show All 21 Lines | if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { | ||||
goto fail_intr_free; | goto fail_intr_free; | ||||
} | } | ||||
if (msix <= 1) { | if (msix <= 1) { | ||||
rid = 0; | rid = 0; | ||||
if (scctx->isc_intr == IFLIB_INTR_MSI) { | if (scctx->isc_intr == IFLIB_INTR_MSI) { | ||||
MPASS(msix == 1); | MPASS(msix == 1); | ||||
rid = 1; | rid = 1; | ||||
} | } | ||||
if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx, &rid, "irq0")) != 0) { | if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { | ||||
device_printf(dev, "iflib_legacy_setup failed %d\n", err); | device_printf(dev, "iflib_legacy_setup failed %d\n", err); | ||||
goto fail_intr_free; | goto fail_intr_free; | ||||
} | } | ||||
} | } | ||||
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); | ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); | ||||
if ((err = IFDI_ATTACH_POST(ctx)) != 0) { | if ((err = IFDI_ATTACH_POST(ctx)) != 0) { | ||||
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); | device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); | ||||
goto fail_detach; | goto fail_detach; | ||||
} | } | ||||
if ((err = iflib_netmap_attach(ctx))) { | if ((err = iflib_netmap_attach(ctx))) { | ||||
device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); | device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); | ||||
goto fail_detach; | goto fail_detach; | ||||
} | } | ||||
*ctxp = ctx; | *ctxp = ctx; | ||||
if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); | |||||
iflib_add_device_sysctl_post(ctx); | iflib_add_device_sysctl_post(ctx); | ||||
return (0); | return (0); | ||||
fail_detach: | fail_detach: | ||||
ether_ifdetach(ctx->ifc_ifp); | ether_ifdetach(ctx->ifc_ifp); | ||||
fail_intr_free: | fail_intr_free: | ||||
if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) | if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) | ||||
pci_release_msi(ctx->ifc_dev); | pci_release_msi(ctx->ifc_dev); | ||||
fail_queues: | fail_queues: | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | iflib_device_deregister(if_ctx_t ctx) | ||||
iflib_netmap_detach(ifp); | iflib_netmap_detach(ifp); | ||||
ether_ifdetach(ifp); | ether_ifdetach(ifp); | ||||
/* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ | /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ | ||||
CTX_LOCK_DESTROY(ctx); | CTX_LOCK_DESTROY(ctx); | ||||
if (ctx->ifc_led_dev != NULL) | if (ctx->ifc_led_dev != NULL) | ||||
led_destroy(ctx->ifc_led_dev); | led_destroy(ctx->ifc_led_dev); | ||||
/* XXX drain any dependent tasks */ | /* XXX drain any dependent tasks */ | ||||
tqg = qgroup_if_io_tqg; | tqg = qgroup_if_io_tqg; | ||||
for (txq = ctx->ifc_txqs, i = 0, rxq = ctx->ifc_rxqs; i < NTXQSETS(ctx); i++, txq++) { | for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { | ||||
callout_drain(&txq->ift_timer); | callout_drain(&txq->ift_timer); | ||||
callout_drain(&txq->ift_db_check); | callout_drain(&txq->ift_db_check); | ||||
if (txq->ift_task.gt_uniq != NULL) | if (txq->ift_task.gt_uniq != NULL) | ||||
taskqgroup_detach(tqg, &txq->ift_task); | taskqgroup_detach(tqg, &txq->ift_task); | ||||
} | } | ||||
for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { | for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { | ||||
if (rxq->ifr_task.gt_uniq != NULL) | if (rxq->ifr_task.gt_uniq != NULL) | ||||
taskqgroup_detach(tqg, &rxq->ifr_task); | taskqgroup_detach(tqg, &rxq->ifr_task); | ||||
} | } | ||||
tqg = qgroup_if_config_tqg; | tqg = qgroup_if_config_tqg; | ||||
if (ctx->ifc_admin_task.gt_uniq != NULL) | if (ctx->ifc_admin_task.gt_uniq != NULL) | ||||
taskqgroup_detach(tqg, &ctx->ifc_admin_task); | taskqgroup_detach(tqg, &ctx->ifc_admin_task); | ||||
if (ctx->ifc_vflr_task.gt_uniq != NULL) | if (ctx->ifc_vflr_task.gt_uniq != NULL) | ||||
taskqgroup_detach(tqg, &ctx->ifc_vflr_task); | taskqgroup_detach(tqg, &ctx->ifc_vflr_task); | ||||
IFDI_DETACH(ctx); | IFDI_DETACH(ctx); | ||||
device_set_softc(ctx->ifc_dev, NULL); | |||||
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { | if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { | ||||
pci_release_msi(dev); | pci_release_msi(dev); | ||||
} | } | ||||
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { | if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { | ||||
iflib_irq_free(ctx, &ctx->ifc_legacy_irq); | iflib_irq_free(ctx, &ctx->ifc_legacy_irq); | ||||
} | } | ||||
if (ctx->ifc_msix_mem != NULL) { | if (ctx->ifc_msix_mem != NULL) { | ||||
bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, | bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, | ||||
ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); | ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); | ||||
ctx->ifc_msix_mem = NULL; | ctx->ifc_msix_mem = NULL; | ||||
} | } | ||||
bus_generic_detach(dev); | bus_generic_detach(dev); | ||||
if_free(ifp); | if_free(ifp); | ||||
iflib_tx_structures_free(ctx); | iflib_tx_structures_free(ctx); | ||||
iflib_rx_structures_free(ctx); | iflib_rx_structures_free(ctx); | ||||
if (ctx->ifc_flags & IFC_SC_ALLOCATED) | |||||
free(ctx->ifc_softc, M_IFLIB); | |||||
free(ctx, M_IFLIB); | |||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
iflib_device_detach(device_t dev) | iflib_device_detach(device_t dev) | ||||
{ | { | ||||
if_ctx_t ctx = device_get_softc(dev); | if_ctx_t ctx = device_get_softc(dev); | ||||
▲ Show 20 Lines • Show All 133 Lines • ▼ Show 20 Lines | _iflib_assert(if_shared_ctx_t sctx) | ||||
MPASS(sctx->isc_txrx->ift_txd_encap); | MPASS(sctx->isc_txrx->ift_txd_encap); | ||||
MPASS(sctx->isc_txrx->ift_txd_flush); | MPASS(sctx->isc_txrx->ift_txd_flush); | ||||
MPASS(sctx->isc_txrx->ift_txd_credits_update); | MPASS(sctx->isc_txrx->ift_txd_credits_update); | ||||
MPASS(sctx->isc_txrx->ift_rxd_available); | MPASS(sctx->isc_txrx->ift_rxd_available); | ||||
MPASS(sctx->isc_txrx->ift_rxd_pkt_get); | MPASS(sctx->isc_txrx->ift_rxd_pkt_get); | ||||
MPASS(sctx->isc_txrx->ift_rxd_refill); | MPASS(sctx->isc_txrx->ift_rxd_refill); | ||||
MPASS(sctx->isc_txrx->ift_rxd_flush); | MPASS(sctx->isc_txrx->ift_rxd_flush); | ||||
MPASS(sctx->isc_nrxd); | |||||
MPASS(sctx->isc_nrxd_min[0]); | |||||
MPASS(sctx->isc_nrxd_max[0]); | |||||
MPASS(sctx->isc_nrxd_default[0]); | |||||
MPASS(sctx->isc_ntxd_min[0]); | |||||
MPASS(sctx->isc_ntxd_max[0]); | |||||
MPASS(sctx->isc_ntxd_default[0]); | |||||
} | } | ||||
/*
 * Allocate and initialize the ifnet for this context and wire it to the
 * iflib entry points: validates the shared-context method table, compiles
 * the driver's kobj class, installs the if_* handlers, and registers the
 * VLAN config/unconfig event handlers and the ifmedia state.
 *
 * Returns 0 on success or ENOMEM if the ifnet cannot be allocated.
 */
static int
iflib_register(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	driver_t *driver = sctx->isc_driver;
	device_t dev = ctx->ifc_dev;
	if_t ifp;

	/* Verify the driver filled in every mandatory txrx method/limit. */
	_iflib_assert(sctx);

	CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
	/*
	 * NOTE(review): iflib_device_register() may set IFC_SC_ALLOCATED on
	 * ctx before calling here (softc-allocated path) -- confirm this
	 * assertion cannot trip in that case.
	 */
	MPASS(ctx->ifc_flags == 0);

	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (ENOMEM);
	}

	/*
	 * Initialize our context's device specific methods
	 */
	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
	kobj_class_compile((kobj_class_t) driver);
	driver->refs++;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, ctx);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, iflib_if_init);
	if_setioctlfn(ifp, iflib_if_ioctl);
	if_settransmitfn(ifp, iflib_if_transmit);
	if_setqflushfn(ifp, iflib_if_qflush);
	if_setgetcounterfn(ifp, iflib_if_get_counter);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	/* Drivers enable capabilities during IFDI_ATTACH_PRE. */
	if_setcapabilities(ifp, 0);
	if_setcapenable(ifp, 0);

	ctx->ifc_vlan_attach_event =
		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
							  EVENTHANDLER_PRI_FIRST);
	ctx->ifc_vlan_detach_event =
		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
							  EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
					 iflib_media_change, iflib_media_status);

	return (0);
}
static int | static int | ||||
iflib_queues_alloc(if_ctx_t ctx) | iflib_queues_alloc(if_ctx_t ctx) | ||||
{ | { | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | |||||
device_t dev = ctx->ifc_dev; | device_t dev = ctx->ifc_dev; | ||||
int nrxqsets = ctx->ifc_softc_ctx.isc_nrxqsets; | int nrxqsets = scctx->isc_nrxqsets; | ||||
int ntxqsets = ctx->ifc_softc_ctx.isc_ntxqsets; | int ntxqsets = scctx->isc_ntxqsets; | ||||
iflib_txq_t txq; | iflib_txq_t txq; | ||||
iflib_rxq_t rxq; | iflib_rxq_t rxq; | ||||
iflib_fl_t fl = NULL; | iflib_fl_t fl = NULL; | ||||
int i, j, cpu, err, txconf, rxconf, fl_ifdi_offset; | int i, j, cpu, err, txconf, rxconf; | ||||
iflib_dma_info_t ifdip; | iflib_dma_info_t ifdip; | ||||
uint32_t *rxqsizes = sctx->isc_rxqsizes; | uint32_t *rxqsizes = scctx->isc_rxqsizes; | ||||
uint32_t *txqsizes = sctx->isc_txqsizes; | uint32_t *txqsizes = scctx->isc_txqsizes; | ||||
uint8_t nrxqs = sctx->isc_nrxqs; | uint8_t nrxqs = sctx->isc_nrxqs; | ||||
uint8_t ntxqs = sctx->isc_ntxqs; | uint8_t ntxqs = sctx->isc_ntxqs; | ||||
int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; | int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; | ||||
caddr_t *vaddrs; | caddr_t *vaddrs; | ||||
uint64_t *paddrs; | uint64_t *paddrs; | ||||
struct ifmp_ring **brscp; | struct ifmp_ring **brscp; | ||||
int nbuf_rings = 1; /* XXX determine dynamically */ | int nbuf_rings = 1; /* XXX determine dynamically */ | ||||
KASSERT(ntxqs > 0, ("number of queues must be at least 1")); | KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); | ||||
KASSERT(nrxqs > 0, ("number of queues must be at least 1")); | KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); | ||||
brscp = NULL; | brscp = NULL; | ||||
txq = NULL; | |||||
rxq = NULL; | rxq = NULL; | ||||
/* Allocate the TX ring struct memory */ | /* Allocate the TX ring struct memory */ | ||||
if (!(txq = | if (!(txq = | ||||
(iflib_txq_t) malloc(sizeof(struct iflib_txq) * | (iflib_txq_t) malloc(sizeof(struct iflib_txq) * | ||||
ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { | ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate TX ring memory\n"); | device_printf(dev, "Unable to allocate TX ring memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
Show All 11 Lines | /* Allocate the TX ring struct memory */ | ||||
if (!(brscp = malloc(sizeof(void *) * nbuf_rings * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { | if (!(brscp = malloc(sizeof(void *) * nbuf_rings * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to buf_ring_sc * memory\n"); | device_printf(dev, "Unable to buf_ring_sc * memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto rx_fail; | goto rx_fail; | ||||
} | } | ||||
ctx->ifc_txqs = txq; | ctx->ifc_txqs = txq; | ||||
ctx->ifc_rxqs = rxq; | ctx->ifc_rxqs = rxq; | ||||
txq = NULL; | |||||
rxq = NULL; | |||||
/* | /* | ||||
* XXX handle allocation failure | * XXX handle allocation failure | ||||
*/ | */ | ||||
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { | for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { | ||||
/* Set up some basics */ | /* Set up some basics */ | ||||
if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { | if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { | ||||
device_printf(dev, "failed to allocate iflib_dma_info\n"); | device_printf(dev, "failed to allocate iflib_dma_info\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto err_tx_desc; | goto err_tx_desc; | ||||
} | } | ||||
txq->ift_ifdi = ifdip; | txq->ift_ifdi = ifdip; | ||||
for (j = 0; j < ntxqs; j++, ifdip++) { | for (j = 0; j < ntxqs; j++, ifdip++) { | ||||
if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { | if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { | ||||
device_printf(dev, "Unable to allocate Descriptor memory\n"); | device_printf(dev, "Unable to allocate Descriptor memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto err_tx_desc; | goto err_tx_desc; | ||||
} | } | ||||
bzero((void *)ifdip->idi_vaddr, txqsizes[j]); | bzero((void *)ifdip->idi_vaddr, txqsizes[j]); | ||||
} | } | ||||
txq->ift_ctx = ctx; | txq->ift_ctx = ctx; | ||||
txq->ift_id = i; | txq->ift_id = i; | ||||
if (sctx->isc_flags & IFLIB_HAS_TXCQ) { | |||||
txq->ift_br_offset = 1; | |||||
} else { | |||||
txq->ift_br_offset = 0; | |||||
} | |||||
/* XXX fix this */ | /* XXX fix this */ | ||||
txq->ift_timer.c_cpu = cpu; | txq->ift_timer.c_cpu = cpu; | ||||
txq->ift_db_check.c_cpu = cpu; | txq->ift_db_check.c_cpu = cpu; | ||||
txq->ift_nbr = nbuf_rings; | txq->ift_nbr = nbuf_rings; | ||||
if (iflib_txsd_alloc(txq)) { | if (iflib_txsd_alloc(txq)) { | ||||
device_printf(dev, "Critical Failure setting up TX buffers\n"); | device_printf(dev, "Critical Failure setting up TX buffers\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
Show All 38 Lines | for (j = 0; j < nrxqs; j++, ifdip++) { | ||||
device_printf(dev, "Unable to allocate Descriptor memory\n"); | device_printf(dev, "Unable to allocate Descriptor memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto err_tx_desc; | goto err_tx_desc; | ||||
} | } | ||||
bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); | bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); | ||||
} | } | ||||
rxq->ifr_ctx = ctx; | rxq->ifr_ctx = ctx; | ||||
rxq->ifr_id = i; | rxq->ifr_id = i; | ||||
if (sctx->isc_flags & IFLIB_HAS_CQ) { | if (sctx->isc_flags & IFLIB_HAS_RXCQ) { | ||||
fl_ifdi_offset = 1; | rxq->ifr_fl_offset = 1; | ||||
} else { | } else { | ||||
fl_ifdi_offset = 0; | rxq->ifr_fl_offset = 0; | ||||
} | } | ||||
rxq->ifr_nfl = nfree_lists; | rxq->ifr_nfl = nfree_lists; | ||||
if (!(fl = | if (!(fl = | ||||
(iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { | (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate free list memory\n"); | device_printf(dev, "Unable to allocate free list memory\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto err_tx_desc; | goto err_tx_desc; | ||||
} | } | ||||
rxq->ifr_fl = fl; | rxq->ifr_fl = fl; | ||||
for (j = 0; j < nfree_lists; j++) { | for (j = 0; j < nfree_lists; j++) { | ||||
rxq->ifr_fl[j].ifl_rxq = rxq; | rxq->ifr_fl[j].ifl_rxq = rxq; | ||||
rxq->ifr_fl[j].ifl_id = j; | rxq->ifr_fl[j].ifl_id = j; | ||||
rxq->ifr_fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + fl_ifdi_offset]; | rxq->ifr_fl[j].ifl_ifdi = | ||||
&rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; | |||||
} | } | ||||
/* Allocate receive buffers for the ring*/ | /* Allocate receive buffers for the ring*/ | ||||
if (iflib_rxsd_alloc(rxq)) { | if (iflib_rxsd_alloc(rxq)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Critical Failure setting up receive buffers\n"); | "Critical Failure setting up receive buffers\n"); | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto err_rx_desc; | goto err_rx_desc; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | iflib_rx_structures_setup(if_ctx_t ctx) | ||||
int q; | int q; | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
int i, err; | int i, err; | ||||
#endif | #endif | ||||
for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { | for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
tcp_lro_free(&rxq->ifr_lc); | tcp_lro_free(&rxq->ifr_lc); | ||||
if ((err = tcp_lro_init(&rxq->ifr_lc)) != 0) { | if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, | ||||
TCP_LRO_ENTRIES, min(1024, | |||||
ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { | |||||
device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); | device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
rxq->ifr_lro_enabled = TRUE; | rxq->ifr_lro_enabled = TRUE; | ||||
rxq->ifr_lc.ifp = ctx->ifc_ifp; | |||||
#endif | #endif | ||||
IFDI_RXQ_SETUP(ctx, rxq->ifr_id); | IFDI_RXQ_SETUP(ctx, rxq->ifr_id); | ||||
} | } | ||||
return (0); | return (0); | ||||
#if defined(INET6) || defined(INET) | #if defined(INET6) || defined(INET) | ||||
fail: | fail: | ||||
/* | /* | ||||
* Free RX software descriptors allocated so far, we will only handle | * Free RX software descriptors allocated so far, we will only handle | ||||
Show All 14 Lines | |||||
* Free all receive rings. | * Free all receive rings. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
iflib_rx_structures_free(if_ctx_t ctx) | iflib_rx_structures_free(if_ctx_t ctx) | ||||
{ | { | ||||
iflib_rxq_t rxq = ctx->ifc_rxqs; | iflib_rxq_t rxq = ctx->ifc_rxqs; | ||||
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, rxq++) { | for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { | ||||
iflib_rx_sds_free(rxq); | iflib_rx_sds_free(rxq); | ||||
} | } | ||||
} | } | ||||
static int | static int | ||||
iflib_qset_structures_setup(if_ctx_t ctx) | iflib_qset_structures_setup(if_ctx_t ctx) | ||||
{ | { | ||||
int err; | int err; | ||||
Show All 34 Lines | |||||
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, | iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, | ||||
iflib_intr_type_t type, driver_filter_t *filter, | iflib_intr_type_t type, driver_filter_t *filter, | ||||
void *filter_arg, int qid, char *name) | void *filter_arg, int qid, char *name) | ||||
{ | { | ||||
struct grouptask *gtask; | struct grouptask *gtask; | ||||
struct taskqgroup *tqg; | struct taskqgroup *tqg; | ||||
iflib_filter_info_t info; | iflib_filter_info_t info; | ||||
cpuset_t cpus; | cpuset_t cpus; | ||||
task_fn_t *fn; | gtask_fn_t *fn; | ||||
int tqrid, err; | int tqrid, err; | ||||
void *q; | void *q; | ||||
info = &ctx->ifc_filter_info; | info = &ctx->ifc_filter_info; | ||||
switch (type) { | switch (type) { | ||||
/* XXX merge tx/rx for netmap? */ | /* XXX merge tx/rx for netmap? */ | ||||
case IFLIB_INTR_TX: | case IFLIB_INTR_TX: | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
iflib_softirq_alloc_generic(if_ctx_t ctx, int rid, iflib_intr_type_t type, void *arg, int qid, char *name) | iflib_softirq_alloc_generic(if_ctx_t ctx, int rid, iflib_intr_type_t type, void *arg, int qid, char *name) | ||||
{ | { | ||||
struct grouptask *gtask; | struct grouptask *gtask; | ||||
struct taskqgroup *tqg; | struct taskqgroup *tqg; | ||||
task_fn_t *fn; | gtask_fn_t *fn; | ||||
void *q; | void *q; | ||||
switch (type) { | switch (type) { | ||||
case IFLIB_INTR_TX: | case IFLIB_INTR_TX: | ||||
q = &ctx->ifc_txqs[qid]; | q = &ctx->ifc_txqs[qid]; | ||||
gtask = &ctx->ifc_txqs[qid].ift_task; | gtask = &ctx->ifc_txqs[qid].ift_task; | ||||
tqg = qgroup_if_io_tqg; | tqg = qgroup_if_io_tqg; | ||||
fn = _task_fn_tx; | fn = _task_fn_tx; | ||||
Show All 39 Lines | |||||
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) | iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) | ||||
{ | { | ||||
iflib_txq_t txq = ctx->ifc_txqs; | iflib_txq_t txq = ctx->ifc_txqs; | ||||
iflib_rxq_t rxq = ctx->ifc_rxqs; | iflib_rxq_t rxq = ctx->ifc_rxqs; | ||||
if_irq_t irq = &ctx->ifc_legacy_irq; | if_irq_t irq = &ctx->ifc_legacy_irq; | ||||
iflib_filter_info_t info; | iflib_filter_info_t info; | ||||
struct grouptask *gtask; | struct grouptask *gtask; | ||||
struct taskqgroup *tqg; | struct taskqgroup *tqg; | ||||
task_fn_t *fn; | gtask_fn_t *fn; | ||||
int tqrid; | int tqrid; | ||||
void *q; | void *q; | ||||
int err; | int err; | ||||
q = &ctx->ifc_rxqs[0]; | q = &ctx->ifc_rxqs[0]; | ||||
info = &rxq[0].ifr_filter_info; | info = &rxq[0].ifr_filter_info; | ||||
gtask = &rxq[0].ifr_task; | gtask = &rxq[0].ifr_task; | ||||
tqg = qgroup_if_io_tqg; | tqg = qgroup_if_io_tqg; | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) | iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) | ||||
{ | { | ||||
taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); | taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); | ||||
} | } | ||||
void | void | ||||
iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, task_fn_t *fn, | iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, | ||||
char *name) | char *name) | ||||
{ | { | ||||
GROUPTASK_INIT(gtask, 0, fn, ctx); | GROUPTASK_INIT(gtask, 0, fn, ctx); | ||||
taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); | taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); | ||||
} | } | ||||
void | void | ||||
iflib_link_state_change(if_ctx_t ctx, int link_state) | iflib_config_gtask_deinit(struct grouptask *gtask) | ||||
{ | { | ||||
taskqgroup_detach(qgroup_if_config_tqg, gtask); | |||||
} | |||||
void | |||||
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) | |||||
{ | |||||
if_t ifp = ctx->ifc_ifp; | if_t ifp = ctx->ifc_ifp; | ||||
iflib_txq_t txq = ctx->ifc_txqs; | iflib_txq_t txq = ctx->ifc_txqs; | ||||
#if 0 | |||||
if_setbaudrate(ifp, baudrate); | if_setbaudrate(ifp, baudrate); | ||||
#endif | |||||
/* If link down, disable watchdog */ | /* If link down, disable watchdog */ | ||||
if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { | if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { | ||||
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) | for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) | ||||
txq->ift_qstatus = IFLIB_QUEUE_IDLE; | txq->ift_qstatus = IFLIB_QUEUE_IDLE; | ||||
} | } | ||||
ctx->ifc_link_state = link_state; | ctx->ifc_link_state = link_state; | ||||
if_link_state_change(ifp, link_state); | if_link_state_change(ifp, link_state); | ||||
} | } | ||||
Show All 13 Lines | iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) | ||||
txq->ift_cidx_processed += credits; | txq->ift_cidx_processed += credits; | ||||
if (txq->ift_cidx_processed >= txq->ift_size) | if (txq->ift_cidx_processed >= txq->ift_size) | ||||
txq->ift_cidx_processed -= txq->ift_size; | txq->ift_cidx_processed -= txq->ift_size; | ||||
return (credits); | return (credits); | ||||
} | } | ||||
static int | static int | ||||
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx) | iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget) | ||||
{ | { | ||||
return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx)); | return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, | ||||
budget)); | |||||
} | } | ||||
void | void | ||||
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, | iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, | ||||
const char *description, if_int_delay_info_t info, | const char *description, if_int_delay_info_t info, | ||||
int offset, int value) | int offset, int value) | ||||
{ | { | ||||
info->iidi_ctx = ctx; | info->iidi_ctx = ctx; | ||||
Show All 17 Lines | |||||
{ | { | ||||
device_t dev = ctx->ifc_dev; | device_t dev = ctx->ifc_dev; | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | ||||
int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; | int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; | ||||
int iflib_num_tx_queues, iflib_num_rx_queues; | int iflib_num_tx_queues, iflib_num_rx_queues; | ||||
int err, admincnt, bar; | int err, admincnt, bar; | ||||
iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; | iflib_num_tx_queues = scctx->isc_ntxqsets; | ||||
iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; | iflib_num_rx_queues = scctx->isc_nrxqsets; | ||||
bar = ctx->ifc_softc_ctx.isc_msix_bar; | bar = ctx->ifc_softc_ctx.isc_msix_bar; | ||||
admincnt = sctx->isc_admin_intrcnt; | admincnt = sctx->isc_admin_intrcnt; | ||||
/* Override by tuneable */ | /* Override by tuneable */ | ||||
if (enable_msix == 0) | if (enable_msix == 0) | ||||
goto msi; | goto msi; | ||||
/* | /* | ||||
** When used in a virtualized environment | ** When used in a virtualized environment | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | #endif | ||||
/* Figure out a reasonable auto config value */ | /* Figure out a reasonable auto config value */ | ||||
queues = min(queuemsgs, mp_ncpus); | queues = min(queuemsgs, mp_ncpus); | ||||
} | } | ||||
#ifdef RSS | #ifdef RSS | ||||
/* If we're doing RSS, clamp at the number of RSS buckets */ | /* If we're doing RSS, clamp at the number of RSS buckets */ | ||||
if (queues > rss_getnumbuckets()) | if (queues > rss_getnumbuckets()) | ||||
queues = rss_getnumbuckets(); | queues = rss_getnumbuckets(); | ||||
#endif | #endif | ||||
if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queues) | if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) | ||||
queues = rx_queues = iflib_num_rx_queues; | rx_queues = iflib_num_rx_queues; | ||||
else | else | ||||
rx_queues = queues; | rx_queues = queues; | ||||
/* | |||||
* We want this to be all logical CPUs by default | |||||
*/ | |||||
if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) | if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) | ||||
tx_queues = iflib_num_tx_queues; | tx_queues = iflib_num_tx_queues; | ||||
else | else | ||||
tx_queues = queues; | tx_queues = mp_ncpus; | ||||
if (ctx->ifc_sysctl_qs_eq_override == 0) { | |||||
#ifdef INVARIANTS | |||||
if (tx_queues != rx_queues) | |||||
device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", | |||||
min(rx_queues, tx_queues), min(rx_queues, tx_queues)); | |||||
#endif | |||||
tx_queues = min(rx_queues, tx_queues); | |||||
rx_queues = min(rx_queues, tx_queues); | |||||
} | |||||
device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); | device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); | ||||
vectors = queues + admincnt; | vectors = rx_queues + admincnt; | ||||
if ((err = pci_alloc_msix(dev, &vectors)) == 0) { | if ((err = pci_alloc_msix(dev, &vectors)) == 0) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Using MSIX interrupts with %d vectors\n", vectors); | "Using MSIX interrupts with %d vectors\n", vectors); | ||||
scctx->isc_vectors = vectors; | scctx->isc_vectors = vectors; | ||||
scctx->isc_nrxqsets = rx_queues; | scctx->isc_nrxqsets = rx_queues; | ||||
scctx->isc_ntxqsets = tx_queues; | scctx->isc_ntxqsets = tx_queues; | ||||
scctx->isc_intr = IFLIB_INTR_MSIX; | scctx->isc_intr = IFLIB_INTR_MSIX; | ||||
return (vectors); | return (vectors); | ||||
} else { | } else { | ||||
device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); | device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); | ||||
} | } | ||||
msi: | msi: | ||||
vectors = pci_msi_count(dev); | vectors = pci_msi_count(dev); | ||||
scctx->isc_nrxqsets = 1; | scctx->isc_nrxqsets = 1; | ||||
scctx->isc_ntxqsets = 1; | scctx->isc_ntxqsets = 1; | ||||
Show All 33 Lines | mp_ring_state_handler(SYSCTL_HANDLER_ARGS) | ||||
sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", | sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", | ||||
state[0], state[1], state[2], ring_state); | state[0], state[1], state[2], ring_state); | ||||
rc = sbuf_finish(sb); | rc = sbuf_finish(sb); | ||||
sbuf_delete(sb); | sbuf_delete(sb); | ||||
return(rc); | return(rc); | ||||
} | } | ||||
enum iflib_ndesc_handler { | |||||
IFLIB_NTXD_HANDLER, | |||||
IFLIB_NRXD_HANDLER, | |||||
}; | |||||
static int | |||||
mp_ndesc_handler(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
if_ctx_t ctx = (void *)arg1; | |||||
enum iflib_ndesc_handler type = arg2; | |||||
char buf[256] = {0}; | |||||
uint16_t *ndesc; | |||||
char *p, *next; | |||||
int nqs, rc, i; | |||||
MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); | |||||
nqs = 8; | |||||
switch(type) { | |||||
case IFLIB_NTXD_HANDLER: | |||||
ndesc = ctx->ifc_sysctl_ntxds; | |||||
if (ctx->ifc_sctx) | |||||
nqs = ctx->ifc_sctx->isc_ntxqs; | |||||
break; | |||||
case IFLIB_NRXD_HANDLER: | |||||
ndesc = ctx->ifc_sysctl_nrxds; | |||||
if (ctx->ifc_sctx) | |||||
nqs = ctx->ifc_sctx->isc_nrxqs; | |||||
break; | |||||
} | |||||
if (nqs == 0) | |||||
nqs = 8; | |||||
for (i=0; i<8; i++) { | |||||
if (i >= nqs) | |||||
break; | |||||
if (i) | |||||
strcat(buf, ","); | |||||
sprintf(strchr(buf, 0), "%d", ndesc[i]); | |||||
} | |||||
rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); | |||||
if (rc || req->newptr == NULL) | |||||
return rc; | |||||
for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; | |||||
i++, p = strsep(&next, " ,")) { | |||||
ndesc[i] = strtoul(p, NULL, 10); | |||||
} | |||||
return(rc); | |||||
} | |||||
#define NAME_BUFLEN 32 | #define NAME_BUFLEN 32 | ||||
static void | static void | ||||
iflib_add_device_sysctl_pre(if_ctx_t ctx) | iflib_add_device_sysctl_pre(if_ctx_t ctx) | ||||
{ | { | ||||
device_t dev = iflib_get_dev(ctx); | device_t dev = iflib_get_dev(ctx); | ||||
struct sysctl_oid_list *child, *oid_list; | struct sysctl_oid_list *child, *oid_list; | ||||
struct sysctl_ctx_list *ctx_list; | struct sysctl_ctx_list *ctx_list; | ||||
struct sysctl_oid *node; | struct sysctl_oid *node; | ||||
ctx_list = device_get_sysctl_ctx(dev); | ctx_list = device_get_sysctl_ctx(dev); | ||||
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); | child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); | ||||
ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", | ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", | ||||
CTLFLAG_RD, NULL, "IFLIB fields"); | CTLFLAG_RD, NULL, "IFLIB fields"); | ||||
oid_list = SYSCTL_CHILDREN(node); | oid_list = SYSCTL_CHILDREN(node); | ||||
SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", | |||||
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, | |||||
"driver version"); | |||||
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", | SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", | ||||
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, | CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, | ||||
"# of txqs to use, 0 => use default #"); | "# of txqs to use, 0 => use default #"); | ||||
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", | SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", | ||||
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, | CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, | ||||
"# of txqs to use, 0 => use default #"); | "# of rxqs to use, 0 => use default #"); | ||||
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxds", | SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", | ||||
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxds, 0, | CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, | ||||
"# of tx descriptors to use, 0 => use default #"); | "permit #txq != #rxq"); | ||||
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxds", | |||||
CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxds, 0, | |||||
"# of rx descriptors to use, 0 => use default #"); | |||||
/* XXX change for per-queue sizes */ | |||||
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", | |||||
CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, | |||||
mp_ndesc_handler, "A", | |||||
"list of # of tx descriptors to use, 0 = use default #"); | |||||
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", | |||||
CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, | |||||
mp_ndesc_handler, "A", | |||||
"list of # of rx descriptors to use, 0 = use default #"); | |||||
} | } | ||||
static void | static void | ||||
iflib_add_device_sysctl_post(if_ctx_t ctx) | iflib_add_device_sysctl_post(if_ctx_t ctx) | ||||
{ | { | ||||
if_shared_ctx_t sctx = ctx->ifc_sctx; | if_shared_ctx_t sctx = ctx->ifc_sctx; | ||||
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; | ||||
device_t dev = iflib_get_dev(ctx); | device_t dev = iflib_get_dev(ctx); | ||||
Show All 37 Lines | #endif | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&txq->ift_pullups, "# of times m_pullup was called"); | &txq->ift_pullups, "# of times m_pullup was called"); | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); | &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&txq->ift_mbuf_defrag_failed, "# of times no descriptors were available"); | &txq->ift_no_desc_avail, "# of times no descriptors were available"); | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&txq->ift_map_failed, "# of times dma map failed"); | &txq->ift_map_failed, "# of times dma map failed"); | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); | &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); | ||||
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", | SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | else if (scctx->isc_nrxqsets > 10) | ||||
qfmt = "rxq%02d"; | qfmt = "rxq%02d"; | ||||
else | else | ||||
qfmt = "rxq%d"; | qfmt = "rxq%d"; | ||||
for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { | for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { | ||||
snprintf(namebuf, NAME_BUFLEN, qfmt, i); | snprintf(namebuf, NAME_BUFLEN, qfmt, i); | ||||
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, | queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, | ||||
CTLFLAG_RD, NULL, "Queue Name"); | CTLFLAG_RD, NULL, "Queue Name"); | ||||
queue_list = SYSCTL_CHILDREN(queue_node); | queue_list = SYSCTL_CHILDREN(queue_node); | ||||
if (sctx->isc_flags & IFLIB_HAS_CQ) { | if (sctx->isc_flags & IFLIB_HAS_RXCQ) { | ||||
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", | SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&rxq->ifr_cq_pidx, 1, "Producer Index"); | &rxq->ifr_cq_pidx, 1, "Producer Index"); | ||||
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", | SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", | ||||
CTLFLAG_RD, | CTLFLAG_RD, | ||||
&rxq->ifr_cq_cidx, 1, "Consumer Index"); | &rxq->ifr_cq_cidx, 1, "Consumer Index"); | ||||
} | } | ||||
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { | for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { | ||||
Show All 32 Lines |