Changeset View
Standalone View
sys/net/iflib.c
Show First 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | |||||
typedef struct iflib_txq *iflib_txq_t; | typedef struct iflib_txq *iflib_txq_t; | ||||
struct iflib_rxq; | struct iflib_rxq; | ||||
typedef struct iflib_rxq *iflib_rxq_t; | typedef struct iflib_rxq *iflib_rxq_t; | ||||
struct iflib_fl; | struct iflib_fl; | ||||
typedef struct iflib_fl *iflib_fl_t; | typedef struct iflib_fl *iflib_fl_t; | ||||
struct iflib_ctx; | struct iflib_ctx; | ||||
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); | |||||
typedef struct iflib_filter_info { | typedef struct iflib_filter_info { | ||||
driver_filter_t *ifi_filter; | driver_filter_t *ifi_filter; | ||||
void *ifi_filter_arg; | void *ifi_filter_arg; | ||||
struct grouptask *ifi_task; | struct grouptask *ifi_task; | ||||
void *ifi_ctx; | void *ifi_ctx; | ||||
} *iflib_filter_info_t; | } *iflib_filter_info_t; | ||||
struct iflib_ctx { | struct iflib_ctx { | ||||
▲ Show 20 Lines • Show All 570 Lines • ▼ Show 20 Lines | |||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
#include <sys/selinfo.h> | #include <sys/selinfo.h> | ||||
#include <net/netmap.h> | #include <net/netmap.h> | ||||
#include <dev/netmap/netmap_kern.h> | #include <dev/netmap/netmap_kern.h> | ||||
MODULE_DEPEND(iflib, netmap, 1, 1, 1); | MODULE_DEPEND(iflib, netmap, 1, 1, 1); | ||||
static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init); | |||||
/* | /* | ||||
* device-specific sysctl variables: | * device-specific sysctl variables: | ||||
* | * | ||||
* iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. | * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. | ||||
* During regular operations the CRC is stripped, but on some | * During regular operations the CRC is stripped, but on some | ||||
* hardware reception of frames not multiple of 64 is slower, | * hardware reception of frames not multiple of 64 is slower, | ||||
* so using crcstrip=0 helps in benchmarks. | * so using crcstrip=0 helps in benchmarks. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | iflib_netmap_register(struct netmap_adapter *na, int onoff) | ||||
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? | IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? | ||||
status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1; | status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1; | ||||
if (status) | if (status) | ||||
nm_clear_native_flags(na); | nm_clear_native_flags(na); | ||||
CTX_UNLOCK(ctx); | CTX_UNLOCK(ctx); | ||||
return (status); | return (status); | ||||
} | } | ||||
static void | |||||
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) | |||||
{ | |||||
iflib_fl_t fl; | |||||
fl = &rxq->ifr_fl[flid]; | |||||
iru->iru_paddrs = fl->ifl_bus_addrs; | |||||
iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; | |||||
iru->iru_idxs = fl->ifl_rxd_idxs; | |||||
iru->iru_qsidx = rxq->ifr_id; | |||||
iru->iru_buf_size = fl->ifl_buf_size; | |||||
iru->iru_flidx = fl->ifl_id; | |||||
} | |||||
static int | |||||
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) | |||||
{ | |||||
struct netmap_adapter *na = kring->na; | |||||
u_int const lim = kring->nkr_num_slots - 1; | |||||
u_int head = kring->rhead; | |||||
struct netmap_ring *ring = kring->ring; | |||||
bus_dmamap_t *map; | |||||
struct if_rxd_update iru; | |||||
if_ctx_t ctx = rxq->ifr_ctx; | |||||
iflib_fl_t fl = &rxq->ifr_fl[0]; | |||||
uint32_t refill_pidx, nic_i; | |||||
if (nm_i == head && __predict_true(!init)) | |||||
return 0; | |||||
iru_init(&iru, rxq, 0 /* flid */); | |||||
map = fl->ifl_sds.ifsd_map; | |||||
refill_pidx = netmap_idx_k2n(kring, nm_i); | |||||
/* | /* | ||||
* IMPORTANT: we must leave one free slot in the ring, | |||||
* so move head back by one unit | |||||
*/ | |||||
head = nm_prev(head, lim); | |||||
while (nm_i != head) { | |||||
for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { | |||||
struct netmap_slot *slot = &ring->slot[nm_i]; | |||||
void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); | |||||
uint32_t nic_i_dma = refill_pidx; | |||||
nic_i = netmap_idx_k2n(kring, nm_i); | |||||
MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); | |||||
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ | |||||
return netmap_ring_reinit(kring); | |||||
fl->ifl_vm_addrs[tmp_pidx] = addr; | |||||
if (__predict_false(init) && map) { | |||||
netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); | |||||
} else if (map && (slot->flags & NS_BUF_CHANGED)) { | |||||
/* buffer has changed, reload map */ | |||||
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); | |||||
} | |||||
slot->flags &= ~NS_BUF_CHANGED; | |||||
nm_i = nm_next(nm_i, lim); | |||||
fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); | |||||
if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) | |||||
continue; | |||||
iru.iru_pidx = refill_pidx; | |||||
iru.iru_count = tmp_pidx+1; | |||||
ctx->isc_rxd_refill(ctx->ifc_softc, &iru); | |||||
refill_pidx = nic_i; | |||||
if (map == NULL) | |||||
continue; | |||||
for (int n = 0; n < iru.iru_count; n++) { | |||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma], | |||||
BUS_DMASYNC_PREREAD); | |||||
/* XXX - change this to not use the netmap func*/ | |||||
nic_i_dma = nm_next(nic_i_dma, lim); | |||||
} | |||||
} | |||||
} | |||||
kring->nr_hwcur = head; | |||||
if (map) | |||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, | |||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | |||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); | |||||
return (0); | |||||
} | |||||
/* | |||||
* Reconcile kernel and user view of the transmit ring. | * Reconcile kernel and user view of the transmit ring. | ||||
* | * | ||||
* All information is in the kring. | * All information is in the kring. | ||||
* Userspace wants to send packets up to the one before kring->rhead, | * Userspace wants to send packets up to the one before kring->rhead, | ||||
* kernel knows kring->nr_hwcur is the first unsent packet. | * kernel knows kring->nr_hwcur is the first unsent packet. | ||||
* | * | ||||
* Here we push packets out (as many as possible), and possibly | * Here we push packets out (as many as possible), and possibly | ||||
* reclaim buffers from previously completed transmission. | * reclaim buffers from previously completed transmission. | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | iflib_netmap_txsync(struct netmap_kring *kring, int flags) | ||||
* | * | ||||
* If possible do not set the report/intr bit on all slots, | * If possible do not set the report/intr bit on all slots, | ||||
* but only a few times per ring or when NS_REPORT is set. | * but only a few times per ring or when NS_REPORT is set. | ||||
* | * | ||||
* Finally, on 10G and faster drivers, it might be useful | * Finally, on 10G and faster drivers, it might be useful | ||||
* to prefetch the next slot and txr entry. | * to prefetch the next slot and txr entry. | ||||
*/ | */ | ||||
nm_i = kring->nr_hwcur; | nm_i = netmap_idx_n2k(kring, kring->nr_hwcur); | ||||
pkt_info_zero(&pi); | pkt_info_zero(&pi); | ||||
pi.ipi_segs = txq->ift_segs; | pi.ipi_segs = txq->ift_segs; | ||||
pi.ipi_qsidx = kring->ring_id; | pi.ipi_qsidx = kring->ring_id; | ||||
if (nm_i != head) { /* we have new packets to send */ | if (nm_i != head) { /* we have new packets to send */ | ||||
nic_i = netmap_idx_k2n(kring, nm_i); | nic_i = netmap_idx_k2n(kring, nm_i); | ||||
__builtin_prefetch(&ring->slot[nm_i]); | __builtin_prefetch(&ring->slot[nm_i]); | ||||
__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); | __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | |||||
* of whether or not we received an interrupt. | * of whether or not we received an interrupt. | ||||
*/ | */ | ||||
static int | static int | ||||
iflib_netmap_rxsync(struct netmap_kring *kring, int flags) | iflib_netmap_rxsync(struct netmap_kring *kring, int flags) | ||||
{ | { | ||||
struct netmap_adapter *na = kring->na; | struct netmap_adapter *na = kring->na; | ||||
struct netmap_ring *ring = kring->ring; | struct netmap_ring *ring = kring->ring; | ||||
uint32_t nm_i; /* index into the netmap ring */ | uint32_t nm_i; /* index into the netmap ring */ | ||||
uint32_t nic_i, nic_i_start; /* index into the NIC ring */ | uint32_t nic_i; /* index into the NIC ring */ | ||||
u_int i, n; | u_int i, n; | ||||
u_int const lim = kring->nkr_num_slots - 1; | u_int const lim = kring->nkr_num_slots - 1; | ||||
u_int const head = kring->rhead; | u_int const head = netmap_idx_n2k(kring, kring->rhead); | ||||
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; | int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; | ||||
struct if_rxd_info ri; | struct if_rxd_info ri; | ||||
struct if_rxd_update iru; | |||||
struct ifnet *ifp = na->ifp; | struct ifnet *ifp = na->ifp; | ||||
if_ctx_t ctx = ifp->if_softc; | if_ctx_t ctx = ifp->if_softc; | ||||
iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; | iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; | ||||
iflib_fl_t fl = rxq->ifr_fl; | iflib_fl_t fl = rxq->ifr_fl; | ||||
if (head > lim) | if (head > lim) | ||||
return netmap_ring_reinit(kring); | return netmap_ring_reinit(kring); | ||||
Show All 19 Lines | iflib_netmap_rxsync(struct netmap_kring *kring, int flags) | ||||
* | * | ||||
* rxr->next_check is set to 0 on a ring reinit | * rxr->next_check is set to 0 on a ring reinit | ||||
*/ | */ | ||||
if (netmap_no_pendintr || force_update) { | if (netmap_no_pendintr || force_update) { | ||||
int crclen = iflib_crcstrip ? 0 : 4; | int crclen = iflib_crcstrip ? 0 : 4; | ||||
int error, avail; | int error, avail; | ||||
uint16_t slot_flags = kring->nkr_slot_flags; | uint16_t slot_flags = kring->nkr_slot_flags; | ||||
for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) { | for (i = 0; i < rxq->ifr_nfl; i++) { | ||||
fl = &rxq->ifr_fl[i]; | |||||
nic_i = fl->ifl_cidx; | nic_i = fl->ifl_cidx; | ||||
nm_i = netmap_idx_n2k(kring, nic_i); | nm_i = netmap_idx_n2k(kring, nic_i); | ||||
avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX); | avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX); | ||||
for (n = 0; avail > 0; n++, avail--) { | for (n = 0; avail > 0; n++, avail--) { | ||||
rxd_info_zero(&ri); | rxd_info_zero(&ri); | ||||
ri.iri_frags = rxq->ifr_frags; | ri.iri_frags = rxq->ifr_frags; | ||||
ri.iri_qsidx = kring->ring_id; | ri.iri_qsidx = kring->ring_id; | ||||
ri.iri_ifp = ctx->ifc_ifp; | ri.iri_ifp = ctx->ifc_ifp; | ||||
Show All 10 Lines | for (i = 0; i < rxq->ifr_nfl; i++) { | ||||
} | } | ||||
if (n) { /* update the state variables */ | if (n) { /* update the state variables */ | ||||
if (netmap_no_pendintr && !force_update) { | if (netmap_no_pendintr && !force_update) { | ||||
/* diagnostics */ | /* diagnostics */ | ||||
iflib_rx_miss ++; | iflib_rx_miss ++; | ||||
iflib_rx_miss_bufs += n; | iflib_rx_miss_bufs += n; | ||||
} | } | ||||
fl->ifl_cidx = nic_i; | fl->ifl_cidx = nic_i; | ||||
kring->nr_hwtail = nm_i; | kring->nr_hwtail = netmap_idx_k2n(kring, nm_i); | ||||
} | } | ||||
kring->nr_kflags &= ~NKR_PENDINTR; | kring->nr_kflags &= ~NKR_PENDINTR; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Second part: skip past packets that userspace has released. | * Second part: skip past packets that userspace has released. | ||||
* (kring->nr_hwcur to head excluded), | * (kring->nr_hwcur to head excluded), | ||||
* and make the buffers available for reception. | * and make the buffers available for reception. | ||||
* As usual nm_i is the index in the netmap ring, | * As usual nm_i is the index in the netmap ring, | ||||
* nic_i is the index in the NIC ring, and | * nic_i is the index in the NIC ring, and | ||||
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size | * nm_i == (nic_i + kring->nkr_hwofs) % ring_size | ||||
*/ | */ | ||||
/* XXX not sure how this will work with multiple free lists */ | /* XXX not sure how this will work with multiple free lists */ | ||||
nm_i = kring->nr_hwcur; | nm_i = netmap_idx_n2k(kring, kring->nr_hwcur); | ||||
if (nm_i == head) | |||||
return (0); | |||||
iru.iru_paddrs = fl->ifl_bus_addrs; | return (netmap_fl_refill(rxq, kring, nm_i, false)); | ||||
iru.iru_vaddrs = &fl->ifl_vm_addrs[0]; | |||||
iru.iru_idxs = fl->ifl_rxd_idxs; | |||||
iru.iru_qsidx = rxq->ifr_id; | |||||
iru.iru_buf_size = fl->ifl_buf_size; | |||||
iru.iru_flidx = fl->ifl_id; | |||||
nic_i_start = nic_i = netmap_idx_k2n(kring, nm_i); | |||||
for (i = 0; nm_i != head; i++) { | |||||
struct netmap_slot *slot = &ring->slot[nm_i]; | |||||
void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[i]); | |||||
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ | |||||
goto ring_reset; | |||||
fl->ifl_vm_addrs[i] = addr; | |||||
if (fl->ifl_sds.ifsd_map && (slot->flags & NS_BUF_CHANGED)) { | |||||
/* buffer has changed, reload map */ | |||||
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], addr); | |||||
} | } | ||||
slot->flags &= ~NS_BUF_CHANGED; | |||||
nm_i = nm_next(nm_i, lim); | |||||
fl->ifl_rxd_idxs[i] = nic_i = nm_next(nic_i, lim); | |||||
if (nm_i != head && i < IFLIB_MAX_RX_REFRESH) | |||||
continue; | |||||
iru.iru_pidx = nic_i_start; | |||||
iru.iru_count = i; | |||||
i = 0; | |||||
ctx->isc_rxd_refill(ctx->ifc_softc, &iru); | |||||
if (fl->ifl_sds.ifsd_map == NULL) { | |||||
nic_i_start = nic_i; | |||||
continue; | |||||
} | |||||
nic_i = nic_i_start; | |||||
for (n = 0; n < iru.iru_count; n++) { | |||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], | |||||
BUS_DMASYNC_PREREAD); | |||||
nic_i = nm_next(nic_i, lim); | |||||
} | |||||
nic_i_start = nic_i; | |||||
} | |||||
kring->nr_hwcur = head; | |||||
if (fl->ifl_sds.ifsd_map) | |||||
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, | |||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | |||||
/* | |||||
* IMPORTANT: we must leave one free slot in the ring, | |||||
* so move nic_i back by one unit | |||||
*/ | |||||
nic_i = nm_prev(nic_i, lim); | |||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); | |||||
return 0; | |||||
ring_reset: | |||||
return netmap_ring_reinit(kring); | |||||
} | |||||
static void | static void | ||||
iflib_netmap_intr(struct netmap_adapter *na, int onoff) | iflib_netmap_intr(struct netmap_adapter *na, int onoff) | ||||
{ | { | ||||
struct ifnet *ifp = na->ifp; | struct ifnet *ifp = na->ifp; | ||||
if_ctx_t ctx = ifp->if_softc; | if_ctx_t ctx = ifp->if_softc; | ||||
CTX_LOCK(ctx); | CTX_LOCK(ctx); | ||||
if (onoff) { | if (onoff) { | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { | ||||
* the physical buffer address in the NIC ring. | * the physical buffer address in the NIC ring. | ||||
* netmap_idx_n2k() maps a nic index, i, into the corresponding | * netmap_idx_n2k() maps a nic index, i, into the corresponding | ||||
* netmap slot index, si | * netmap slot index, si | ||||
*/ | */ | ||||
int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); | int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); | ||||
netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); | netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) | iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) | ||||
{ | { | ||||
struct netmap_adapter *na = NA(ctx->ifc_ifp); | struct netmap_adapter *na = NA(ctx->ifc_ifp); | ||||
struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; | |||||
struct netmap_slot *slot; | struct netmap_slot *slot; | ||||
struct if_rxd_update iru; | uint32_t nm_i; | ||||
iflib_fl_t fl; | |||||
bus_dmamap_t *map; | |||||
int nrxd; | |||||
uint32_t i, j, pidx_start; | |||||
slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); | slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); | ||||
if (slot == NULL) | if (slot == NULL) | ||||
return; | return; | ||||
fl = &rxq->ifr_fl[0]; | nm_i = netmap_idx_n2k(kring, 0); | ||||
map = fl->ifl_sds.ifsd_map; | netmap_fl_refill(rxq, kring, nm_i, true); | ||||
nrxd = ctx->ifc_softc_ctx.isc_nrxd[0]; | |||||
iru.iru_paddrs = fl->ifl_bus_addrs; | |||||
iru.iru_vaddrs = &fl->ifl_vm_addrs[0]; | |||||
iru.iru_idxs = fl->ifl_rxd_idxs; | |||||
iru.iru_qsidx = rxq->ifr_id; | |||||
iru.iru_buf_size = rxq->ifr_fl[0].ifl_buf_size; | |||||
iru.iru_flidx = 0; | |||||
for (pidx_start = i = j = 0; i < nrxd; i++, j++) { | |||||
int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i); | |||||
void *addr; | |||||
fl->ifl_rxd_idxs[j] = i; | |||||
addr = fl->ifl_vm_addrs[j] = PNMB(na, slot + sj, &fl->ifl_bus_addrs[j]); | |||||
if (map) { | |||||
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, *map, addr); | |||||
map++; | |||||
} | } | ||||
if (j < IFLIB_MAX_RX_REFRESH && i < nrxd - 1) | |||||
continue; | |||||
iru.iru_pidx = pidx_start; | |||||
pidx_start = i; | |||||
iru.iru_count = j; | |||||
j = 0; | |||||
MPASS(pidx_start + j <= nrxd); | |||||
/* Update descriptors and the cached value */ | |||||
ctx->isc_rxd_refill(ctx->ifc_softc, &iru); | |||||
} | |||||
/* preserve queue */ | |||||
if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) { | |||||
struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; | |||||
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | |||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, t); | |||||
} else | |||||
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, nrxd-1); | |||||
} | |||||
#define iflib_netmap_detach(ifp) netmap_detach(ifp) | #define iflib_netmap_detach(ifp) netmap_detach(ifp) | ||||
#else | #else | ||||
#define iflib_netmap_txq_init(ctx, txq) | #define iflib_netmap_txq_init(ctx, txq) | ||||
#define iflib_netmap_rxq_init(ctx, rxq) | #define iflib_netmap_rxq_init(ctx, rxq) | ||||
#define iflib_netmap_detach(ifp) | #define iflib_netmap_detach(ifp) | ||||
#define iflib_netmap_attach(ctx) (0) | #define iflib_netmap_attach(ctx) (0) | ||||
▲ Show 20 Lines • Show All 610 Lines • ▼ Show 20 Lines | _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) | ||||
if (pidx == fl->ifl_cidx && (fl->ifl_credits < fl->ifl_size)) | if (pidx == fl->ifl_cidx && (fl->ifl_credits < fl->ifl_size)) | ||||
MPASS(fl->ifl_gen == 0); | MPASS(fl->ifl_gen == 0); | ||||
if (pidx > fl->ifl_cidx) | if (pidx > fl->ifl_cidx) | ||||
MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); | MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); | ||||
DBG_COUNTER_INC(fl_refills); | DBG_COUNTER_INC(fl_refills); | ||||
if (n > 8) | if (n > 8) | ||||
DBG_COUNTER_INC(fl_refills_large); | DBG_COUNTER_INC(fl_refills_large); | ||||
iru.iru_paddrs = fl->ifl_bus_addrs; | iru_init(&iru, fl->ifl_rxq, fl->ifl_id); | ||||
iru.iru_vaddrs = &fl->ifl_vm_addrs[0]; | |||||
iru.iru_idxs = fl->ifl_rxd_idxs; | |||||
iru.iru_qsidx = fl->ifl_rxq->ifr_id; | |||||
iru.iru_buf_size = fl->ifl_buf_size; | |||||
iru.iru_flidx = fl->ifl_id; | |||||
while (n--) { | while (n--) { | ||||
/* | /* | ||||
* We allocate an uninitialized mbuf + cluster, mbuf is | * We allocate an uninitialized mbuf + cluster, mbuf is | ||||
* initialized after rx. | * initialized after rx. | ||||
* | * | ||||
* If the cluster is still set then we know a minimum sized packet was received | * If the cluster is still set then we know a minimum sized packet was received | ||||
*/ | */ | ||||
bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); | bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); | ||||
▲ Show 20 Lines • Show All 3,844 Lines • Show Last 20 Lines |