D23348.id67463.diff
D23348: Widen EPOCH(9) usage in network drivers (as a pre-step for D23347)
Index: sys/arm/allwinner/if_awg.c
===================================================================
--- sys/arm/allwinner/if_awg.c
+++ sys/arm/allwinner/if_awg.c
@@ -1023,7 +1023,7 @@
WR4(sc, EMAC_INT_STA, val);
if (val & RX_INT)
- awg_rxintr(sc);
+ NET_EPOCH_WRAP(awg_rxintr, (sc));
if (val & TX_INT)
awg_txeof(sc);
@@ -1054,7 +1054,7 @@
return (0);
}
- rx_npkts = awg_rxintr(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(awg_rxintr, (sc));
awg_txeof(sc);
if (!if_sendq_empty(ifp))
awg_start_locked(sc);
Index: sys/arm/allwinner/if_emac.c
===================================================================
--- sys/arm/allwinner/if_emac.c
+++ sys/arm/allwinner/if_emac.c
@@ -720,7 +720,7 @@
/* Received incoming packet */
if (reg_val & EMAC_INT_STA_RX)
- emac_rxeof(sc, sc->emac_rx_process_limit);
+ NET_EPOCH_WRAP(emac_rxeof, (sc, sc->emac_rx_process_limit));
/* Transmit Interrupt check */
if (reg_val & EMAC_INT_STA_TX) {
Index: sys/arm/ralink/if_fv.c
===================================================================
--- sys/arm/ralink/if_fv.c
+++ sys/arm/ralink/if_fv.c
@@ -1736,7 +1736,7 @@
device_printf(sc->fv_dev, "Transmit Underflow\n");
}
if (status & sc->sc_rxint_mask) {
- fv_rx(sc);
+ NET_EPOCH_WRAP(fv_rx, (sc));
}
if (status & sc->sc_txint_mask) {
fv_tx(sc);
Index: sys/arm/ti/cpsw/if_cpsw.c
===================================================================
--- sys/arm/ti/cpsw/if_cpsw.c
+++ sys/arm/ti/cpsw/if_cpsw.c
@@ -1563,6 +1563,7 @@
static void
cpsw_intr_rx(void *arg)
{
+ struct epoch_tracker et;
struct cpsw_softc *sc;
struct ifnet *ifp;
struct mbuf *received, *next;
@@ -1579,6 +1580,7 @@
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
CPSW_RX_UNLOCK(sc);
+ NET_EPOCH_ENTER(et);
while (received != NULL) {
next = received->m_nextpkt;
received->m_nextpkt = NULL;
@@ -1587,6 +1589,7 @@
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
received = next;
}
+ NET_EPOCH_EXIT(et);
}
static struct mbuf *
@@ -2056,6 +2059,7 @@
static void
cpsw_intr_rx_thresh(void *arg)
{
+ struct epoch_tracker et;
struct cpsw_softc *sc;
struct ifnet *ifp;
struct mbuf *received, *next;
@@ -2067,6 +2071,7 @@
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
CPSW_RX_UNLOCK(sc);
+ NET_EPOCH_ENTER(et);
while (received != NULL) {
next = received->m_nextpkt;
received->m_nextpkt = NULL;
@@ -2075,6 +2080,7 @@
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
received = next;
}
+ NET_EPOCH_EXIT(et);
}
static void
Index: sys/dev/ae/if_ae.c
===================================================================
--- sys/dev/ae/if_ae.c
+++ sys/dev/ae/if_ae.c
@@ -1790,7 +1790,7 @@
if ((val & AE_ISR_TX_EVENT) != 0)
ae_tx_intr(sc);
if ((val & AE_ISR_RX_EVENT) != 0)
- ae_rx_intr(sc);
+ NET_EPOCH_WRAP(ae_rx_intr, (sc));
/*
* Re-enable interrupts.
*/
Index: sys/dev/age/if_age.c
===================================================================
--- sys/dev/age/if_age.c
+++ sys/dev/age/if_age.c
@@ -2168,8 +2168,8 @@
ifp = sc->age_ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if ((status & INTR_CMB_RX) != 0)
- sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
- sc->age_process_limit);
+ sc->age_morework = NET_EPOCH_WRAP_RET(age_rxintr,
+ (sc, sc->age_rr_prod, sc->age_process_limit));
if ((status & INTR_CMB_TX) != 0)
age_txintr(sc, sc->age_tpd_cons);
if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
Index: sys/dev/al_eth/al_eth.c
===================================================================
--- sys/dev/al_eth/al_eth.c
+++ sys/dev/al_eth/al_eth.c
@@ -1585,6 +1585,7 @@
static void
al_eth_rx_recv_work(void *arg, int pending)
{
+ struct epoch_tracker et;
struct al_eth_ring *rx_ring = arg;
struct mbuf *mbuf;
struct lro_entry *queued;
@@ -1595,6 +1596,8 @@
uint32_t refill_actual;
uint32_t do_if_input;
+ NET_EPOCH_ENTER(et);
+
if (napi != 0) {
rx_ring->enqueue_is_running = 1;
al_data_memory_barrier();
@@ -1691,6 +1694,8 @@
}
/* unmask irq */
al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
+
+ NET_EPOCH_EXIT(et);
}
static void
Index: sys/dev/alc/if_alc.c
===================================================================
--- sys/dev/alc/if_alc.c
+++ sys/dev/alc/if_alc.c
@@ -3375,7 +3375,7 @@
more = 0;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if ((status & INTR_RX_PKT) != 0) {
- more = alc_rxintr(sc, sc->alc_process_limit);
+ more = NET_EPOCH_WRAP_RET(alc_rxintr, (sc, sc->alc_process_limit));
if (more == EAGAIN)
sc->alc_morework = 1;
else if (more == EIO) {
Index: sys/dev/ale/if_ale.c
===================================================================
--- sys/dev/ale/if_ale.c
+++ sys/dev/ale/if_ale.c
@@ -2266,7 +2266,7 @@
ifp = sc->ale_ifp;
more = 0;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
- more = ale_rxeof(sc, sc->ale_process_limit);
+ more = NET_EPOCH_WRAP_RET(ale_rxeof, (sc, sc->ale_process_limit));
if (more == EAGAIN)
sc->ale_morework = 1;
else if (more == EIO) {
Index: sys/dev/altera/atse/if_atse.c
===================================================================
--- sys/dev/altera/atse/if_atse.c
+++ sys/dev/altera/atse/if_atse.c
@@ -257,6 +257,7 @@
static int
atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
+ struct epoch_tracker et;
xdma_transfer_status_t st;
struct atse_softc *sc;
struct ifnet *ifp;
@@ -267,6 +268,7 @@
sc = arg;
ATSE_LOCK(sc);
+ NET_EPOCH_ENTER(et);
ifp = sc->atse_ifp;
@@ -291,6 +293,7 @@
(*ifp->if_input)(ifp, m);
ATSE_LOCK(sc);
}
+ NET_EPOCH_EXIT(et);
atse_rx_enqueue(sc, cnt_processed);
@@ -1381,8 +1384,7 @@
}
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
- IFF_NEEDSEPOCH;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = atse_ioctl;
ifp->if_transmit = atse_transmit;
ifp->if_qflush = atse_qflush;
Index: sys/dev/an/if_an.c
===================================================================
--- sys/dev/an/if_an.c
+++ sys/dev/an/if_an.c
@@ -1222,7 +1222,7 @@
}
if (status & AN_EV_RX) {
- an_rxeof(sc);
+ NET_EPOCH_WRAP(an_rxeof, (sc));
CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_RX);
}
Index: sys/dev/axgbe/xgbe-drv.c
===================================================================
--- sys/dev/axgbe/xgbe-drv.c
+++ sys/dev/axgbe/xgbe-drv.c
@@ -1037,7 +1037,7 @@
xgbe_tx_poll(channel);
/* Process Rx ring next */
- processed = xgbe_rx_poll(channel, budget);
+ processed = NET_EPOCH_WRAP_RET(xgbe_rx_poll, (channel, budget));
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
@@ -1066,7 +1066,7 @@
/* Process Rx ring next */
if (ring_budget > (budget - processed))
ring_budget = budget - processed;
- processed += xgbe_rx_poll(channel, ring_budget);
+ processed += NET_EPOCH_WRAP_RET(xgbe_rx_poll, (channel, ring_budget));
}
} while ((processed < budget) && (processed != last_processed));
Index: sys/dev/bce/if_bce.c
===================================================================
--- sys/dev/bce/if_bce.c
+++ sys/dev/bce/if_bce.c
@@ -8014,7 +8014,7 @@
/* Check for any completed RX frames. */
if (hw_rx_cons != sc->hw_rx_cons)
- bce_rx_intr(sc);
+ NET_EPOCH_WRAP(bce_rx_intr, (sc));
/* Check for any completed TX frames. */
if (hw_tx_cons != sc->hw_tx_cons)
Index: sys/dev/beri/virtio/network/if_vtbe.c
===================================================================
--- sys/dev/beri/virtio/network/if_vtbe.c
+++ sys/dev/beri/virtio/network/if_vtbe.c
@@ -436,6 +436,7 @@
static void
vtbe_rxfinish_locked(struct vtbe_softc *sc)
{
+ struct epoch_tracker et;
struct vqueue_info *vq;
int reg;
@@ -447,9 +448,11 @@
/* Process new descriptors */
vq->vq_save_used = be16toh(vq->vq_used->idx);
+ NET_EPOCH_ENTER(et);
while (vq_has_descs(vq)) {
vtbe_proc_rx(sc, vq);
}
+ NET_EPOCH_EXIT(et);
/* Interrupt the other side */
reg = htobe32(VIRTIO_MMIO_INT_VRING);
@@ -613,7 +616,7 @@
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX |
- IFF_MULTICAST | IFF_PROMISC | IFF_NEEDSEPOCH);
+ IFF_MULTICAST | IFF_PROMISC);
ifp->if_capabilities = IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
ifp->if_start = vtbe_txstart;
Index: sys/dev/bfe/if_bfe.c
===================================================================
--- sys/dev/bfe/if_bfe.c
+++ sys/dev/bfe/if_bfe.c
@@ -1474,7 +1474,7 @@
/* A packet was received */
if (istat & BFE_ISTAT_RX)
- bfe_rxeof(sc);
+ NET_EPOCH_WRAP(bfe_rxeof, (sc));
/* A packet was sent */
if (istat & BFE_ISTAT_TX)
Index: sys/dev/bge/if_bge.c
===================================================================
--- sys/dev/bge/if_bge.c
+++ sys/dev/bge/if_bge.c
@@ -4615,7 +4615,7 @@
bge_link_upd(sc);
sc->rxcycles = count;
- rx_npkts = bge_rxeof(sc, rx_prod, 1);
+ rx_npkts = NET_EPOCH_WRAP_RET(bge_rxeof, (sc, rx_prod, 1));
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
BGE_UNLOCK(sc);
return (rx_npkts);
@@ -4646,7 +4646,6 @@
static void
bge_intr_task(void *arg, int pending)
{
- struct epoch_tracker et;
struct bge_softc *sc;
if_t ifp;
uint32_t status, status_tag;
@@ -4689,9 +4688,7 @@
sc->bge_rx_saved_considx != rx_prod) {
/* Check RX return ring producer/consumer. */
BGE_UNLOCK(sc);
- NET_EPOCH_ENTER(et);
- bge_rxeof(sc, rx_prod, 0);
- NET_EPOCH_EXIT(et);
+ NET_EPOCH_WRAP(bge_rxeof, (sc, rx_prod, 0));
BGE_LOCK(sc);
}
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
@@ -4769,7 +4766,7 @@
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Check RX return ring producer/consumer. */
- bge_rxeof(sc, rx_prod, 1);
+ NET_EPOCH_WRAP(bge_rxeof, (sc, rx_prod, 1));
}
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
Index: sys/dev/bwi/if_bwi.c
===================================================================
--- sys/dev/bwi/if_bwi.c
+++ sys/dev/bwi/if_bwi.c
@@ -1625,7 +1625,7 @@
device_printf(sc->sc_dev, "intr noise\n");
if (txrx_intr_status[0] & BWI_TXRX_INTR_RX) {
- rx_data = sc->sc_rxeof(sc);
+ rx_data = NET_EPOCH_WRAP_RET(sc->sc_rxeof, (sc));
if (sc->sc_flags & BWI_F_STOP) {
BWI_UNLOCK(sc);
return;
Index: sys/dev/bwn/if_bwn.c
===================================================================
--- sys/dev/bwn/if_bwn.c
+++ sys/dev/bwn/if_bwn.c
@@ -5134,11 +5134,11 @@
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) {
- bwn_dma_rx(mac->mac_method.dma.rx);
+ NET_EPOCH_WRAP(bwn_dma_rx, (mac->mac_method.dma.rx));
rx = 1;
}
} else
- rx = bwn_pio_rx(&mac->mac_method.pio.rx);
+ rx = NET_EPOCH_WRAP_RET(bwn_pio_rx, (&mac->mac_method.pio.rx));
KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
Index: sys/dev/bxe/bxe.c
===================================================================
--- sys/dev/bxe/bxe.c
+++ sys/dev/bxe/bxe.c
@@ -8765,7 +8765,7 @@
}
if (bxe_has_rx_work(fp)) {
- more_rx = bxe_rxeof(sc, fp);
+ more_rx = NET_EPOCH_WRAP_RET(bxe_rxeof, (sc, fp));
}
if (more_rx /*|| more_tx*/) {
@@ -8799,7 +8799,7 @@
}
if (bxe_has_rx_work(fp)) {
- more_rx = bxe_rxeof(sc, fp);
+ more_rx = NET_EPOCH_WRAP_RET(bxe_rxeof, (sc, fp));
}
if (more_rx /*|| more_tx*/) {
Index: sys/dev/cadence/if_cgem.c
===================================================================
--- sys/dev/cadence/if_cgem.c
+++ sys/dev/cadence/if_cgem.c
@@ -943,7 +943,7 @@
/* Packets received. */
if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
- cgem_recv(sc);
+ NET_EPOCH_WRAP(cgem_recv, (sc));
/* Free up any completed transmit buffers. */
cgem_clean_tx(sc);
Index: sys/dev/cas/if_cas.c
===================================================================
--- sys/dev/cas/if_cas.c
+++ sys/dev/cas/if_cas.c
@@ -1616,7 +1616,7 @@
CAS_LOCK_ASSERT(sc, MA_OWNED);
- cas_rint(sc);
+ NET_EPOCH_WRAP(cas_rint, (sc));
}
static void
@@ -2043,7 +2043,7 @@
if ((status &
(CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) {
- cas_rint(sc);
+ NET_EPOCH_WRAP(cas_rint, (sc));
#ifdef CAS_DEBUG
if (__predict_false((status &
(CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
Index: sys/dev/cxgb/cxgb_sge.c
===================================================================
--- sys/dev/cxgb/cxgb_sge.c
+++ sys/dev/cxgb/cxgb_sge.c
@@ -3028,7 +3028,7 @@
int work;
static int last_holdoff = 0;
- work = process_responses(adap, rspq_to_qset(rq), -1);
+ work = NET_EPOCH_WRAP_RET(process_responses, (adap, rspq_to_qset(rq), -1));
if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
printf("next_holdoff=%d\n", rq->next_holdoff);
Index: sys/dev/cxgbe/t4_sge.c
===================================================================
--- sys/dev/cxgbe/t4_sge.c
+++ sys/dev/cxgbe/t4_sge.c
@@ -1409,7 +1409,7 @@
struct sge_iq *iq = arg;
if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
- service_iq(iq, 0);
+ NET_EPOCH_WRAP(service_iq, (iq, 0));
(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
}
}
@@ -1423,7 +1423,7 @@
struct sge_iq *iq = arg;
if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
- service_iq_fl(iq, 0);
+ NET_EPOCH_WRAP(service_iq_fl, (iq, 0));
(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
}
}
@@ -1438,7 +1438,7 @@
struct sge_nm_rxq *nm_rxq = arg;
if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
- service_nm_rxq(nm_rxq);
+ NET_EPOCH_WRAP(service_nm_rxq, (nm_rxq));
(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
}
}
Index: sys/dev/dc/if_dc.c
===================================================================
--- sys/dev/dc/if_dc.c
+++ sys/dev/dc/if_dc.c
@@ -3254,7 +3254,7 @@
}
sc->rxcycles = count;
- rx_npkts = dc_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(dc_rxeof, (sc));
dc_txeof(sc);
if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
!(ifp->if_drv_flags & IFF_DRV_OACTIVE))
@@ -3279,7 +3279,7 @@
if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));
if (dc_rx_resync(sc))
- dc_rxeof(sc);
+ NET_EPOCH_WRAP(dc_rxeof, (sc));
}
/* restart transmit unit if necessary */
if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
@@ -3302,6 +3302,7 @@
static void
dc_intr(void *arg)
{
+ struct epoch_tracker et;
struct dc_softc *sc;
struct ifnet *ifp;
uint32_t r, status;
@@ -3335,10 +3336,12 @@
CSR_WRITE_4(sc, DC_ISR, status);
if (status & DC_ISR_RX_OK) {
+ NET_EPOCH_ENTER(et);
if (dc_rxeof(sc) == 0) {
while (dc_rx_resync(sc))
dc_rxeof(sc);
}
+ NET_EPOCH_EXIT(et);
}
if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
@@ -3359,10 +3362,12 @@
|| (status & DC_ISR_RX_NOBUF)) {
r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));
+ NET_EPOCH_ENTER(et);
if (dc_rxeof(sc) == 0) {
while (dc_rx_resync(sc))
dc_rxeof(sc);
}
+ NET_EPOCH_EXIT(et);
}
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
Index: sys/dev/dme/if_dme.c
===================================================================
--- sys/dev/dme/if_dme.c
+++ sys/dev/dme/if_dme.c
@@ -610,7 +610,7 @@
if (intr_status & ISR_PR) {
/* Read the packets off the device */
- while (dme_rxeof(sc) == 0)
+ while (NET_EPOCH_WRAP_RET(dme_rxeof, (sc)) == 0)
continue;
}
DME_UNLOCK(sc);
Index: sys/dev/dpaa/if_dtsec_im.c
===================================================================
--- sys/dev/dpaa/if_dtsec_im.c
+++ sys/dev/dpaa/if_dtsec_im.c
@@ -77,7 +77,7 @@
m = m_devget(data, length, 0, sc->sc_ifnet, NULL);
if (m)
- (*sc->sc_ifnet->if_input)(sc->sc_ifnet, m);
+ NET_EPOCH_WRAP(if_input, (sc->sc_ifnet, m));
XX_FreeSmart(data);
Index: sys/dev/dpaa/if_dtsec_rm.c
===================================================================
--- sys/dev/dpaa/if_dtsec_rm.c
+++ sys/dev/dpaa/if_dtsec_rm.c
@@ -389,7 +389,7 @@
m->m_len = DPAA_FD_GET_LENGTH(frame);
m_fixhdr(m);
- (*sc->sc_ifnet->if_input)(sc->sc_ifnet, m);
+ NET_EPOCH_WRAP(if_input, (sc->sc_ifnet, m));
return (e_RX_STORE_RESPONSE_CONTINUE);
Index: sys/dev/dwc/if_dwc.c
===================================================================
--- sys/dev/dwc/if_dwc.c
+++ sys/dev/dwc/if_dwc.c
@@ -822,7 +822,7 @@
reg = READ4(sc, DMA_STATUS);
if (reg & DMA_STATUS_NIS) {
if (reg & DMA_STATUS_RI)
- dwc_rxfinish_locked(sc);
+ NET_EPOCH_WRAP(dwc_rxfinish_locked, (sc));
if (reg & DMA_STATUS_TI) {
dwc_txfinish_locked(sc);
Index: sys/dev/ena/ena_datapath.c
===================================================================
--- sys/dev/ena/ena_datapath.c
+++ sys/dev/ena/ena_datapath.c
@@ -63,6 +63,7 @@
void
ena_cleanup(void *arg, int pending)
{
+ struct epoch_tracker et;
struct ena_que *que = arg;
struct ena_adapter *adapter = que->adapter;
if_t ifp = adapter->ifp;
@@ -78,6 +79,8 @@
ena_trace(ENA_DBG, "MSI-X TX/RX routine\n");
+ NET_EPOCH_ENTER(et);
+
tx_ring = que->tx_ring;
rx_ring = que->rx_ring;
qid = que->id;
@@ -92,7 +95,7 @@
txc = ena_tx_cleanup(tx_ring);
if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
- return;
+ goto done;
if ((txc != TX_BUDGET) && (rxc != RX_BUDGET))
break;
@@ -104,6 +107,8 @@
TX_IRQ_INTERVAL,
true);
ena_com_unmask_intr(io_cq, &intr_reg);
+done:
+ NET_EPOCH_EXIT(et);
}
void
Index: sys/dev/et/if_et.c
===================================================================
--- sys/dev/et/if_et.c
+++ sys/dev/et/if_et.c
@@ -1206,7 +1206,7 @@
return;
}
if (status & ET_INTR_RXDMA)
- et_rxeof(sc);
+ NET_EPOCH_WRAP(et_rxeof, (sc));
if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
et_txeof(sc);
if (status & ET_INTR_TIMER)
Index: sys/dev/ffec/if_ffec.c
===================================================================
--- sys/dev/ffec/if_ffec.c
+++ sys/dev/ffec/if_ffec.c
@@ -1301,7 +1301,7 @@
if (ier & FEC_IER_RXF) {
WR4(sc, FEC_IER_REG, FEC_IER_RXF);
- ffec_rxfinish_locked(sc);
+ NET_EPOCH_WRAP(ffec_rxfinish_locked, (sc));
}
/*
Index: sys/dev/firewire/if_fwe.c
===================================================================
--- sys/dev/firewire/if_fwe.c
+++ sys/dev/firewire/if_fwe.c
@@ -549,6 +549,7 @@
static void
fwe_as_input(struct fw_xferq *xferq)
{
+ struct epoch_tracker et;
struct mbuf *m, *m0;
struct ifnet *ifp;
struct fwe_softc *fwe;
@@ -559,6 +560,8 @@
fwe = (struct fwe_softc *)xferq->sc;
ifp = fwe->eth_softc.ifp;
+ NET_EPOCH_ENTER(et);
+
/* We do not need a lock here because the bottom half is serialized */
while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
@@ -604,6 +607,8 @@
(*ifp->if_input)(ifp, m);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
}
+ NET_EPOCH_EXIT(et);
+
if (STAILQ_FIRST(&xferq->stfree) != NULL)
fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
Index: sys/dev/fxp/if_fxp.c
===================================================================
--- sys/dev/fxp/if_fxp.c
+++ sys/dev/fxp/if_fxp.c
@@ -1706,7 +1706,7 @@
CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
statack |= tmp;
}
- rx_npkts = fxp_intr_body(sc, ifp, statack, count);
+ rx_npkts = NET_EPOCH_WRAP_RET(fxp_intr_body, (sc, ifp, statack, count));
FXP_UNLOCK(sc);
return (rx_npkts);
}
@@ -1751,7 +1751,7 @@
*/
CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
- fxp_intr_body(sc, ifp, statack, -1);
+ NET_EPOCH_WRAP(fxp_intr_body, (sc, ifp, statack, -1));
}
FXP_UNLOCK(sc);
}
Index: sys/dev/gem/if_gem.c
===================================================================
--- sys/dev/gem/if_gem.c
+++ sys/dev/gem/if_gem.c
@@ -1528,7 +1528,7 @@
GEM_LOCK_ASSERT(sc, MA_OWNED);
- gem_rint(sc);
+ NET_EPOCH_WRAP(gem_rint, (sc));
}
#endif
@@ -1772,7 +1772,7 @@
gem_eint(sc, status);
if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
- gem_rint(sc);
+ NET_EPOCH_WRAP(gem_rint, (sc));
if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
gem_tint(sc);
Index: sys/dev/gxemul/ether/if_gx.c
===================================================================
--- sys/dev/gxemul/ether/if_gx.c
+++ sys/dev/gxemul/ether/if_gx.c
@@ -348,9 +348,11 @@
static void
gx_rx_intr(void *arg)
{
+ struct epoch_tracker et;
struct gx_softc *sc = arg;
GXEMUL_ETHER_LOCK(sc);
+ NET_EPOCH_ENTER(et);
for (;;) {
uint64_t status, length;
struct mbuf *m;
@@ -376,8 +378,7 @@
if (m == NULL) {
device_printf(sc->sc_dev, "no memory for receive mbuf.\n");
if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1);
- GXEMUL_ETHER_UNLOCK(sc);
- return;
+ break;
}
/* Align incoming frame so IP headers are aligned. */
@@ -396,5 +397,6 @@
GXEMUL_ETHER_LOCK(sc);
}
+ NET_EPOCH_EXIT(et);
GXEMUL_ETHER_UNLOCK(sc);
}
Index: sys/dev/hme/if_hme.c
===================================================================
--- sys/dev/hme/if_hme.c
+++ sys/dev/hme/if_hme.c
@@ -1350,7 +1350,7 @@
hme_eint(sc, status);
if ((status & HME_SEB_STAT_RXTOHOST) != 0)
- hme_rint(sc);
+ NET_EPOCH_WRAP(hme_rint, (sc));
if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
hme_tint(sc);
Index: sys/dev/ipw/if_ipw.c
===================================================================
--- sys/dev/ipw/if_ipw.c
+++ sys/dev/ipw/if_ipw.c
@@ -1400,7 +1400,7 @@
wakeup(sc);
if (r & IPW_INTR_RX_TRANSFER)
- ipw_rx_intr(sc);
+ NET_EPOCH_WRAP(ipw_rx_intr, (sc));
if (r & IPW_INTR_TX_TRANSFER)
ipw_tx_intr(sc);
Index: sys/dev/iwi/if_iwi.c
===================================================================
--- sys/dev/iwi/if_iwi.c
+++ sys/dev/iwi/if_iwi.c
@@ -1705,7 +1705,7 @@
iwi_tx_intr(sc, &sc->txq[3]);
if (r & IWI_INTR_RX_DONE)
- iwi_rx_intr(sc);
+ NET_EPOCH_WRAP(iwi_rx_intr, (sc));
if (r & IWI_INTR_PARITY_ERROR) {
/* XXX rate-limit */
Index: sys/dev/iwm/if_iwm.c
===================================================================
--- sys/dev/iwm/if_iwm.c
+++ sys/dev/iwm/if_iwm.c
@@ -5655,10 +5655,12 @@
static void
iwm_notif_intr(struct iwm_softc *sc)
{
+ struct epoch_tracker et;
int count;
uint32_t wreg;
uint16_t hw;
+ NET_EPOCH_ENTER(et);
bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
BUS_DMASYNC_POSTREAD);
@@ -5697,6 +5699,7 @@
*/
hw = (hw == 0) ? count - 1 : hw - 1;
IWM_WRITE(sc, wreg, rounddown2(hw, 8));
+ NET_EPOCH_EXIT(et);
}
static void
Index: sys/dev/iwn/if_iwn.c
===================================================================
--- sys/dev/iwn/if_iwn.c
+++ sys/dev/iwn/if_iwn.c
@@ -4345,13 +4345,13 @@
IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
IWN_WRITE_1(sc, IWN_INT_PERIODIC,
IWN_INT_PERIODIC_DIS);
- iwn_notif_intr(sc);
+ NET_EPOCH_WRAP(iwn_notif_intr, (sc));
if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
IWN_WRITE_1(sc, IWN_INT_PERIODIC,
IWN_INT_PERIODIC_ENA);
}
} else
- iwn_notif_intr(sc);
+ NET_EPOCH_WRAP(iwn_notif_intr, (sc));
}
if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
Index: sys/dev/jme/if_jme.c
===================================================================
--- sys/dev/jme/if_jme.c
+++ sys/dev/jme/if_jme.c
@@ -2386,7 +2386,7 @@
more = 0;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
- more = jme_rxintr(sc, sc->jme_process_limit);
+ more = NET_EPOCH_WRAP_RET(jme_rxintr, (sc, sc->jme_process_limit));
if (more != 0)
sc->jme_morework = 1;
}
@@ -3033,7 +3033,7 @@
jme_stop_tx(sc);
/* Reclaim Rx/Tx buffers that have been completed. */
- jme_rxintr(sc, JME_RX_RING_CNT);
+ NET_EPOCH_WRAP(jme_rxintr, (sc, JME_RX_RING_CNT));
if (sc->jme_cdata.jme_rxhead != NULL)
m_freem(sc->jme_cdata.jme_rxhead);
JME_RXCHAIN_RESET(sc);
Index: sys/dev/le/am7990.c
===================================================================
--- sys/dev/le/am7990.c
+++ sys/dev/le/am7990.c
@@ -473,7 +473,7 @@
sc->sc_flags |= LE_CARRIER;
if (isr & LE_C0_RINT)
- am7990_rint(sc);
+ NET_EPOCH_WRAP(am7990_rint, (sc));
if (isr & LE_C0_TINT)
am7990_tint(sc);
Index: sys/dev/le/am79900.c
===================================================================
--- sys/dev/le/am79900.c
+++ sys/dev/le/am79900.c
@@ -511,7 +511,7 @@
sc->sc_flags |= LE_CARRIER;
if (isr & LE_C0_RINT)
- am79900_rint(sc);
+ NET_EPOCH_WRAP(am79900_rint, (sc));
if (isr & LE_C0_TINT)
am79900_tint(sc);
Index: sys/dev/lge/if_lge.c
===================================================================
--- sys/dev/lge/if_lge.c
+++ sys/dev/lge/if_lge.c
@@ -1098,7 +1098,7 @@
lge_txeof(sc);
if (status & LGE_ISR_RXDMA_DONE)
- lge_rxeof(sc, LGE_RX_DMACNT(status));
+ NET_EPOCH_WRAP(lge_rxeof, (sc, LGE_RX_DMACNT(status)));
if (status & LGE_ISR_RXCMDFIFO_EMPTY)
lge_rxeoc(sc);
Index: sys/dev/liquidio/lio_core.c
===================================================================
--- sys/dev/liquidio/lio_core.c
+++ sys/dev/liquidio/lio_core.c
@@ -262,10 +262,13 @@
lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq,
void *arg)
{
+ struct epoch_tracker et;
struct mbuf *mbuf = m_buff;
struct ifnet *ifp = arg;
struct lio_droq *droq = rxq;
+ NET_EPOCH_ENTER(et);
+
if (ifp != NULL) {
struct lio *lio = if_getsoftc(ifp);
@@ -273,7 +276,7 @@
if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
lio_recv_buffer_free(mbuf);
droq->stats.rx_dropped++;
- return;
+ goto done;
}
if (rh->r_dh.has_hash) {
@@ -377,7 +380,7 @@
if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) {
droq->stats.rx_bytes_received += len;
droq->stats.rx_pkts_received++;
- return;
+ goto done;
}
}
}
@@ -391,6 +394,8 @@
lio_recv_buffer_free(mbuf);
droq->stats.rx_dropped++;
}
+done:
+ NET_EPOCH_EXIT(et);
}
/*
Index: sys/dev/mge/if_mge.c
===================================================================
--- sys/dev/mge/if_mge.c
+++ sys/dev/mge/if_mge.c
@@ -769,7 +769,7 @@
}
- rx_npkts = mge_intr_rx_locked(sc, count);
+ rx_npkts = NET_EPOCH_WRAP_RET(mge_intr_rx_locked, (sc, count));
MGE_RECEIVE_UNLOCK(sc);
MGE_TRANSMIT_LOCK(sc);
@@ -1319,7 +1319,7 @@
if (int_cause || int_cause_ext) {
MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
- mge_intr_rx_locked(sc, -1);
+ NET_EPOCH_WRAP(mge_intr_rx_locked, (sc, -1));
}
}
Index: sys/dev/mlx4/mlx4_en/mlx4_en_rx.c
===================================================================
--- sys/dev/mlx4/mlx4_en/mlx4_en_rx.c
+++ sys/dev/mlx4/mlx4_en/mlx4_en_rx.c
@@ -869,7 +869,7 @@
struct net_device *dev = cq->dev;
int done;
- done = mlx4_en_process_rx_cq(dev, cq, budget);
+ done = NET_EPOCH_WRAP_RET(mlx4_en_process_rx_cq, (dev, cq, budget));
cq->tot_rx += done;
return done;
Index: sys/dev/msk/if_msk.c
===================================================================
--- sys/dev/msk/if_msk.c
+++ sys/dev/msk/if_msk.c
@@ -3632,9 +3632,9 @@
break;
if (sc_if->msk_framesize >
(MCLBYTES - MSK_RX_BUF_ALIGN))
- msk_jumbo_rxeof(sc_if, status, control, len);
+ NET_EPOCH_WRAP(msk_jumbo_rxeof, (sc_if, status, control, len));
else
- msk_rxeof(sc_if, status, control, len);
+ NET_EPOCH_WRAP(msk_rxeof, (sc_if, status, control, len));
rxprog++;
/*
* Because there is no way to sync single Rx LE
Index: sys/dev/mwl/if_mwl.c
===================================================================
--- sys/dev/mwl/if_mwl.c
+++ sys/dev/mwl/if_mwl.c
@@ -2608,6 +2608,7 @@
static void
mwl_rx_proc(void *arg, int npending)
{
+ struct epoch_tracker et;
struct mwl_softc *sc = arg;
struct ieee80211com *ic = &sc->sc_ic;
struct mwl_rxbuf *bf;
@@ -2621,6 +2622,8 @@
void *newdata;
int16_t nf;
+ NET_EPOCH_ENTER(et);
+
DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
__func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
RD4(sc, sc->sc_hwspecs.rxDescWrite));
@@ -2819,6 +2822,8 @@
rx_stop:
sc->sc_rxnext = bf;
+ NET_EPOCH_EXIT(et);
+
if (mbufq_first(&sc->sc_snd) != NULL) {
/* NB: kick fw; the tx thread may have been preempted */
mwl_hal_txstart(sc->sc_mh, 0);
Index: sys/dev/mxge/if_mxge.c
===================================================================
--- sys/dev/mxge/if_mxge.c
+++ sys/dev/mxge/if_mxge.c
@@ -3087,7 +3087,7 @@
/* an interrupt on a non-zero slice is implicitly valid
since MSI-X irqs are not shared */
if (ss != sc->ss) {
- mxge_clean_rx_done(ss);
+ NET_EPOCH_WRAP(mxge_clean_rx_done, (ss));
*ss->irq_claim = be32toh(3);
return;
}
@@ -3117,7 +3117,7 @@
(rx_done->entry[rx_done->idx].length != 0)) {
if (send_done_count != tx->pkt_done)
mxge_tx_done(ss, (int)send_done_count);
- mxge_clean_rx_done(ss);
+ NET_EPOCH_WRAP(mxge_clean_rx_done, (ss));
send_done_count = be32toh(stats->send_done_count);
}
if (sc->legacy_irq && mxge_deassert_wait)
Index: sys/dev/my/if_my.c
===================================================================
--- sys/dev/my/if_my.c
+++ sys/dev/my/if_my.c
@@ -1287,7 +1287,7 @@
break;
if (status & MY_RI) /* receive interrupt */
- my_rxeof(sc);
+ NET_EPOCH_WRAP(my_rxeof, (sc));
if ((status & MY_RBU) || (status & MY_RxErr)) {
/* rx buffer unavailable or rx error */
Index: sys/dev/neta/if_mvneta.c
===================================================================
--- sys/dev/neta/if_mvneta.c
+++ sys/dev/neta/if_mvneta.c
@@ -2960,7 +2960,7 @@
more = 1;
npkt = count;
}
- mvneta_rx_queue(sc, q, npkt);
+ NET_EPOCH_WRAP(mvneta_rx_queue, (sc, q, npkt));
out:
mvneta_rx_unlockq(sc, q);
return more;
Index: sys/dev/netfpga10g/nf10bmac/if_nf10bmac.c
===================================================================
--- sys/dev/netfpga10g/nf10bmac/if_nf10bmac.c
+++ sys/dev/netfpga10g/nf10bmac/if_nf10bmac.c
@@ -611,6 +611,7 @@
static void
nf10bmac_intr(void *arg)
{
+ struct epoch_tracker et;
struct nf10bmac_softc *sc;
struct ifnet *ifp;
int rx_npkts;
@@ -629,6 +630,7 @@
/* NF10BMAC_RX_INTR_DISABLE(sc); */
NF10BMAC_RX_INTR_CLEAR_DIS(sc);
+ NET_EPOCH_ENTER(et);
/* We only have an RX interrupt and no status information. */
rx_npkts = 0;
while (rx_npkts < NF10BMAC_MAX_PKTS) {
@@ -639,6 +641,7 @@
if (c == 0)
break;
}
+ NET_EPOCH_EXIT(et);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Re-enable interrupts. */
@@ -655,6 +658,7 @@
static int
nf10bmac_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
+ struct epoch_tracker et;
struct nf10bmac_softc *sc;
int rx_npkts = 0;
@@ -665,6 +669,7 @@
return (rx_npkts);
}
+ NET_EPOCH_ENTER(et);
while (rx_npkts < count) {
int c;
@@ -673,6 +678,8 @@
if (c == 0)
break;
}
+ NET_EPOCH_EXIT(et);
+
nf10bmac_start_locked(ifp);
if (rx_npkts > 0 || cmd == POLL_AND_CHECK_STATUS) {
Index: sys/dev/netmap/if_ptnet.c
===================================================================
--- sys/dev/netmap/if_ptnet.c
+++ sys/dev/netmap/if_ptnet.c
@@ -1334,7 +1334,7 @@
/* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
* receive-side processing is executed directly in the interrupt
* service routine. Alternatively, we may schedule the taskqueue. */
- ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
+ NET_EPOCH_WRAP(ptnet_rx_eof, (pq, PTNET_RX_BUDGET, true));
}
static void
@@ -1924,7 +1924,7 @@
struct ptnet_queue *pq = context;
DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
- ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
+ NET_EPOCH_WRAP(ptnet_rx_eof, (pq, PTNET_RX_BUDGET, true));
}
static void
@@ -1969,8 +1969,8 @@
rcnt += ptnet_drain_transmit_queue(pq,
queue_budget, false);
} else {
- rcnt += ptnet_rx_eof(pq, queue_budget,
- false);
+ rcnt += NET_EPOCH_WRAP_RET(ptnet_rx_eof,
+ (pq, queue_budget, false));
}
}
Index: sys/dev/nfe/if_nfe.c
===================================================================
--- sys/dev/nfe/if_nfe.c
+++ sys/dev/nfe/if_nfe.c
@@ -1651,9 +1651,9 @@
}
if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
- rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
+ rx_npkts = NET_EPOCH_WRAP_RET(nfe_jrxeof, (sc, count, &rx_npkts));
else
- rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
+ rx_npkts = NET_EPOCH_WRAP_RET(nfe_rxeof, (sc, count, &rx_npkts));
nfe_txeof(sc);
if (!if_sendq_empty(ifp))
nfe_start_locked(ifp);
@@ -1923,9 +1923,9 @@
domore = 0;
/* check Rx ring */
if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
- domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
+ domore = NET_EPOCH_WRAP_RET(nfe_jrxeof, (sc, sc->nfe_process_limit, NULL));
else
- domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
+ domore = NET_EPOCH_WRAP_RET(nfe_rxeof, (sc, sc->nfe_process_limit, NULL));
/* check Tx ring */
nfe_txeof(sc);
Index: sys/dev/nge/if_nge.c
===================================================================
--- sys/dev/nge/if_nge.c
+++ sys/dev/nge/if_nge.c
@@ -550,7 +550,7 @@
device_printf(sc->nge_dev,
"%s: unable to stop Tx/Rx MAC\n", __func__);
nge_txeof(sc);
- nge_rxeof(sc);
+ NET_EPOCH_WRAP(nge_rxeof, (sc));
if (sc->nge_head != NULL) {
m_freem(sc->nge_head);
sc->nge_head = sc->nge_tail = NULL;
@@ -1776,7 +1776,7 @@
* and then call the interrupt routine.
*/
sc->rxcycles = count;
- rx_npkts = nge_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(nge_rxeof, (sc));
nge_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
nge_start_locked(ifp);
@@ -1788,7 +1788,7 @@
status = CSR_READ_4(sc, NGE_ISR);
if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
- rx_npkts += nge_rxeof(sc);
+ rx_npkts += NET_EPOCH_WRAP_RET(nge_rxeof, (sc));
if ((status & NGE_ISR_RX_IDLE) != 0)
NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
@@ -1844,8 +1844,9 @@
if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
- NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
- nge_rxeof(sc);
+ NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0) {
+ NET_EPOCH_WRAP(nge_rxeof, (sc));
+ }
if ((status & NGE_ISR_RX_IDLE) != 0)
NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
Index: sys/dev/ntb/if_ntb/if_ntb.c
===================================================================
--- sys/dev/ntb/if_ntb/if_ntb.c
+++ sys/dev/ntb/if_ntb/if_ntb.c
@@ -172,8 +172,7 @@
if_setinitfn(ifp, ntb_net_init);
if_setsoftc(ifp, sc);
- if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
- IFF_NEEDSEPOCH);
+ if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, ntb_ioctl);
if_settransmitfn(ifp, ntb_transmit);
if_setqflushfn(ifp, ntb_qflush);
@@ -470,7 +469,7 @@
}
}
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if_input(ifp, m);
+ NET_EPOCH_WRAP(if_input, (ifp, m));
}
static void
Index: sys/dev/oce/oce_if.c
===================================================================
--- sys/dev/oce/oce_if.c
+++ sys/dev/oce/oce_if.c
@@ -2102,15 +2102,18 @@
uint16_t
oce_rq_handler(void *arg)
{
+ struct epoch_tracker et;
struct oce_rq *rq = (struct oce_rq *)arg;
struct oce_cq *cq = rq->cq;
POCE_SOFTC sc = rq->parent;
struct oce_nic_rx_cqe *cqe;
int num_cqes = 0;
+ NET_EPOCH_ENTER(et);
+
if(rq->islro) {
oce_rq_handler_lro(arg);
- return 0;
+ goto done;
}
LOCK(&rq->rx_lock);
bus_dmamap_sync(cq->ring->dma.tag,
@@ -2153,8 +2156,9 @@
oce_check_rx_bufs(sc, num_cqes, rq);
UNLOCK(&rq->rx_lock);
+done:
+ NET_EPOCH_EXIT(et);
return 0;
-
}
Index: sys/dev/qlnx/qlnxe/qlnx_os.c
===================================================================
--- sys/dev/qlnx/qlnxe/qlnx_os.c
+++ sys/dev/qlnx/qlnxe/qlnx_os.c
@@ -4754,8 +4754,8 @@
break;
case ETH_RX_CQE_TYPE_TPA_END:
- rx_pkt += qlnx_tpa_end(ha, fp, rxq,
- &cqe->fast_path_tpa_end);
+ rx_pkt += NET_EPOCH_WRAP_RET(qlnx_tpa_end,
+ (ha, fp, rxq, &cqe->fast_path_tpa_end));
fp->tpa_end++;
break;
@@ -5041,8 +5041,8 @@
}
}
- rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
- lro_enable);
+ rx_int = NET_EPOCH_WRAP_RET(qlnx_rx_int,
+ (ha, fp, ha->rx_pkt_threshold, lro_enable));
if (rx_int) {
fp->rx_pkts += rx_int;
Index: sys/dev/qlxgb/qla_isr.c
===================================================================
--- sys/dev/qlxgb/qla_isr.c
+++ sys/dev/qlxgb/qla_isr.c
@@ -306,7 +306,7 @@
case Q8_STAT_DESC_OPCODE_RCV_PKT:
case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
- qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
+ NET_EPOCH_WRAP(qla_rx_intr, (ha, (sdesc->data[0]), sds_idx, lro));
break;
Index: sys/dev/qlxgbe/ql_isr.c
===================================================================
--- sys/dev/qlxgbe/ql_isr.c
+++ sys/dev/qlxgbe/ql_isr.c
@@ -544,7 +544,7 @@
sgc.rcv.vlan_tag =
Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
}
- qla_rx_intr(ha, &sgc.rcv, sds_idx);
+ NET_EPOCH_WRAP(qla_rx_intr, (ha, &sgc.rcv, sds_idx));
break;
case Q8_STAT_DESC_OPCODE_SGL_RCV:
@@ -606,8 +606,7 @@
sgc.rcv.num_handles += nhandles;
- qla_rx_intr(ha, &sgc.rcv, sds_idx);
-
+ NET_EPOCH_WRAP(qla_rx_intr, (ha, &sgc.rcv, sds_idx));
break;
case Q8_STAT_DESC_OPCODE_SGL_LRO:
@@ -678,7 +677,7 @@
sgc.lro.num_handles += nhandles;
- if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
+ if (NET_EPOCH_WRAP_RET(qla_lro_intr, (ha, &sgc.lro, sds_idx))) {
device_printf(dev,
"%s: [sds_idx, data0, data1]="\
"[%d, 0x%llx, 0x%llx]\n",\
Index: sys/dev/qlxge/qls_isr.c
===================================================================
--- sys/dev/qlxge/qls_isr.c
+++ sys/dev/qlxge/qls_isr.c
@@ -257,8 +257,7 @@
break;
case Q81_IOCB_RX:
- ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
-
+ ret = NET_EPOCH_WRAP_RET(qls_rx_comp, (ha, cq_idx, i, (q81_rx_t *)cq_e));
break;
case Q81_IOCB_MPI:
Index: sys/dev/ral/rt2560.c
===================================================================
--- sys/dev/ral/rt2560.c
+++ sys/dev/ral/rt2560.c
@@ -1359,7 +1359,7 @@
rt2560_decryption_intr(sc);
if (r & RT2560_RX_DONE) {
- rt2560_rx_intr(sc);
+ NET_EPOCH_WRAP(rt2560_rx_intr, (sc));
rt2560_encryption_intr(sc);
}
Index: sys/dev/ral/rt2661.c
===================================================================
--- sys/dev/ral/rt2661.c
+++ sys/dev/ral/rt2661.c
@@ -1149,7 +1149,7 @@
rt2661_tx_dma_intr(sc, &sc->mgtq);
if (r1 & RT2661_RX_DONE)
- rt2661_rx_intr(sc);
+ NET_EPOCH_WRAP(rt2661_rx_intr, (sc));
if (r1 & RT2661_TX0_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[0]);
Index: sys/dev/ral/rt2860.c
===================================================================
--- sys/dev/ral/rt2860.c
+++ sys/dev/ral/rt2860.c
@@ -1420,7 +1420,7 @@
rt2860_tx_intr(sc, 5);
if (r & RT2860_RX_DONE_INT)
- rt2860_rx_intr(sc);
+ NET_EPOCH_WRAP(rt2860_rx_intr, (sc));
if (r & RT2860_TX_DONE_INT4)
rt2860_tx_intr(sc, 4);
Index: sys/dev/re/if_re.c
===================================================================
--- sys/dev/re/if_re.c
+++ sys/dev/re/if_re.c
@@ -2524,7 +2524,7 @@
RL_LOCK_ASSERT(sc);
sc->rxcycles = count;
- re_rxeof(sc, &rx_npkts);
+ NET_EPOCH_WRAP(re_rxeof, (sc, &rx_npkts));
re_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -2576,7 +2576,6 @@
static void
re_int_task(void *arg, int npending)
{
- struct epoch_tracker et;
struct rl_softc *sc;
struct ifnet *ifp;
u_int16_t status;
@@ -2604,9 +2603,7 @@
#endif
if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) {
- NET_EPOCH_ENTER(et);
- rval = re_rxeof(sc, NULL);
- NET_EPOCH_EXIT(et);
+ rval = NET_EPOCH_WRAP_RET(re_rxeof, (sc, NULL));
}
/*
@@ -2683,7 +2680,7 @@
if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
- re_rxeof(sc, NULL);
+ NET_EPOCH_WRAP(re_rxeof, (sc, NULL));
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if (sc->rl_int_rx_mod != 0 &&
(status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
@@ -3583,7 +3580,7 @@
if_printf(ifp, "watchdog timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- re_rxeof(sc, NULL);
+ NET_EPOCH_WRAP(re_rxeof, (sc, NULL));
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
re_init_locked(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
Index: sys/dev/rl/if_rl.c
===================================================================
--- sys/dev/rl/if_rl.c
+++ sys/dev/rl/if_rl.c
@@ -1459,7 +1459,7 @@
RL_LOCK_ASSERT(sc);
sc->rxcycles = count;
- rx_npkts = rl_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(rl_rxeof, (sc));
rl_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -1517,7 +1517,7 @@
CSR_WRITE_2(sc, RL_ISR, status);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
- rl_rxeof(sc);
+ NET_EPOCH_WRAP(rl_rxeof, (sc));
if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
rl_txeof(sc);
if (status & RL_ISR_SYSTEM_ERR) {
@@ -1904,7 +1904,7 @@
if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);
rl_txeof(sc);
- rl_rxeof(sc);
+ NET_EPOCH_WRAP(rl_rxeof, (sc));
sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
rl_init_locked(sc);
}
Index: sys/dev/rt/if_rt.c
===================================================================
--- sys/dev/rt/if_rt.c
+++ sys/dev/rt/if_rt.c
@@ -1715,7 +1715,7 @@
sc->intr_pending_mask &= ~sc->int_rx_done_mask;
- again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
+ again = NET_EPOCH_WRAP_RET(rt_rx_eof, (sc, &sc->rx_ring[0], sc->rx_process_limit));
RT_SOFTC_LOCK(sc);
Index: sys/dev/rtwn/pci/rtwn_pci_rx.c
===================================================================
--- sys/dev/rtwn/pci/rtwn_pci_rx.c
+++ sys/dev/rtwn/pci/rtwn_pci_rx.c
@@ -35,6 +35,7 @@
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>
+#include <sys/epoch.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -384,7 +385,7 @@
goto unlock;
if (status & (RTWN_PCI_INTR_RX | RTWN_PCI_INTR_TX_REPORT)) {
- rtwn_pci_rx_done(sc);
+ NET_EPOCH_WRAP(rtwn_pci_rx_done, (sc));
if (!(sc->sc_flags & RTWN_RUNNING))
goto unlock;
}
Index: sys/dev/sbni/if_sbni.c
===================================================================
--- sys/dev/sbni/if_sbni.c
+++ sys/dev/sbni/if_sbni.c
@@ -243,8 +243,7 @@
ifp->if_baudrate =
(csr0 & 0x01 ? 500000 : 2000000) / (1 << flags.rate);
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
- IFF_NEEDSEPOCH;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
mtx_init(&sc->lock, ifp->if_xname, MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&sc->wch, &sc->lock, 0);
@@ -689,7 +688,7 @@
sc->inppos += framelen - 4;
if (--sc->wait_frameno == 0) { /* last frame received */
- indicate_pkt(sc);
+ NET_EPOCH_WRAP(indicate_pkt, (sc));
if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
}
Index: sys/dev/sfxge/sfxge_ev.c
===================================================================
--- sys/dev/sfxge/sfxge_ev.c
+++ sys/dev/sfxge/sfxge_ev.c
@@ -83,7 +83,7 @@
}
if (rxq->pending != rxq->completed)
- sfxge_rx_qcomplete(rxq, eop);
+ NET_EPOCH_WRAP(sfxge_rx_qcomplete, (rxq, eop));
}
static struct sfxge_rxq *
Index: sys/dev/sfxge/sfxge_rx.c
===================================================================
--- sys/dev/sfxge/sfxge_rx.c
+++ sys/dev/sfxge/sfxge_rx.c
@@ -992,7 +992,7 @@
}
rxq->pending = rxq->added;
- sfxge_rx_qcomplete(rxq, B_TRUE);
+ NET_EPOCH_WRAP(sfxge_rx_qcomplete, (rxq, B_TRUE));
KASSERT(rxq->completed == rxq->pending,
("rxq->completed != rxq->pending"));
Index: sys/dev/sge/if_sge.c
===================================================================
--- sys/dev/sge/if_sge.c
+++ sys/dev/sge/if_sge.c
@@ -1362,7 +1362,7 @@
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
- sge_rxeof(sc);
+ NET_EPOCH_WRAP(sge_rxeof, (sc));
/* Wakeup Rx MAC. */
if ((status & INTR_RX_IDLE) != 0)
CSR_WRITE_4(sc, RX_CTL,
Index: sys/dev/sis/if_sis.c
===================================================================
--- sys/dev/sis/if_sis.c
+++ sys/dev/sis/if_sis.c
@@ -1664,7 +1664,7 @@
* and then call the interrupt routine
*/
sc->rxcycles = count;
- rx_npkts = sis_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(sis_rxeof, (sc));
sis_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
sis_startl(ifp);
@@ -1731,7 +1731,7 @@
if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
- sis_rxeof(sc);
+ NET_EPOCH_WRAP(sis_rxeof, (sc));
if (status & SIS_ISR_RX_OFLOW)
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
Index: sys/dev/sk/if_sk.c
===================================================================
--- sys/dev/sk/if_sk.c
+++ sys/dev/sk/if_sk.c
@@ -3117,17 +3117,17 @@
/* Handle receive interrupts first. */
if (status & SK_ISR_RX1_EOF) {
if (ifp0->if_mtu > SK_MAX_FRAMELEN)
- sk_jumbo_rxeof(sc_if0);
+ NET_EPOCH_WRAP(sk_jumbo_rxeof, (sc_if0));
else
- sk_rxeof(sc_if0);
+ NET_EPOCH_WRAP(sk_rxeof, (sc_if0));
CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
}
if (status & SK_ISR_RX2_EOF) {
if (ifp1->if_mtu > SK_MAX_FRAMELEN)
- sk_jumbo_rxeof(sc_if1);
+ NET_EPOCH_WRAP(sk_jumbo_rxeof, (sc_if1));
else
- sk_rxeof(sc_if1);
+ NET_EPOCH_WRAP(sk_rxeof, (sc_if1));
CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
}
Index: sys/dev/smc/if_smc.c
===================================================================
--- sys/dev/smc/if_smc.c
+++ sys/dev/smc/if_smc.c
@@ -673,6 +673,7 @@
static void
smc_task_rx(void *context, int pending)
{
+ struct epoch_tracker et;
u_int packet, status, len;
uint8_t *data;
struct ifnet *ifp;
@@ -774,6 +775,7 @@
SMC_UNLOCK(sc);
+ NET_EPOCH_ENTER(et);
while (mhead != NULL) {
m = mhead;
mhead = mhead->m_next;
@@ -781,6 +783,7 @@
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
(*ifp->if_input)(ifp, m);
}
+ NET_EPOCH_EXIT(et);
}
#ifdef DEVICE_POLLING
Index: sys/dev/ste/if_ste.c
===================================================================
--- sys/dev/ste/if_ste.c
+++ sys/dev/ste/if_ste.c
@@ -481,7 +481,7 @@
STE_LOCK_ASSERT(sc);
- rx_npkts = ste_rxeof(sc, count);
+ rx_npkts = NET_EPOCH_WRAP_RET(ste_rxeof, (sc, count));
ste_txeof(sc);
ste_txeoc(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -538,7 +538,7 @@
}
if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
- ste_rxeof(sc, -1);
+ NET_EPOCH_WRAP(ste_rxeof, (sc, -1));
/*
* The controller has no ability to Rx interrupt
* moderation feature. Receiving 64 bytes frames
@@ -1969,7 +1969,7 @@
ste_txeof(sc);
ste_txeoc(sc);
- ste_rxeof(sc, -1);
+ NET_EPOCH_WRAP(ste_rxeof, (sc, -1));
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ste_init_locked(sc);
Index: sys/dev/stge/if_stge.c
===================================================================
--- sys/dev/stge/if_stge.c
+++ sys/dev/stge/if_stge.c
@@ -1486,7 +1486,7 @@
/* Receive interrupts. */
if ((status & IS_RxDMAComplete) != 0) {
- stge_rxeof(sc);
+ NET_EPOCH_WRAP(stge_rxeof, (sc));
if ((status & IS_RFDListEnd) != 0)
CSR_WRITE_4(sc, STGE_DMACtrl,
DMAC_RxDMAPollNow);
@@ -1793,7 +1793,7 @@
}
sc->sc_cdata.stge_rxcycles = count;
- rx_npkts = stge_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(stge_rxeof, (sc));
stge_txeof(sc);
if (cmd == POLL_AND_CHECK_STATUS) {
Index: sys/dev/ti/if_ti.c
===================================================================
--- sys/dev/ti/if_ti.c
+++ sys/dev/ti/if_ti.c
@@ -2979,7 +2979,7 @@
bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
sc->ti_cdata.ti_status_map, BUS_DMASYNC_POSTREAD);
/* Check RX return ring producer/consumer */
- ti_rxeof(sc);
+ NET_EPOCH_WRAP(ti_rxeof, (sc));
/* Check TX ring producer/consumer */
ti_txeof(sc);
Index: sys/dev/tsec/if_tsec.c
===================================================================
--- sys/dev/tsec/if_tsec.c
+++ sys/dev/tsec/if_tsec.c
@@ -920,7 +920,7 @@
TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);
- rx_npkts = tsec_receive_intr_locked(sc, count);
+ rx_npkts = NET_EPOCH_WRAP_RET(tsec_receive_intr_locked, (sc, count));
TSEC_RECEIVE_UNLOCK(sc);
@@ -1427,7 +1427,7 @@
/* Confirm the interrupt was received by driver */
TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
- tsec_receive_intr_locked(sc, -1);
+ NET_EPOCH_WRAP(tsec_receive_intr_locked, (sc, -1));
TSEC_RECEIVE_UNLOCK(sc);
}
Index: sys/dev/usb/wlan/if_rum.c
===================================================================
--- sys/dev/usb/wlan/if_rum.c
+++ sys/dev/usb/wlan/if_rum.c
@@ -1168,6 +1168,7 @@
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame_min *wh;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct mbuf *m = NULL;
struct usb_page_cache *pc;
uint32_t flags;
@@ -1286,6 +1287,7 @@
else
ni = NULL;
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi,
RT2573_NOISE_FLOOR);
@@ -1293,6 +1295,7 @@
} else
(void) ieee80211_input_all(ic, m, rssi,
RT2573_NOISE_FLOOR);
+ NET_EPOCH_EXIT(et);
}
RUM_LOCK(sc);
rum_start(sc);
Index: sys/dev/usb/wlan/if_run.c
===================================================================
--- sys/dev/usb/wlan/if_run.c
+++ sys/dev/usb/wlan/if_run.c
@@ -2811,6 +2811,7 @@
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct rt2870_rxd *rxd;
struct rt2860_rxwi *rxwi;
uint32_t flags;
@@ -2929,12 +2930,14 @@
}
}
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else {
(void)ieee80211_input_all(ic, m, rssi, nf);
}
+ NET_EPOCH_EXIT(et);
return;
Index: sys/dev/usb/wlan/if_uath.c
===================================================================
--- sys/dev/usb/wlan/if_uath.c
+++ sys/dev/usb/wlan/if_uath.c
@@ -2705,6 +2705,7 @@
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct mbuf *m = NULL;
struct uath_data *data;
struct uath_rx_desc *desc = NULL;
@@ -2751,6 +2752,7 @@
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
nf = -95; /* XXX */
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void) ieee80211_input(ni, m,
(int)be32toh(desc->rssi), nf);
@@ -2759,6 +2761,7 @@
} else
(void) ieee80211_input_all(ic, m,
(int)be32toh(desc->rssi), nf);
+ NET_EPOCH_EXIT(et);
m = NULL;
desc = NULL;
}
Index: sys/dev/usb/wlan/if_upgt.c
===================================================================
--- sys/dev/usb/wlan/if_upgt.c
+++ sys/dev/usb/wlan/if_upgt.c
@@ -2211,6 +2211,7 @@
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct mbuf *m = NULL;
struct upgt_data *data;
int8_t nf;
@@ -2248,12 +2249,14 @@
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
nf = -95; /* XXX */
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
/* node is no longer needed */
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
+ NET_EPOCH_EXIT(et);
m = NULL;
}
UPGT_LOCK(sc);
Index: sys/dev/usb/wlan/if_ural.c
===================================================================
--- sys/dev/usb/wlan/if_ural.c
+++ sys/dev/usb/wlan/if_ural.c
@@ -851,6 +851,7 @@
struct ural_softc *sc = usbd_xfer_softc(xfer);
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct mbuf *m = NULL;
struct usb_page_cache *pc;
uint32_t flags;
@@ -931,11 +932,13 @@
if (m) {
ni = ieee80211_find_rxnode(ic,
mtod(m, struct ieee80211_frame_min *));
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
+ NET_EPOCH_EXIT(et);
}
RAL_LOCK(sc);
ural_start(sc);
Index: sys/dev/usb/wlan/if_urtw.c
===================================================================
--- sys/dev/usb/wlan/if_urtw.c
+++ sys/dev/usb/wlan/if_urtw.c
@@ -4042,6 +4042,7 @@
struct urtw_softc *sc = usbd_xfer_softc(xfer);
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct mbuf *m = NULL;
struct urtw_data *data;
int8_t nf = -95;
@@ -4085,12 +4086,14 @@
} else
ni = NULL;
+ NET_EPOCH_ENTER(et);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
/* node is no longer needed */
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
+ NET_EPOCH_EXIT(et);
m = NULL;
}
URTW_LOCK(sc);
Index: sys/dev/usb/wlan/if_zyd.c
===================================================================
--- sys/dev/usb/wlan/if_zyd.c
+++ sys/dev/usb/wlan/if_zyd.c
@@ -2223,6 +2223,7 @@
struct zyd_softc *sc = usbd_xfer_softc(xfer);
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
+ struct epoch_tracker et;
struct zyd_rx_desc desc;
struct mbuf *m;
struct usb_page_cache *pc;
@@ -2278,6 +2279,7 @@
* "ieee80211_input" here, and not some lines up!
*/
ZYD_UNLOCK(sc);
+ NET_EPOCH_ENTER(et);
for (i = 0; i < sc->sc_rx_count; i++) {
rssi = sc->sc_rx_data[i].rssi;
m = sc->sc_rx_data[i].m;
@@ -2293,6 +2295,7 @@
} else
(void)ieee80211_input_all(ic, m, rssi, nf);
}
+ NET_EPOCH_EXIT(et);
ZYD_LOCK(sc);
zyd_start(sc);
break;
Index: sys/dev/vge/if_vge.c
===================================================================
--- sys/dev/vge/if_vge.c
+++ sys/dev/vge/if_vge.c
@@ -1714,7 +1714,7 @@
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
goto done;
- rx_npkts = vge_rxeof(sc, count);
+ rx_npkts = NET_EPOCH_WRAP_RET(vge_rxeof, (sc, count));
vge_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -1739,7 +1739,7 @@
}
if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
- vge_rxeof(sc, count);
+ NET_EPOCH_WRAP(vge_rxeof, (sc, count));
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
}
@@ -1787,9 +1787,9 @@
goto done;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
- vge_rxeof(sc, VGE_RX_DESC_CNT);
+ NET_EPOCH_WRAP(vge_rxeof, (sc, VGE_RX_DESC_CNT));
if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
- vge_rxeof(sc, VGE_RX_DESC_CNT);
+ NET_EPOCH_WRAP(vge_rxeof, (sc, VGE_RX_DESC_CNT));
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
}
@@ -2393,7 +2393,7 @@
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
vge_txeof(sc);
- vge_rxeof(sc, VGE_RX_DESC_CNT);
+ NET_EPOCH_WRAP(vge_rxeof, (sc, VGE_RX_DESC_CNT));
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vge_init_locked(sc);
Index: sys/dev/virtio/network/if_vtnet.c
===================================================================
--- sys/dev/virtio/network/if_vtnet.c
+++ sys/dev/virtio/network/if_vtnet.c
@@ -1944,7 +1944,7 @@
return;
}
- more = vtnet_rxq_eof(rxq);
+ more = NET_EPOCH_WRAP_RET(vtnet_rxq_eof, (rxq));
if (more || vtnet_rxq_enable_intr(rxq) != 0) {
if (!more)
vtnet_rxq_disable_intr(rxq);
@@ -1981,7 +1981,7 @@
return;
}
- more = vtnet_rxq_eof(rxq);
+ more = NET_EPOCH_WRAP_RET(vtnet_rxq_eof, (rxq));
if (more || vtnet_rxq_enable_intr(rxq) != 0) {
if (!more)
vtnet_rxq_disable_intr(rxq);
Index: sys/dev/vnic/nicvf_queues.c
===================================================================
--- sys/dev/vnic/nicvf_queues.c
+++ sys/dev/vnic/nicvf_queues.c
@@ -731,6 +731,7 @@
static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
+ struct epoch_tracker et;
struct mbuf *mbuf;
struct ifnet *ifp;
int processed_cqe, work_done = 0, tx_done = 0;
@@ -821,6 +822,7 @@
NICVF_CMP_UNLOCK(cq);
+ NET_EPOCH_ENTER(et);
ifp = nic->ifp;
/* Push received MBUFs to the stack */
while (!buf_ring_empty(cq->rx_br)) {
@@ -828,6 +830,7 @@
if (__predict_true(mbuf != NULL))
(*ifp->if_input)(ifp, mbuf);
}
+ NET_EPOCH_EXIT(et);
return (cmp_err);
}
Index: sys/dev/vr/if_vr.c
===================================================================
--- sys/dev/vr/if_vr.c
+++ sys/dev/vr/if_vr.c
@@ -1610,7 +1610,7 @@
VR_LOCK_ASSERT(sc);
sc->rxcycles = count;
- rx_npkts = vr_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(vr_rxeof, (sc));
vr_txeof(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
vr_start_locked(ifp);
@@ -1730,7 +1730,7 @@
return;
}
}
- vr_rxeof(sc);
+ NET_EPOCH_WRAP(vr_rxeof, (sc));
if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
Index: sys/dev/vte/if_vte.c
===================================================================
--- sys/dev/vte/if_vte.c
+++ sys/dev/vte/if_vte.c
@@ -1361,7 +1361,7 @@
break;
if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
MISR_RX_FIFO_FULL)) != 0)
- vte_rxeof(sc);
+ NET_EPOCH_WRAP(vte_rxeof, (sc));
if ((status & MISR_TX_DONE) != 0)
vte_txeof(sc);
if ((status & MISR_EVENT_CNT_OFLOW) != 0)
Index: sys/dev/wi/if_wi.c
===================================================================
--- sys/dev/wi/if_wi.c
+++ sys/dev/wi/if_wi.c
@@ -575,7 +575,7 @@
status = CSR_READ_2(sc, WI_EVENT_STAT);
if (status & WI_EV_RX)
- wi_rx_intr(sc);
+ NET_EPOCH_WRAP(wi_rx_intr, (sc));
if (status & WI_EV_ALLOC)
wi_tx_intr(sc);
if (status & WI_EV_TX_EXC)
Index: sys/dev/wpi/if_wpi.c
===================================================================
--- sys/dev/wpi/if_wpi.c
+++ sys/dev/wpi/if_wpi.c
@@ -2577,7 +2577,7 @@
if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
(r2 & WPI_FH_INT_RX))
- wpi_notif_intr(sc);
+ NET_EPOCH_WRAP(wpi_notif_intr, (sc));
if (r1 & WPI_INT_ALIVE)
wakeup(sc); /* Firmware is alive. */
Index: sys/dev/xen/netback/netback.c
===================================================================
--- sys/dev/xen/netback/netback.c
+++ sys/dev/xen/netback/netback.c
@@ -1429,6 +1429,7 @@
static void
xnb_intr(void *arg)
{
+ struct epoch_tracker et;
struct xnb_softc *xnb;
struct ifnet *ifp;
netif_tx_back_ring_t *txb;
@@ -1439,6 +1440,7 @@
txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
mtx_lock(&xnb->tx_lock);
+ NET_EPOCH_ENTER(et);
do {
int notify;
req_prod_local = txb->sring->req_prod;
@@ -1464,6 +1466,7 @@
txb->sring->req_event = txb->req_cons + 1;
xen_mb();
} while (txb->sring->req_prod != req_prod_local) ;
+ NET_EPOCH_EXIT(et);
mtx_unlock(&xnb->tx_lock);
xnb_start(ifp);
Index: sys/dev/xen/netfront/netfront.c
===================================================================
--- sys/dev/xen/netfront/netfront.c
+++ sys/dev/xen/netfront/netfront.c
@@ -627,7 +627,7 @@
{
XN_RX_LOCK(rxq);
- xn_rxeof(rxq);
+ NET_EPOCH_WRAP(xn_rxeof, (rxq));
XN_RX_UNLOCK(rxq);
}
@@ -1722,7 +1722,7 @@
xn_alloc_rx_buffers(rxq);
rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
- xn_rxeof(rxq);
+ NET_EPOCH_WRAP(xn_rxeof, (rxq));
XN_RX_UNLOCK(rxq);
}
Index: sys/dev/xilinx/if_xae.c
===================================================================
--- sys/dev/xilinx/if_xae.c
+++ sys/dev/xilinx/if_xae.c
@@ -208,6 +208,7 @@
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
+ struct epoch_tracker et;
xdma_transfer_status_t st;
struct xae_softc *sc;
struct ifnet *ifp;
@@ -221,6 +222,7 @@
XAE_LOCK(sc);
+ NET_EPOCH_ENTER(et);
ifp = sc->ifp;
cnt_processed = 0;
@@ -245,6 +247,7 @@
}
xae_rx_enqueue(sc, cnt_processed);
+ NET_EPOCH_EXIT(et);
XAE_UNLOCK(sc);
Index: sys/dev/xl/if_xl.c
===================================================================
--- sys/dev/xl/if_xl.c
+++ sys/dev/xl/if_xl.c
@@ -1979,7 +1979,7 @@
XL_LOCK(sc);
if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
- xl_rxeof(sc);
+ NET_EPOCH_WRAP(xl_rxeof, (sc));
XL_UNLOCK(sc);
}
@@ -2150,6 +2150,7 @@
static void
xl_intr(void *arg)
{
+ struct epoch_tracker et;
struct xl_softc *sc = arg;
struct ifnet *ifp = sc->xl_ifp;
u_int16_t status;
@@ -2173,10 +2174,12 @@
break;
if (status & XL_STAT_UP_COMPLETE) {
+ NET_EPOCH_ENTER(et);
if (xl_rxeof(sc) == 0) {
while (xl_rx_resync(sc))
xl_rxeof(sc);
}
+ NET_EPOCH_EXIT(et);
}
if (status & XL_STAT_DOWN_COMPLETE) {
@@ -2235,7 +2238,7 @@
XL_LOCK_ASSERT(sc);
sc->rxcycles = count;
- rx_npkts = xl_rxeof(sc);
+ rx_npkts = NET_EPOCH_WRAP_RET(xl_rxeof, (sc));
if (sc->xl_type == XL_TYPE_905B)
xl_txeof_90xB(sc);
else
@@ -3098,6 +3101,7 @@
static int
xl_watchdog(struct xl_softc *sc)
{
+ struct epoch_tracker et;
struct ifnet *ifp = sc->xl_ifp;
u_int16_t status = 0;
int misintr;
@@ -3107,7 +3111,9 @@
if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
return (0);
+ NET_EPOCH_ENTER(et);
xl_rxeof(sc);
+ NET_EPOCH_EXIT(et);
xl_txeoc(sc);
misintr = 0;
if (sc->xl_type == XL_TYPE_905B) {
Index: sys/net/netisr.c
===================================================================
--- sys/net/netisr.c
+++ sys/net/netisr.c
@@ -920,6 +920,7 @@
static void
swi_net(void *arg)
{
+ struct epoch_tracker et;
#ifdef NETISR_LOCKING
struct rm_priotracker tracker;
#endif
@@ -931,7 +932,9 @@
#ifdef DEVICE_POLLING
KASSERT(nws_count == 1,
("%s: device_polling but nws_count != 1", __func__));
+ NET_EPOCH_ENTER(et);
netisr_poll();
+ NET_EPOCH_EXIT(et);
#endif
#ifdef NETISR_LOCKING
NETISR_RLOCK(&tracker);
@@ -940,6 +943,7 @@
KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
if (nwsp->nws_flags & NWS_DISPATCHING)
goto out;
+ NET_EPOCH_ENTER(et);
nwsp->nws_flags |= NWS_RUNNING;
nwsp->nws_flags &= ~NWS_SCHEDULED;
while ((bits = nwsp->nws_pendingbits) != 0) {
@@ -950,6 +954,7 @@
}
}
nwsp->nws_flags &= ~NWS_RUNNING;
+ NET_EPOCH_EXIT(et);
out:
NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
Index: sys/sys/epoch.h
===================================================================
--- sys/sys/epoch.h
+++ sys/sys/epoch.h
@@ -104,6 +104,20 @@
#define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt)
#define NET_EPOCH_CALL(f, c) epoch_call(net_epoch_preempt, (f), (c))
#define NET_EPOCH_ASSERT() MPASS(in_epoch(net_epoch_preempt))
+#define NET_EPOCH_WRAP_RET(fn, arg) ({ \
+ struct epoch_tracker __et; \
+ __typeof(fn arg) __ret; \
+ NET_EPOCH_ENTER(__et); \
+ __ret = fn arg; \
+ NET_EPOCH_EXIT(__et); \
+ __ret; \
+})
+#define NET_EPOCH_WRAP(fn, arg) do { \
+ struct epoch_tracker __et; \
+ NET_EPOCH_ENTER(__et); \
+ fn arg; \
+ NET_EPOCH_EXIT(__et); \
+} while (0)
#endif /* _KERNEL */
#endif /* _SYS_EPOCH_H_ */
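
For reference, a minimal sketch of how the two wrappers added to sys/sys/epoch.h above are intended to be used in a driver RX path. The driver names below (foo_softc, foo_rxeof, foo_intr) are hypothetical and exist only for illustration; they are not part of this change.

	/* Illustrative kernel-side sketch only; not part of the patch. */
	#include <sys/param.h>
	#include <sys/epoch.h>

	struct foo_softc;			/* opaque, hypothetical driver state */

	/* Hypothetical RX handler; returns the number of packets processed. */
	static int
	foo_rxeof(struct foo_softc *sc __unused)
	{
		return (0);	/* a real driver would drain its RX ring here */
	}

	static void
	foo_intr(void *arg)
	{
		struct foo_softc *sc = arg;
		int npkts;

		/* Return value not needed: wrap the call in the net epoch. */
		NET_EPOCH_WRAP(foo_rxeof, (sc));

		/* Return value needed (e.g. a polling budget): use the _RET form. */
		npkts = NET_EPOCH_WRAP_RET(foo_rxeof, (sc));
		(void)npkts;
	}

Both macros declare a local epoch_tracker, enter the net epoch, invoke the callee with the parenthesized argument list, and exit the epoch; the _RET variant additionally captures and yields the callee's return value via a GNU statement expression.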