Index: sys/dev/ath/if_ath.c
===================================================================
--- sys/dev/ath/if_ath.c
+++ sys/dev/ath/if_ath.c
@@ -760,7 +760,7 @@
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
 	    device_get_nameunit(sc->sc_dev));
 
-	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
 	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
 	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
 	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
Index: sys/dev/ath/if_ath_rx.c
===================================================================
--- sys/dev/ath/if_ath_rx.c
+++ sys/dev/ath/if_ath_rx.c
@@ -647,7 +647,6 @@
     uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
     struct mbuf *m)
 {
-	struct epoch_tracker et;
 	uint64_t rstamp;
 	/* XXX TODO: make this an mbuf tag? */
 	struct ieee80211_rx_stats rxs;
@@ -942,7 +941,6 @@
 		rxs.c_nf_ext[i] = nf;
 	}
 
-	NET_EPOCH_ENTER(et);
 	if (ni != NULL) {
 		/*
 		 * Only punt packets for ampdu reorder processing for
@@ -988,7 +986,6 @@
 		type = ieee80211_input_mimo_all(ic, m);
 		m = NULL;
 	}
-	NET_EPOCH_EXIT(et);
 
 	/*
 	 * At this point we have passed the frame up the stack; thus
Index: sys/dev/bge/if_bge.c
===================================================================
--- sys/dev/bge/if_bge.c
+++ sys/dev/bge/if_bge.c
@@ -3351,7 +3351,7 @@
 	sc->bge_dev = dev;
 
 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
-	TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
+	NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
 
 	pci_enable_busmaster(dev);
@@ -4646,7 +4646,6 @@
 static void
 bge_intr_task(void *arg, int pending)
 {
-	struct epoch_tracker et;
 	struct bge_softc *sc;
 	if_t ifp;
 	uint32_t status, status_tag;
@@ -4689,9 +4688,7 @@
 	    sc->bge_rx_saved_considx != rx_prod) {
 		/* Check RX return ring producer/consumer. */
		BGE_UNLOCK(sc);
-		NET_EPOCH_ENTER(et);
 		bge_rxeof(sc, rx_prod, 0);
-		NET_EPOCH_EXIT(et);
 		BGE_LOCK(sc);
 	}
 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
Index: sys/dev/ena/ena.c
===================================================================
--- sys/dev/ena/ena.c
+++ sys/dev/ena/ena.c
@@ -1353,7 +1353,7 @@
 	for (i = 0; i < adapter->num_queues; i++) {
 		queue = &adapter->que[i];
 
-		TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
+		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
 		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
 		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
Index: sys/dev/re/if_re.c
===================================================================
--- sys/dev/re/if_re.c
+++ sys/dev/re/if_re.c
@@ -1656,7 +1656,7 @@
 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
 	IFQ_SET_READY(&ifp->if_snd);
 
-	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
+	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
 
 #define	RE_PHYAD_INTERNAL	 0
@@ -2576,7 +2576,6 @@
 static void
 re_int_task(void *arg, int npending)
 {
-	struct epoch_tracker et;
 	struct rl_softc		*sc;
 	struct ifnet		*ifp;
 	u_int16_t		status;
@@ -2603,11 +2602,8 @@
 	}
 #endif
 
-	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) {
-		NET_EPOCH_ENTER(et);
+	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
 		rval = re_rxeof(sc, NULL);
-		NET_EPOCH_EXIT(et);
-	}
 
 	/*
 	 * Some chips will ignore a second TX request issued
Index: sys/dev/virtio/network/if_vtnet.c
===================================================================
--- sys/dev/virtio/network/if_vtnet.c
+++ sys/dev/virtio/network/if_vtnet.c
@@ -717,7 +717,7 @@
 	if (rxq->vtnrx_sg == NULL)
 		return (ENOMEM);
 
-	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
+	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
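
The driver hunks above all make the same change: the RX task is registered with NET_TASK_INIT() instead of TASK_INIT(), and the explicit NET_EPOCH_ENTER()/NET_EPOCH_EXIT() bracketing around the receive path is dropped, because the taskqueue now enters the net epoch before running a TASK_NETWORK handler. A minimal sketch of that pattern for a hypothetical xx(4) driver follows; the xx_* names are illustrative and not part of this patch.

/* Hypothetical xx(4) driver, shown only to illustrate the conversion. */
static void	xx_rxeof(struct xx_softc *);

struct xx_softc {
	struct task	xx_rxtask;	/* RX processing task */
};

static void
xx_rx_task(void *arg, int pending)
{
	struct xx_softc *sc = arg;

	/*
	 * No local epoch_tracker and no NET_EPOCH_ENTER()/EXIT() pair:
	 * the task was initialized with NET_TASK_INIT(), so the taskqueue
	 * has already entered the net epoch before calling this handler,
	 * and if_input() and friends may be called directly.
	 */
	NET_EPOCH_ASSERT();
	xx_rxeof(sc);
}

static int
xx_attach(device_t dev)
{
	struct xx_softc *sc = device_get_softc(dev);

	/* Previously: TASK_INIT(&sc->xx_rxtask, 0, xx_rx_task, sc); */
	NET_TASK_INIT(&sc->xx_rxtask, 0, xx_rx_task, sc);
	return (0);
}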
Index: sys/kern/subr_gtaskqueue.c
===================================================================
--- sys/kern/subr_gtaskqueue.c
+++ sys/kern/subr_gtaskqueue.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include <sys/epoch.h>
 #include
 #include
 #include
@@ -343,13 +344,16 @@
 static void
 gtaskqueue_run_locked(struct gtaskqueue *queue)
 {
+	struct epoch_tracker et;
 	struct gtaskqueue_busy tb;
 	struct gtask *gtask;
+	bool in_net_epoch;
 
 	KASSERT(queue != NULL, ("tq is NULL"));
 	TQ_ASSERT_LOCKED(queue);
 	tb.tb_running = NULL;
 	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+	in_net_epoch = false;
 
 	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
 		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -359,11 +363,20 @@
 		TQ_UNLOCK(queue);
 
 		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
+		if (!in_net_epoch && TASK_IS_NET(gtask)) {
+			NET_EPOCH_ENTER(et);
+			in_net_epoch = true;
+		} else if (in_net_epoch && !TASK_IS_NET(gtask)) {
+			NET_EPOCH_EXIT(et);
+			in_net_epoch = false;
+		}
 		gtask->ta_func(gtask->ta_context);
 
 		TQ_LOCK(queue);
 		wakeup(gtask);
 	}
+	if (in_net_epoch)
+		NET_EPOCH_EXIT(et);
 	LIST_REMOVE(&tb, tb_link);
 }
Index: sys/kern/subr_taskqueue.c
===================================================================
--- sys/kern/subr_taskqueue.c
+++ sys/kern/subr_taskqueue.c
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <sys/epoch.h>
 #include
 #include
 #include
@@ -371,7 +372,7 @@
 	 * anyway) so just insert it at tail while we have the
 	 * queue lock.
 	 */
-	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
+	TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
 	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
 	queue->tq_hint = &t_barrier;
 	t_barrier.ta_pending = 1;
@@ -442,14 +443,17 @@
 static void
 taskqueue_run_locked(struct taskqueue *queue)
 {
+	struct epoch_tracker et;
 	struct taskqueue_busy tb;
 	struct task *task;
+	bool in_net_epoch;
 	int pending;
 
 	KASSERT(queue != NULL, ("tq is NULL"));
 	TQ_ASSERT_LOCKED(queue);
 	tb.tb_running = NULL;
 	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+	in_net_epoch = false;
 
 	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
 		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -462,11 +466,20 @@
 		TQ_UNLOCK(queue);
 
 		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
+		if (!in_net_epoch && TASK_IS_NET(task)) {
+			NET_EPOCH_ENTER(et);
+			in_net_epoch = true;
+		} else if (in_net_epoch && !TASK_IS_NET(task)) {
+			NET_EPOCH_EXIT(et);
+			in_net_epoch = false;
+		}
 		task->ta_func(task->ta_context, pending);
 
 		TQ_LOCK(queue);
 		wakeup(task);
 	}
+	if (in_net_epoch)
+		NET_EPOCH_EXIT(et);
 	LIST_REMOVE(&tb, tb_link);
 }
Index: sys/net/iflib.c
===================================================================
--- sys/net/iflib.c
+++ sys/net/iflib.c
@@ -3781,7 +3781,6 @@
 static void
 _task_fn_rx(void *context)
 {
-	struct epoch_tracker et;
 	iflib_rxq_t rxq = context;
 	if_ctx_t ctx = rxq->ifr_ctx;
 	bool more;
@@ -3805,7 +3804,6 @@
 	budget = ctx->ifc_sysctl_rx_budget;
 	if (budget == 0)
 		budget = 16;	/* XXX */
-	NET_EPOCH_ENTER(et);
 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
 		if (ctx->ifc_flags & IFC_LEGACY)
 			IFDI_INTR_ENABLE(ctx);
@@ -3813,7 +3811,6 @@
 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
 		DBG_COUNTER_INC(rx_intr_enables);
 	}
-	NET_EPOCH_EXIT(et);
 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
 		return;
 	if (more)
@@ -5971,7 +5968,7 @@
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_RXTX:
 		q = &ctx->ifc_rxqs[qid];
@@ -5980,7 +5977,7 @@
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr_rxtx;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_ADMIN:
 		q = ctx;
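
The taskqueue_run_locked() and gtaskqueue_run_locked() changes above batch the epoch transitions rather than entering and leaving the net epoch around every handler: the epoch is entered before the first TASK_NETWORK task of a run, left only when a non-network task (or the end of the queue) is reached, and in_net_epoch tracks which state the loop is in. A sketch of the resulting behaviour for a mixed queue; the task names are illustrative only.

/*
 * Illustration only.  Suppose the queue holds, in order:
 *     A (TASK_NETWORK), B (TASK_NETWORK), C (no flags), D (TASK_NETWORK)
 * The run loop then performs:
 *     NET_EPOCH_ENTER(et);  A->ta_func(); B->ta_func();   (one section for A and B)
 *     NET_EPOCH_EXIT(et);   C->ta_func();                 (plain task runs outside)
 *     NET_EPOCH_ENTER(et);  D->ta_func();
 *     NET_EPOCH_EXIT(et);                                 (final exit after the loop)
 * i.e. one epoch section per run of consecutive network tasks, not one per task.
 */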
Index: sys/sys/_task.h
===================================================================
--- sys/sys/_task.h
+++ sys/sys/_task.h
@@ -48,11 +48,18 @@
 struct task {
 	STAILQ_ENTRY(task) ta_link;	/* (q) link for queue */
 	uint16_t ta_pending;		/* (q) count times queued */
-	u_short	ta_priority;		/* (c) Priority */
+	uint8_t	ta_priority;		/* (c) Priority */
+	uint8_t	ta_flags;		/* (c) Flags */
 	task_fn_t *ta_func;		/* (c) task handler */
 	void	*ta_context;		/* (c) argument for handler */
 };
 
+#define	TASK_ENQUEUED		0x1
+#define	TASK_NOENQUEUE		0x2
+#define	TASK_NETWORK		0x4
+
+#define	TASK_IS_NET(ta)		((ta)->ta_flags & TASK_NETWORK)
+
 #ifdef _KERNEL
 
 typedef void gtask_fn_t(void *context);
Index: sys/sys/epoch.h
===================================================================
--- sys/sys/epoch.h
+++ sys/sys/epoch.h
@@ -104,6 +104,9 @@
 #define	NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt)
 #define	NET_EPOCH_CALL(f, c) epoch_call(net_epoch_preempt, (f), (c))
 #define	NET_EPOCH_ASSERT() MPASS(in_epoch(net_epoch_preempt))
+#define	NET_TASK_INIT(t, p, f, c) TASK_INIT_FLAGS(t, p, f, c, TASK_NETWORK)
+#define	NET_GROUPTASK_INIT(gtask, prio, func, ctx)			\
+	GTASK_INIT(&(gtask)->gt_task, TASK_NETWORK, (prio), (func), (ctx))
 #endif	/* _KERNEL */
 
 #endif	/* _SYS_EPOCH_H_ */
Index: sys/sys/gtaskqueue.h
===================================================================
--- sys/sys/gtaskqueue.h
+++ sys/sys/gtaskqueue.h
@@ -84,10 +84,6 @@
     gtask_fn_t *fn, const char *name);
 void	taskqgroup_config_gtask_deinit(struct grouptask *gtask);
 
-#define TASK_ENQUEUED		0x1
-#define TASK_SKIP_WAKEUP	0x2
-#define TASK_NOENQUEUE		0x4
-
 #define	GTASK_INIT(gtask, flags, priority, func, context) do {	\
 	(gtask)->ta_flags = flags;				\
 	(gtask)->ta_priority = (priority);			\
@@ -96,7 +92,7 @@
 } while (0)
 
 #define	GROUPTASK_INIT(gtask, priority, func, context)	\
-	GTASK_INIT(&(gtask)->gt_task, TASK_SKIP_WAKEUP, priority, func, context)
+	GTASK_INIT(&(gtask)->gt_task, 0, priority, func, context)
 
 #define	GROUPTASK_ENQUEUE(gtask)			\
 	grouptaskqueue_enqueue((gtask)->gt_taskqueue, &(gtask)->gt_task)
Index: sys/sys/taskqueue.h
===================================================================
--- sys/sys/taskqueue.h
+++ sys/sys/taskqueue.h
@@ -107,8 +107,7 @@
     taskqueue_callback_fn callback, void *context);
 
 #define	TASK_INITIALIZER(priority, func, context)	\
-	{ .ta_pending = 0,				\
-	  .ta_priority = (priority),			\
+	{ .ta_priority = (priority),			\
 	  .ta_func = (func),				\
 	  .ta_context = (context) }
 
@@ -121,18 +120,25 @@
 /*
  * Initialise a task structure.
  */
-#define TASK_INIT(task, priority, func, context) do {	\
-	(task)->ta_pending = 0;				\
-	(task)->ta_priority = (priority);		\
-	(task)->ta_func = (func);			\
-	(task)->ta_context = (context);			\
+#define	TASK_INIT_FLAGS(task, priority, func, context, flags) do {	\
+	MPASS((priority) >= 0 && (priority) <= 255);	\
+	(task)->ta_pending = 0;				\
+	(task)->ta_priority = (priority);		\
+	(task)->ta_flags = (flags);			\
+	(task)->ta_func = (func);			\
+	(task)->ta_context = (context);			\
 } while (0)
 
+#define	TASK_INIT(t, p, f, c)	TASK_INIT_FLAGS(t, p, f, c, 0)
+
 void _timeout_task_init(struct taskqueue *queue,
	    struct timeout_task *timeout_task, int priority, task_fn_t func,
	    void *context);
-#define TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) \
-	_timeout_task_init(queue, timeout_task, priority, func, context);
+#define	TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) do { \
+	_Static_assert((priority) >= 0 && (priority) <= 255,	\
+	    "struct task priority is 8 bit in size");		\
+	_timeout_task_init(queue, timeout_task, priority, func, context); \
+} while (0)
 
 /*
  * Declare a reference to a taskqueue.
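
With ta_priority narrowed to eight bits, task priorities must now fit in 0..255: TASK_INIT_FLAGS() asserts the range with MPASS() and TIMEOUT_TASK_INIT() rejects out-of-range constants at compile time. A short usage sketch of the new initializer family; the foo_* names are hypothetical and not taken from this patch.

/* Hypothetical consumer of the new initializers; illustration only. */
static void	foo_rx_handler(void *, int);
static void	foo_link_handler(void *, int);
static void	foo_gtask_handler(void *);

struct foo_softc {
	struct task		foo_rxtask;	/* handler runs inside the net epoch */
	struct task		foo_linktask;	/* ordinary task */
	struct grouptask	foo_gtask;	/* grouptask variant */
};

static void
foo_init_tasks(struct foo_softc *sc)
{
	NET_TASK_INIT(&sc->foo_rxtask, 0, foo_rx_handler, sc);
	TASK_INIT(&sc->foo_linktask, 0, foo_link_handler, sc);
	NET_GROUPTASK_INIT(&sc->foo_gtask, 0, foo_gtask_handler, sc);
	/* TASK_INIT(&sc->foo_linktask, 1000, foo_link_handler, sc) would trip the MPASS. */
}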