Index: UPDATING
===================================================================
--- UPDATING
+++ UPDATING
@@ -26,6 +26,10 @@
 	disable the most expensive debugging functionality run
 	"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
 
+20200XXX:
+	Size of taskqueue(9)'s struct task has changed.  All kernel
+	modules using taskqueue(9) need recompilation.
+
 20200108:
 	Clang/LLVM is now the default compiler and LLD the default
 	linker for riscv64.
Index: sys/dev/al_eth/al_eth.c
===================================================================
--- sys/dev/al_eth/al_eth.c
+++ sys/dev/al_eth/al_eth.c
@@ -2512,7 +2512,7 @@
 		return (ENOMEM);
 
 	/* Allocate taskqueues */
-	TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
+	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
Index: sys/dev/alc/if_alc.c
===================================================================
--- sys/dev/alc/if_alc.c
+++ sys/dev/alc/if_alc.c
@@ -1387,7 +1387,7 @@
 	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 	    MTX_DEF);
 	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
-	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
+	NET_TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
 	sc->alc_ident = alc_find_ident(dev);
 
 	/* Map the device. */
Index: sys/dev/ale/if_ale.c
===================================================================
--- sys/dev/ale/if_ale.c
+++ sys/dev/ale/if_ale.c
@@ -467,7 +467,7 @@
 	mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 	    MTX_DEF);
 	callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
-	TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
+	NET_TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
 
 	/* Map the device. */
 	pci_enable_busmaster(dev);
Index: sys/dev/ath/if_ath.c
===================================================================
--- sys/dev/ath/if_ath.c
+++ sys/dev/ath/if_ath.c
@@ -760,7 +760,7 @@
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
 	    device_get_nameunit(sc->sc_dev));
 
-	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
 	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
 	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
 	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
Index: sys/dev/ath/if_ath_rx.c
===================================================================
--- sys/dev/ath/if_ath_rx.c
+++ sys/dev/ath/if_ath_rx.c
@@ -647,7 +647,6 @@
 	uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
 	struct mbuf *m)
 {
-	struct epoch_tracker et;
 	uint64_t rstamp;
 	/* XXX TODO: make this an mbuf tag? */
 	struct ieee80211_rx_stats rxs;
@@ -942,7 +941,6 @@
 		rxs.c_nf_ext[i] = nf;
 	}
 
-	NET_EPOCH_ENTER(et);
 	if (ni != NULL) {
 		/*
 		 * Only punt packets for ampdu reorder processing for
@@ -988,7 +986,6 @@
 		type = ieee80211_input_mimo_all(ic, m);
 		m = NULL;
 	}
-	NET_EPOCH_EXIT(et);
 
 	/*
 	 * At this point we have passed the frame up the stack; thus
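The ath(4) hunks above show the driver-side pattern that repeats through the rest of this patch: the receive task is registered with NET_TASK_INIT() so the taskqueue thread enters the network epoch before invoking the handler, and the handler's own struct epoch_tracker plus the NET_EPOCH_ENTER()/NET_EPOCH_EXIT() bracketing can be deleted. A minimal sketch of the resulting handler, with purely illustrative names (struct foo_softc, foo_rxeof() and foo_rx_task() are not part of this patch); attach registers it with NET_TASK_INIT(&sc->rx_task, 0, foo_rx_task, sc) instead of TASK_INIT():

static void
foo_rx_task(void *arg, int pending)
{
	struct foo_softc *sc = arg;

	/* The taskqueue entered net_epoch_preempt before calling us. */
	NET_EPOCH_ASSERT();
	foo_rxeof(sc);		/* rx processing that ends in if_input() */
}
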
Index: sys/dev/bge/if_bge.c
===================================================================
--- sys/dev/bge/if_bge.c
+++ sys/dev/bge/if_bge.c
@@ -3306,7 +3306,7 @@
 
 	sc->bge_dev = dev;
 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
-	TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
+	NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
 
 	pci_enable_busmaster(dev);
@@ -4601,7 +4601,6 @@
 static void
 bge_intr_task(void *arg, int pending)
 {
-	struct epoch_tracker et;
 	struct bge_softc *sc;
 	if_t ifp;
 	uint32_t status, status_tag;
@@ -4644,9 +4643,7 @@
 		    sc->bge_rx_saved_considx != rx_prod) {
 			/* Check RX return ring producer/consumer. */
 			BGE_UNLOCK(sc);
-			NET_EPOCH_ENTER(et);
 			bge_rxeof(sc, rx_prod, 0);
-			NET_EPOCH_EXIT(et);
 			BGE_LOCK(sc);
 		}
 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
Index: sys/dev/bwn/if_bwn.c
===================================================================
--- sys/dev/bwn/if_bwn.c
+++ sys/dev/bwn/if_bwn.c
@@ -621,7 +621,7 @@
 	mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP;
 
 	TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac);
-	TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
+	NET_TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
 	TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac);
 
 	error = bwn_attach_core(mac);
Index: sys/dev/bxe/bxe.c
===================================================================
--- sys/dev/bxe/bxe.c
+++ sys/dev/bxe/bxe.c
@@ -9241,7 +9241,7 @@
 	fp = &sc->fp[i];
 	snprintf(fp->tq_name, sizeof(fp->tq_name),
 	    "bxe%d_fp%d_tq", sc->unit, i);
-	TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
+	NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
 	TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
 	fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
 	    taskqueue_thread_enqueue,
Index: sys/dev/cas/if_cas.c
===================================================================
--- sys/dev/cas/if_cas.c
+++ sys/dev/cas/if_cas.c
@@ -208,7 +208,7 @@
 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
 	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
 	/* Create local taskq. */
-	TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
+	NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
 	TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
 	sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &sc->sc_tq);
@@ -1608,11 +1608,14 @@
 static void
 cas_rint_timeout(void *arg)
 {
+	struct epoch_tracker et;
 	struct cas_softc *sc = arg;
 
 	CAS_LOCK_ASSERT(sc, MA_OWNED);
 
+	NET_EPOCH_ENTER(et);
 	cas_rint(sc);
+	NET_EPOCH_EXIT(et);
 }
 
 static void
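The cas(4) change is the one hunk above that moves in the other direction: cas_rint_timeout() is a callout, not a taskqueue task, so it gains nothing from NET_TASK_INIT() and now has to bracket cas_rint() itself. The same holds for any other non-taskqueue path into a driver's receive code, for example a callout or polling hook that shares the rx routine with the task handler. A sketch with illustrative names (foo_softc, foo_mtx and foo_rxeof() are hypothetical, not part of this patch):

static void
foo_rx_timeout(void *arg)
{
	struct epoch_tracker et;
	struct foo_softc *sc = arg;

	mtx_assert(&sc->foo_mtx, MA_OWNED);

	/* Only taskqueue tasks get the epoch entered for them. */
	NET_EPOCH_ENTER(et);
	foo_rxeof(sc);		/* also reached from the NET_TASK_INIT()ed handler */
	NET_EPOCH_EXIT(et);
}
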
Index: sys/dev/ena/ena.c
===================================================================
--- sys/dev/ena/ena.c
+++ sys/dev/ena/ena.c
@@ -1353,7 +1353,7 @@
 
 	for (i = 0; i < adapter->num_queues; i++) {
 		queue = &adapter->que[i];
 
-		TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
+		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
 		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
 		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
Index: sys/dev/malo/if_malo.c
===================================================================
--- sys/dev/malo/if_malo.c
+++ sys/dev/malo/if_malo.c
@@ -253,7 +253,7 @@
 	taskqueue_start_threads(&sc->malo_tq, 1, PI_NET, "%s taskq",
 	    device_get_nameunit(sc->malo_dev));
 
-	TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
+	NET_TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
 	TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc);
 
 	ic->ic_softc = sc;
Index: sys/dev/mwl/if_mwl.c
===================================================================
--- sys/dev/mwl/if_mwl.c
+++ sys/dev/mwl/if_mwl.c
@@ -360,7 +360,7 @@
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
 	    device_get_nameunit(sc->sc_dev));
 
-	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
 	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
 	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
 	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
Index: sys/dev/netmap/if_ptnet.c
===================================================================
--- sys/dev/netmap/if_ptnet.c
+++ sys/dev/netmap/if_ptnet.c
@@ -695,11 +695,12 @@
 	cpu_cur = CPU_FIRST();
 
 	for (i = 0; i < nvecs; i++) {
 		struct ptnet_queue *pq = sc->queues + i;
-		static void (*handler)(void *context, int pending);
 
-		handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;
+		if (i < sc->num_tx_rings)
+			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
+		else
+			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
-		TASK_INIT(&pq->task, 0, handler, pq);
 		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
 		    taskqueue_thread_enqueue, &pq->taskq);
 		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
Index: sys/dev/nfe/if_nfe.c
===================================================================
--- sys/dev/nfe/if_nfe.c
+++ sys/dev/nfe/if_nfe.c
@@ -654,7 +654,7 @@
 	}
 
 	ether_ifattach(ifp, sc->eaddr);
-	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
+	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
 	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &sc->nfe_tq);
 	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
Index: sys/dev/qlxgbe/ql_os.c
===================================================================
--- sys/dev/qlxgbe/ql_os.c
+++ sys/dev/qlxgbe/ql_os.c
@@ -1543,7 +1543,7 @@
 		bzero(tq_name, sizeof (tq_name));
 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
 
-		TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
+		NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
 
 		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
 			taskqueue_thread_enqueue,
Index: sys/dev/re/if_re.c
===================================================================
--- sys/dev/re/if_re.c
+++ sys/dev/re/if_re.c
@@ -1656,7 +1656,7 @@
 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
 	IFQ_SET_READY(&ifp->if_snd);
 
-	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
+	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
 
 #define RE_PHYAD_INTERNAL	 0
 
@@ -2576,7 +2576,6 @@
 static void
 re_int_task(void *arg, int npending)
 {
-	struct epoch_tracker et;
 	struct rl_softc *sc;
 	struct ifnet *ifp;
 	u_int16_t status;
@@ -2603,11 +2602,8 @@
 	}
 #endif
 
-	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) {
-		NET_EPOCH_ENTER(et);
+	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
 		rval = re_rxeof(sc, NULL);
-		NET_EPOCH_EXIT(et);
-	}
 
 	/*
 	 * Some chips will ignore a second TX request issued
Index: sys/dev/rt/if_rt.c
===================================================================
--- sys/dev/rt/if_rt.c
+++ sys/dev/rt/if_rt.c
@@ -552,7 +552,7 @@
 	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
 
 	/* init task queue */
-	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
+	NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
 	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
 	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
 
Index: sys/dev/smc/if_smc.c
===================================================================
--- sys/dev/smc/if_smc.c
+++ sys/dev/smc/if_smc.c
@@ -395,7 +395,7 @@
 
 	/* Set up taskqueue */
 	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
-	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
+	NET_TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
 	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
 	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
 	    taskqueue_thread_enqueue, &sc->smc_tq);
Index: sys/dev/virtio/network/if_vtnet.c
===================================================================
--- sys/dev/virtio/network/if_vtnet.c
+++ sys/dev/virtio/network/if_vtnet.c
@@ -717,7 +717,7 @@
 	if (rxq->vtnrx_sg == NULL)
 		return (ENOMEM);
 
-	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
+	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
Index: sys/dev/vnic/nicvf_queues.c
===================================================================
--- sys/dev/vnic/nicvf_queues.c
+++ sys/dev/vnic/nicvf_queues.c
@@ -931,7 +931,7 @@
 	    &cq->mtx);
 
 	/* Allocate taskqueue */
-	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
+	NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
 	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &cq->cmp_taskq);
 	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
@@ -1577,7 +1577,7 @@
 	}
 
 	/* Allocate QS error taskqueue */
-	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
+	NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
 	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq",
 	    M_WAITOK, taskqueue_thread_enqueue, &qs->qs_err_taskq);
 	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
Index: sys/dev/vr/if_vr.c
===================================================================
--- sys/dev/vr/if_vr.c
+++ sys/dev/vr/if_vr.c
@@ -676,7 +676,7 @@
 	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
 	IFQ_SET_READY(&ifp->if_snd);
 
-	TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
+	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
 
 	/* Configure Tx FIFO threshold. */
 	sc->vr_txthresh = VR_TXTHRESH_MIN;
Index: sys/dev/wtap/if_wtap.c
===================================================================
--- sys/dev/wtap/if_wtap.c
+++ sys/dev/wtap/if_wtap.c
@@ -637,7 +637,7 @@
 	sc->sc_tq = taskqueue_create("wtap_taskq", M_NOWAIT | M_ZERO,
 	    taskqueue_thread_enqueue, &sc->sc_tq);
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", sc->name);
-	TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
 
 	ic->ic_softc = sc;
 	ic->ic_name = sc->name;
Index: sys/dev/xl/if_xl.c
===================================================================
--- sys/dev/xl/if_xl.c
+++ sys/dev/xl/if_xl.c
@@ -1218,7 +1218,7 @@
 	}
 
 	callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
-	TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
+	NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 
 	/*
 	 * Now allocate a tag for the DMA descriptor lists and a chunk
Index: sys/kern/subr_gtaskqueue.c
===================================================================
--- sys/kern/subr_gtaskqueue.c
+++ sys/kern/subr_gtaskqueue.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include <sys/epoch.h>
 #include
 #include
 #include
@@ -342,13 +343,16 @@
 static void
 gtaskqueue_run_locked(struct gtaskqueue *queue)
 {
+	struct epoch_tracker et;
 	struct gtaskqueue_busy tb;
 	struct gtask *gtask;
+	epoch_t epoch;
 
 	KASSERT(queue != NULL, ("tq is NULL"));
 	TQ_ASSERT_LOCKED(queue);
 	tb.tb_running = NULL;
 	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+	epoch = NULL;
 
 	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
 		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -358,11 +362,23 @@
 
 		TQ_UNLOCK(queue);
 
 		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
+		if (gtask->ta_epoch != epoch) {
+			if (epoch != NULL)
+				epoch_exit_preempt(epoch, &et);
+			if (
+#ifndef BURN_BRIDGES	/* Remove by 1st of April 2020. */
+			    gtask->ta_epoch == net_epoch_preempt &&
+#endif
+			    (epoch = gtask->ta_epoch) != NULL)
+				epoch_enter_preempt(epoch, &et);
+		}
 		gtask->ta_func(gtask->ta_context);
 		TQ_LOCK(queue);
 		wakeup(gtask);
 	}
+	if (epoch != NULL)
+		epoch_exit_preempt(epoch, &et);
 	LIST_REMOVE(&tb, tb_link);
 }
Index: sys/kern/subr_taskqueue.c
===================================================================
--- sys/kern/subr_taskqueue.c
+++ sys/kern/subr_taskqueue.c
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <sys/epoch.h>
 #include
 #include
 #include
@@ -442,14 +443,17 @@
 static void
 taskqueue_run_locked(struct taskqueue *queue)
 {
+	struct epoch_tracker et;
 	struct taskqueue_busy tb;
 	struct task *task;
+	epoch_t epoch;
 	int pending;
 
 	KASSERT(queue != NULL, ("tq is NULL"));
 	TQ_ASSERT_LOCKED(queue);
 	tb.tb_running = NULL;
 	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+	epoch = NULL;
 
 	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
 		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -462,11 +466,23 @@
 
 		TQ_UNLOCK(queue);
 
 		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
+		if (task->ta_epoch != epoch) {
+			if (epoch != NULL)
+				epoch_exit_preempt(epoch, &et);
+			if (
+#ifndef BURN_BRIDGES	/* Remove by 1st of April 2020. */
+			    task->ta_epoch == net_epoch_preempt &&
+#endif
+			    (epoch = task->ta_epoch) != NULL)
+				epoch_enter_preempt(epoch, &et);
+		}
 		task->ta_func(task->ta_context, pending);
 		TQ_LOCK(queue);
 		wakeup(task);
 	}
+	if (epoch != NULL)
+		epoch_exit_preempt(epoch, &et);
 	LIST_REMOVE(&tb, tb_link);
 }
Index: sys/net/iflib.c
===================================================================
--- sys/net/iflib.c
+++ sys/net/iflib.c
@@ -3781,7 +3781,6 @@
 static void
 _task_fn_rx(void *context)
 {
-	struct epoch_tracker et;
 	iflib_rxq_t rxq = context;
 	if_ctx_t ctx = rxq->ifr_ctx;
 	bool more;
@@ -3805,7 +3804,6 @@
 	budget = ctx->ifc_sysctl_rx_budget;
 	if (budget == 0)
 		budget = 16;	/* XXX */
-	NET_EPOCH_ENTER(et);
 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
 		if (ctx->ifc_flags & IFC_LEGACY)
 			IFDI_INTR_ENABLE(ctx);
@@ -3813,7 +3811,6 @@
 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
 		DBG_COUNTER_INC(rx_intr_enables);
 	}
-	NET_EPOCH_EXIT(et);
 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
 		return;
 	if (more)
@@ -5971,7 +5968,7 @@
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_RXTX:
 		q = &ctx->ifc_rxqs[qid];
@@ -5980,7 +5977,7 @@
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr_rxtx;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_ADMIN:
 		q = ctx;
Index: sys/sys/_task.h
===================================================================
--- sys/sys/_task.h
+++ sys/sys/_task.h
@@ -33,6 +33,8 @@
 
 #include <sys/queue.h>
 
+struct epoch;
+
 /*
  * Each task includes a function which is called from
  * taskqueue_run().  The first argument is taken from the 'ta_context'
@@ -51,6 +53,7 @@
 	u_short	ta_priority;		/* (c) Priority */
 	task_fn_t *ta_func;		/* (c) task handler */
 	void	*ta_context;		/* (c) argument for handler */
+	struct	epoch *ta_epoch;	/* (c) epoch to execute in */
 };
 
 #ifdef _KERNEL
@@ -63,6 +66,7 @@
 	u_short	ta_priority;		/* (c) Priority */
 	gtask_fn_t *ta_func;		/* (c) task handler */
 	void	*ta_context;		/* (c) argument for handler */
+	struct	epoch *ta_epoch;	/* (c) epoch to execute in */
 };
 #endif /* _KERNEL */
Index: sys/sys/epoch.h
===================================================================
--- sys/sys/epoch.h
+++ sys/sys/epoch.h
@@ -104,6 +104,10 @@
 #define	NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt)
 #define	NET_EPOCH_CALL(f, c) epoch_call(net_epoch_preempt, (f), (c))
 #define	NET_EPOCH_ASSERT() MPASS(in_epoch(net_epoch_preempt))
+#define	NET_TASK_INIT(t, p, f, c) TASK_INIT2(t, p, f, c, net_epoch_preempt)
+#define	NET_GROUPTASK_INIT(gtask, prio, func, ctx)			\
+	GTASK_INIT2(&(gtask)->gt_task, TASK_SKIP_WAKEUP, (prio),	\
+	    (func), (ctx), net_epoch_preempt)
 
 #endif	/* _KERNEL */
 #endif	/* _SYS_EPOCH_H_ */
Index: sys/sys/gtaskqueue.h
===================================================================
--- sys/sys/gtaskqueue.h
+++ sys/sys/gtaskqueue.h
@@ -88,13 +88,16 @@
 
 #define	TASK_SKIP_WAKEUP	0x2
 #define	TASK_NOENQUEUE		0x4
 
-#define GTASK_INIT(gtask, flags, priority, func, context) do {	\
-	(gtask)->ta_flags = flags;		\
-	(gtask)->ta_priority = (priority);	\
-	(gtask)->ta_func = (func);		\
-	(gtask)->ta_context = (context);	\
+#define GTASK_INIT2(gtask, flags, priority, func, context, epoch ) do { \
+	(gtask)->ta_flags = flags;		\
+	(gtask)->ta_priority = (priority);	\
+	(gtask)->ta_func = (func);		\
+	(gtask)->ta_context = (context);	\
+	(gtask)->ta_epoch = (epoch);		\
 } while (0)
 
+#define	GTASK_INIT(t, fl, p, fu, c)	GTASK_INIT2(t, fl, p, fu, c, NULL)
+
 #define	GROUPTASK_INIT(gtask, priority, func, context)	\
 	GTASK_INIT(&(gtask)->gt_task, TASK_SKIP_WAKEUP, priority, func, context)
Index: sys/sys/param.h
===================================================================
--- sys/sys/param.h
+++ sys/sys/param.h
@@ -60,7 +60,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1300077	/* Master, propagated to newvers */
+#define __FreeBSD_version 1300078	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
Index: sys/sys/taskqueue.h
===================================================================
--- sys/sys/taskqueue.h
+++ sys/sys/taskqueue.h
@@ -121,13 +121,16 @@
 /*
  * Initialise a task structure.
  */
-#define TASK_INIT(task, priority, func, context) do {	\
-	(task)->ta_pending = 0;			\
-	(task)->ta_priority = (priority);	\
-	(task)->ta_func = (func);		\
-	(task)->ta_context = (context);		\
+#define TASK_INIT2(task, priority, func, context, epoch) do {	\
+	(task)->ta_pending = 0;			\
+	(task)->ta_priority = (priority);	\
+	(task)->ta_func = (func);		\
+	(task)->ta_context = (context);		\
+	(task)->ta_epoch = (epoch);		\
 } while (0)
 
+#define	TASK_INIT(t, p, f, c)	TASK_INIT2(t, p, f, c, NULL)
+
 void _timeout_task_init(struct taskqueue *queue,
 	    struct timeout_task *timeout_task, int priority, task_fn_t func,
 	    void *context);
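
Taken together, the header changes leave three ways to initialize a task: TASK_INIT()/GTASK_INIT() keep ta_epoch NULL and behave exactly as before; NET_TASK_INIT() and NET_GROUPTASK_INIT() make the taskqueue or grouptask runner enter net_epoch_preempt around the handler; and TASK_INIT2()/GTASK_INIT2() take an explicit epoch, although while the BURN_BRIDGES guard in the runners remains, only net_epoch_preempt is honored. A short usage sketch for a driver attach path, assuming the usual driver headers and purely hypothetical names (struct foo_softc and its foo_* members are illustrative, not part of this patch):

static int
foo_attach_taskqueue(struct foo_softc *sc)
{

	/* The tx task does not need the net epoch; plain TASK_INIT(). */
	TASK_INIT(&sc->foo_tx_task, 0, foo_tx_task, sc);

	/* Rx/interrupt work that calls into the stack runs in the epoch. */
	NET_TASK_INIT(&sc->foo_rx_task, 0, foo_rx_task, sc);

	sc->foo_tq = taskqueue_create_fast("foo_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->foo_tq);
	if (sc->foo_tq == NULL)
		return (ENOMEM);
	return (taskqueue_start_threads(&sc->foo_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->foo_dev)));
}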