Index: head/sys/kern/subr_gtaskqueue.c =================================================================== --- head/sys/kern/subr_gtaskqueue.c (revision 357770) +++ head/sys/kern/subr_gtaskqueue.c (revision 357771) @@ -1,1035 +1,1048 @@ /*- * Copyright (c) 2000 Doug Rabson * Copyright (c) 2014 Jeff Roberson * Copyright (c) 2016 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> #include <sys/bus.h> #include <sys/cpuset.h> #include <sys/kernel.h> #include <sys/kthread.h> #include <sys/libkern.h> #include <sys/limits.h> #include <sys/lock.h> #include <sys/malloc.h> #include <sys/mutex.h> #include <sys/proc.h> +#include <sys/epoch.h> #include <sys/sched.h> #include <sys/smp.h> #include <sys/gtaskqueue.h> #include <sys/unistd.h> #include <machine/stdarg.h> static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues"); static void gtaskqueue_thread_enqueue(void *); static void gtaskqueue_thread_loop(void *arg); static int task_is_running(struct gtaskqueue *queue, struct gtask *gtask); static void gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask); TASKQGROUP_DEFINE(softirq, mp_ncpus, 1); TASKQGROUP_DEFINE(config, 1, 1); struct gtaskqueue_busy { struct gtask *tb_running; u_int tb_seq; LIST_ENTRY(gtaskqueue_busy) tb_link; }; typedef void (*gtaskqueue_enqueue_fn)(void *context); struct gtaskqueue { STAILQ_HEAD(, gtask) tq_queue; LIST_HEAD(, gtaskqueue_busy) tq_active; u_int tq_seq; int tq_callouts; struct mtx_padalign tq_mutex; gtaskqueue_enqueue_fn tq_enqueue; void *tq_context; char *tq_name; struct thread **tq_threads; int tq_tcount; int tq_spin; int tq_flags; taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS]; void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS]; }; #define TQ_FLAGS_ACTIVE (1 << 0) #define TQ_FLAGS_BLOCKED (1 << 1) #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2) #define DT_CALLOUT_ARMED (1 << 0) #define TQ_LOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_lock_spin(&(tq)->tq_mutex); \ else \ mtx_lock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED) #define TQ_UNLOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_unlock_spin(&(tq)->tq_mutex); \ else \ mtx_unlock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED) #ifdef INVARIANTS static void gtask_dump(struct gtask *gtask) { printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n", gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context); }
#endif static __inline int TQ_SLEEP(struct gtaskqueue *tq, void *p, const char *wm) { if (tq->tq_spin) return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0)); return (msleep(p, &tq->tq_mutex, 0, wm, 0)); } static struct gtaskqueue * _gtaskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context, int mtxflags, const char *mtxname __unused) { struct gtaskqueue *queue; char *tq_name; tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO); if (!tq_name) return (NULL); snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue"); queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO); if (!queue) { free(tq_name, M_GTASKQUEUE); return (NULL); } STAILQ_INIT(&queue->tq_queue); LIST_INIT(&queue->tq_active); queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_name = tq_name; queue->tq_spin = (mtxflags & MTX_SPIN) != 0; queue->tq_flags |= TQ_FLAGS_ACTIVE; if (enqueue == gtaskqueue_thread_enqueue) queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE; mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags); return (queue); } /* * Signal a taskqueue thread to terminate. */ static void gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq) { while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { wakeup(tq); TQ_SLEEP(tq, pp, "gtq_destroy"); } } static void gtaskqueue_free(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_ACTIVE; gtaskqueue_terminate(queue->tq_threads, queue); KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?")); KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); mtx_destroy(&queue->tq_mutex); free(queue->tq_threads, M_GTASKQUEUE); free(queue->tq_name, M_GTASKQUEUE); free(queue, M_GTASKQUEUE); } /* * Wait for all to complete, then prevent it from being enqueued */ void grouptask_block(struct grouptask *grouptask) { struct gtaskqueue *queue = grouptask->gt_taskqueue; struct gtask *gtask = &grouptask->gt_task; #ifdef INVARIANTS if (queue == NULL) { gtask_dump(gtask); panic("queue == NULL"); } #endif TQ_LOCK(queue); gtask->ta_flags |= TASK_NOENQUEUE; gtaskqueue_drain_locked(queue, gtask); TQ_UNLOCK(queue); } void grouptask_unblock(struct grouptask *grouptask) { struct gtaskqueue *queue = grouptask->gt_taskqueue; struct gtask *gtask = &grouptask->gt_task; #ifdef INVARIANTS if (queue == NULL) { gtask_dump(gtask); panic("queue == NULL"); } #endif TQ_LOCK(queue); gtask->ta_flags &= ~TASK_NOENQUEUE; TQ_UNLOCK(queue); } int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask) { #ifdef INVARIANTS if (queue == NULL) { gtask_dump(gtask); panic("queue == NULL"); } #endif TQ_LOCK(queue); if (gtask->ta_flags & TASK_ENQUEUED) { TQ_UNLOCK(queue); return (0); } if (gtask->ta_flags & TASK_NOENQUEUE) { TQ_UNLOCK(queue); return (EAGAIN); } STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link); gtask->ta_flags |= TASK_ENQUEUED; TQ_UNLOCK(queue); if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) queue->tq_enqueue(queue->tq_context); return (0); } static void gtaskqueue_task_nop_fn(void *context) { } /* * Block until all currently queued tasks in this taskqueue * have begun execution. Tasks queued during execution of * this function are ignored. */ static void gtaskqueue_drain_tq_queue(struct gtaskqueue *queue) { struct gtask t_barrier; if (STAILQ_EMPTY(&queue->tq_queue)) return; /* * Enqueue our barrier after all current tasks, but with * the highest priority so that newly queued tasks cannot * pass it. 
Because of the high priority, we can not use * taskqueue_enqueue_locked directly (which drops the lock * anyway) so just insert it at tail while we have the * queue lock. */ GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier); STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link); t_barrier.ta_flags |= TASK_ENQUEUED; /* * Once the barrier has executed, all previously queued tasks * have completed or are currently executing. */ while (t_barrier.ta_flags & TASK_ENQUEUED) TQ_SLEEP(queue, &t_barrier, "gtq_qdrain"); } /* * Block until all currently executing tasks for this taskqueue * complete. Tasks that begin execution during the execution * of this function are ignored. */ static void gtaskqueue_drain_tq_active(struct gtaskqueue *queue) { struct gtaskqueue_busy *tb; u_int seq; if (LIST_EMPTY(&queue->tq_active)) return; /* Block taskq_terminate().*/ queue->tq_callouts++; /* Wait for any active task with sequence from the past. */ seq = queue->tq_seq; restart: LIST_FOREACH(tb, &queue->tq_active, tb_link) { if ((int)(tb->tb_seq - seq) <= 0) { TQ_SLEEP(queue, tb->tb_running, "gtq_adrain"); goto restart; } } /* Release taskqueue_terminate(). */ queue->tq_callouts--; if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) wakeup_one(queue->tq_threads); } void gtaskqueue_block(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags |= TQ_FLAGS_BLOCKED; TQ_UNLOCK(queue); } void gtaskqueue_unblock(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_BLOCKED; if (!STAILQ_EMPTY(&queue->tq_queue)) queue->tq_enqueue(queue->tq_context); TQ_UNLOCK(queue); } static void gtaskqueue_run_locked(struct gtaskqueue *queue) { + struct epoch_tracker et; struct gtaskqueue_busy tb; struct gtask *gtask; + bool in_net_epoch; KASSERT(queue != NULL, ("tq is NULL")); TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link); + in_net_epoch = false; while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) { STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); gtask->ta_flags &= ~TASK_ENQUEUED; tb.tb_running = gtask; tb.tb_seq = ++queue->tq_seq; TQ_UNLOCK(queue); KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL")); + if (!in_net_epoch && TASK_IS_NET(gtask)) { + in_net_epoch = true; + NET_EPOCH_ENTER(et); + } else if (in_net_epoch && !TASK_IS_NET(gtask)) { + NET_EPOCH_EXIT(et); + in_net_epoch = false; + } gtask->ta_func(gtask->ta_context); TQ_LOCK(queue); wakeup(gtask); } + if (in_net_epoch) + NET_EPOCH_EXIT(et); LIST_REMOVE(&tb, tb_link); } static int task_is_running(struct gtaskqueue *queue, struct gtask *gtask) { struct gtaskqueue_busy *tb; TQ_ASSERT_LOCKED(queue); LIST_FOREACH(tb, &queue->tq_active, tb_link) { if (tb->tb_running == gtask) return (1); } return (0); } static int gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask) { if (gtask->ta_flags & TASK_ENQUEUED) STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link); gtask->ta_flags &= ~TASK_ENQUEUED; return (task_is_running(queue, gtask) ? 
EBUSY : 0); } int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask) { int error; TQ_LOCK(queue); error = gtaskqueue_cancel_locked(queue, gtask); TQ_UNLOCK(queue); return (error); } static void gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask) { while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask)) TQ_SLEEP(queue, gtask, "gtq_drain"); } void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); gtaskqueue_drain_locked(queue, gtask); TQ_UNLOCK(queue); } void gtaskqueue_drain_all(struct gtaskqueue *queue) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); gtaskqueue_drain_tq_queue(queue); gtaskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } static int _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, va_list ap) { char ktname[MAXCOMLEN + 1]; struct thread *td; struct gtaskqueue *tq; int i, error; if (count <= 0) return (EINVAL); vsnprintf(ktname, sizeof(ktname), name, ap); tq = *tqp; tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE, M_NOWAIT | M_ZERO); if (tq->tq_threads == NULL) { printf("%s: no memory for %s threads\n", __func__, ktname); return (ENOMEM); } for (i = 0; i < count; i++) { if (count == 1) error = kthread_add(gtaskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); else error = kthread_add(gtaskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s_%d", ktname, i); if (error) { /* should be ok to continue, taskqueue_free will dtrt */ printf("%s: kthread_add(%s): error %d", __func__, ktname, error); tq->tq_threads[i] = NULL; /* paranoid */ } else tq->tq_tcount++; } for (i = 0; i < count; i++) { if (tq->tq_threads[i] == NULL) continue; td = tq->tq_threads[i]; if (mask) { error = cpuset_setthread(td->td_tid, mask); /* * Failing to pin is rarely an actual fatal error; * it'll just affect performance. */ if (error) printf("%s: curthread=%llu: can't pin; " "error=%d\n", __func__, (unsigned long long) td->td_tid, error); } thread_lock(td); sched_prio(td, pri); sched_add(td, SRQ_BORING); } return (0); } static int gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap); va_end(ap); return (error); } static inline void gtaskqueue_run_callback(struct gtaskqueue *tq, enum taskqueue_callback_type cb_type) { taskqueue_callback_fn tq_callback; TQ_ASSERT_UNLOCKED(tq); tq_callback = tq->tq_callbacks[cb_type]; if (tq_callback != NULL) tq_callback(tq->tq_cb_contexts[cb_type]); } static void gtaskqueue_thread_loop(void *arg) { struct gtaskqueue **tqp, *tq; tqp = arg; tq = *tqp; gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT); TQ_LOCK(tq); while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { /* XXX ? */ gtaskqueue_run_locked(tq); /* * Because taskqueue_run() can drop tq_mutex, we need to * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the * meantime, which means we missed a wakeup. */ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) break; TQ_SLEEP(tq, tq, "-"); } gtaskqueue_run_locked(tq); /* * This thread is on its way out, so just drop the lock temporarily * in order to call the shutdown callback. This allows the callback * to look at the taskqueue, even just before it dies. 
*/ TQ_UNLOCK(tq); gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); TQ_LOCK(tq); /* rendezvous with thread that asked us to terminate */ tq->tq_tcount--; wakeup_one(tq->tq_threads); TQ_UNLOCK(tq); kthread_exit(); } static void gtaskqueue_thread_enqueue(void *context) { struct gtaskqueue **tqp, *tq; tqp = context; tq = *tqp; wakeup_any(tq); } static struct gtaskqueue * gtaskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _gtaskqueue_create(name, mflags, enqueue, context, MTX_SPIN, "fast_taskqueue"); } struct taskqgroup_cpu { LIST_HEAD(, grouptask) tgc_tasks; struct gtaskqueue *tgc_taskq; int tgc_cnt; int tgc_cpu; }; struct taskqgroup { struct taskqgroup_cpu tqg_queue[MAXCPU]; struct mtx tqg_lock; const char * tqg_name; int tqg_adjusting; int tqg_stride; int tqg_cnt; }; struct taskq_bind_task { struct gtask bt_task; int bt_cpuid; }; static void taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu) { struct taskqgroup_cpu *qcpu; qcpu = &qgroup->tqg_queue[idx]; LIST_INIT(&qcpu->tgc_tasks); qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK, taskqueue_thread_enqueue, &qcpu->tgc_taskq); gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT, "%s_%d", qgroup->tqg_name, idx); qcpu->tgc_cpu = cpu; } static void taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx) { gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq); } /* * Find the taskq with least # of tasks that doesn't currently have any * other queues from the uniq identifier. */ static int taskqgroup_find(struct taskqgroup *qgroup, void *uniq) { struct grouptask *n; int i, idx, mincnt; int strict; mtx_assert(&qgroup->tqg_lock, MA_OWNED); if (qgroup->tqg_cnt == 0) return (0); idx = -1; mincnt = INT_MAX; /* * Two passes; First scan for a queue with the least tasks that * does not already service this uniq id. If that fails simply find * the queue with the least total tasks; */ for (strict = 1; mincnt == INT_MAX; strict = 0) { for (i = 0; i < qgroup->tqg_cnt; i++) { if (qgroup->tqg_queue[i].tgc_cnt > mincnt) continue; if (strict) { LIST_FOREACH(n, &qgroup->tqg_queue[i].tgc_tasks, gt_list) if (n->gt_uniq == uniq) break; if (n != NULL) continue; } mincnt = qgroup->tqg_queue[i].tgc_cnt; idx = i; } } if (idx == -1) panic("%s: failed to pick a qid.", __func__); return (idx); } /* * smp_started is unusable since it is not set for UP kernels or even for * SMP kernels when there is 1 CPU. This is usually handled by adding a * (mp_ncpus == 1) test, but that would be broken here since we need to * to synchronize with the SI_SUB_SMP ordering. Even in the pure SMP case * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP. * * So maintain our own flag. It must be set after all CPUs are started * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed * adjustment is properly delayed. SI_ORDER_FOURTH is clearly before * SI_ORDER_ANY and unclearly after the CPUs are started. It would be * simpler for adjustment to pass a flag indicating if it is delayed. */ static int tqg_smp_started; static void tqg_record_smp_started(void *arg) { tqg_smp_started = 1; } SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH, tqg_record_smp_started, NULL); void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask, void *uniq, device_t dev, struct resource *irq, const char *name) { int cpu, qid, error; gtask->gt_uniq = uniq; snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? 
name : "grouptask"); gtask->gt_dev = dev; gtask->gt_irq = irq; gtask->gt_cpu = -1; mtx_lock(&qgroup->tqg_lock); qid = taskqgroup_find(qgroup, uniq); qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; if (dev != NULL && irq != NULL && tqg_smp_started) { cpu = qgroup->tqg_queue[qid].tgc_cpu; gtask->gt_cpu = cpu; mtx_unlock(&qgroup->tqg_lock); error = bus_bind_intr(dev, irq, cpu); if (error) printf("%s: binding interrupt failed for %s: %d\n", __func__, gtask->gt_name, error); } else mtx_unlock(&qgroup->tqg_lock); } static void taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) { int qid, cpu, error; mtx_lock(&qgroup->tqg_lock); qid = taskqgroup_find(qgroup, gtask->gt_uniq); cpu = qgroup->tqg_queue[qid].tgc_cpu; if (gtask->gt_dev != NULL && gtask->gt_irq != NULL) { mtx_unlock(&qgroup->tqg_lock); error = bus_bind_intr(gtask->gt_dev, gtask->gt_irq, cpu); mtx_lock(&qgroup->tqg_lock); if (error) printf("%s: binding interrupt failed for %s: %d\n", __func__, gtask->gt_name, error); } qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; mtx_unlock(&qgroup->tqg_lock); } int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask, void *uniq, int cpu, device_t dev, struct resource *irq, const char *name) { int i, qid, error; qid = -1; gtask->gt_uniq = uniq; snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask"); gtask->gt_dev = dev; gtask->gt_irq = irq; gtask->gt_cpu = cpu; mtx_lock(&qgroup->tqg_lock); if (tqg_smp_started) { for (i = 0; i < qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_cpu == cpu) { qid = i; break; } if (qid == -1) { mtx_unlock(&qgroup->tqg_lock); printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu); return (EINVAL); } } else qid = 0; qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; cpu = qgroup->tqg_queue[qid].tgc_cpu; mtx_unlock(&qgroup->tqg_lock); if (dev != NULL && irq != NULL && tqg_smp_started) { error = bus_bind_intr(dev, irq, cpu); if (error) printf("%s: binding interrupt failed for %s: %d\n", __func__, gtask->gt_name, error); } return (0); } static int taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) { device_t dev; struct resource *irq; int cpu, error, i, qid; qid = -1; dev = gtask->gt_dev; irq = gtask->gt_irq; cpu = gtask->gt_cpu; MPASS(tqg_smp_started); mtx_lock(&qgroup->tqg_lock); for (i = 0; i < qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_cpu == cpu) { qid = i; break; } if (qid == -1) { mtx_unlock(&qgroup->tqg_lock); printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu); return (EINVAL); } qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; mtx_unlock(&qgroup->tqg_lock); if (dev != NULL && irq != NULL) { error = bus_bind_intr(dev, irq, cpu); if (error) printf("%s: binding interrupt failed for %s: %d\n", __func__, gtask->gt_name, error); } return (0); } void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask) { int i; grouptask_block(gtask); mtx_lock(&qgroup->tqg_lock); for (i = 0; i 
< qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue) break; if (i == qgroup->tqg_cnt) panic("%s: task %s not in group", __func__, gtask->gt_name); qgroup->tqg_queue[i].tgc_cnt--; LIST_REMOVE(gtask, gt_list); mtx_unlock(&qgroup->tqg_lock); gtask->gt_taskqueue = NULL; gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE; } static void taskqgroup_binder(void *ctx) { struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx; cpuset_t mask; int error; CPU_ZERO(&mask); CPU_SET(gtask->bt_cpuid, &mask); error = cpuset_setthread(curthread->td_tid, &mask); thread_lock(curthread); sched_bind(curthread, gtask->bt_cpuid); thread_unlock(curthread); if (error) printf("%s: binding curthread failed: %d\n", __func__, error); free(gtask, M_DEVBUF); } static void taskqgroup_bind(struct taskqgroup *qgroup) { struct taskq_bind_task *gtask; int i; /* * Bind taskqueue threads to specific CPUs, if they have been assigned * one. */ if (qgroup->tqg_cnt == 1) return; for (i = 0; i < qgroup->tqg_cnt; i++) { gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK); GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask); gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu; grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq, &gtask->bt_task); } } static void taskqgroup_config_init(void *arg) { struct taskqgroup *qgroup = qgroup_config; LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL); LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks, grouptask, gt_list); qgroup->tqg_queue[0].tgc_cnt = 0; taskqgroup_cpu_create(qgroup, 0, 0); qgroup->tqg_cnt = 1; qgroup->tqg_stride = 1; } SYSINIT(taskqgroup_config_init, SI_SUB_TASKQ, SI_ORDER_SECOND, taskqgroup_config_init, NULL); static int _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) { LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL); struct grouptask *gtask; int i, k, old_cnt, old_cpu, cpu; mtx_assert(&qgroup->tqg_lock, MA_OWNED); if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) { printf("%s: failed cnt: %d stride: %d " "mp_ncpus: %d tqg_smp_started: %d\n", __func__, cnt, stride, mp_ncpus, tqg_smp_started); return (EINVAL); } if (qgroup->tqg_adjusting) { printf("%s failed: adjusting\n", __func__); return (EBUSY); } qgroup->tqg_adjusting = 1; old_cnt = qgroup->tqg_cnt; old_cpu = 0; if (old_cnt < cnt) old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu; mtx_unlock(&qgroup->tqg_lock); /* * Set up queue for tasks added before boot. */ if (old_cnt == 0) { LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks, grouptask, gt_list); qgroup->tqg_queue[0].tgc_cnt = 0; } /* * If new taskq threads have been added. */ cpu = old_cpu; for (i = old_cnt; i < cnt; i++) { taskqgroup_cpu_create(qgroup, i, cpu); for (k = 0; k < stride; k++) cpu = CPU_NEXT(cpu); } mtx_lock(&qgroup->tqg_lock); qgroup->tqg_cnt = cnt; qgroup->tqg_stride = stride; /* * Adjust drivers to use new taskqs.
*/ for (i = 0; i < old_cnt; i++) { while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) { LIST_REMOVE(gtask, gt_list); qgroup->tqg_queue[i].tgc_cnt--; LIST_INSERT_HEAD(&gtask_head, gtask, gt_list); } } mtx_unlock(&qgroup->tqg_lock); while ((gtask = LIST_FIRST(&gtask_head))) { LIST_REMOVE(gtask, gt_list); if (gtask->gt_cpu == -1) taskqgroup_attach_deferred(qgroup, gtask); else if (taskqgroup_attach_cpu_deferred(qgroup, gtask)) taskqgroup_attach_deferred(qgroup, gtask); } #ifdef INVARIANTS mtx_lock(&qgroup->tqg_lock); for (i = 0; i < qgroup->tqg_cnt; i++) { MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL); LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) MPASS(gtask->gt_taskqueue != NULL); } mtx_unlock(&qgroup->tqg_lock); #endif /* * If taskq thread count has been reduced. */ for (i = cnt; i < old_cnt; i++) taskqgroup_cpu_remove(qgroup, i); taskqgroup_bind(qgroup); mtx_lock(&qgroup->tqg_lock); qgroup->tqg_adjusting = 0; return (0); } int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) { int error; mtx_lock(&qgroup->tqg_lock); error = _taskqgroup_adjust(qgroup, cnt, stride); mtx_unlock(&qgroup->tqg_lock); return (error); } struct taskqgroup * taskqgroup_create(const char *name) { struct taskqgroup *qgroup; qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO); mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF); qgroup->tqg_name = name; LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks); return (qgroup); } void taskqgroup_destroy(struct taskqgroup *qgroup) { } void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_config, gtask, gtask, NULL, NULL, name); } void taskqgroup_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_config, gtask); }
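The gtaskqueue_run_locked() hunk above is the heart of this revision: the run loop now remembers whether the worker thread is inside the network epoch and only enters or exits it when the TASK_IS_NET() state of the next task differs from the current one, so an unbroken run of network tasks pays for a single NET_EPOCH_ENTER()/NET_EPOCH_EXIT() pair instead of one per task. The subr_taskqueue.c hunk below applies the same pattern to taskqueue_run_locked(). A consumer opts in by initializing its task with the TASK_NETWORK flag; a minimal sketch, with all foo_* names invented for illustration:

	/*
	 * Hypothetical driver using NET_TASK_INIT() (added to epoch.h by
	 * this revision) so the taskqueue run loop calls foo_rx_task()
	 * inside the net epoch.
	 */
	struct foo_softc {
		struct taskqueue	*fs_tq;
		struct task		 fs_rxtask;
	};

	static void
	foo_rx_task(void *ctx, int pending)
	{
		struct foo_softc *sc = ctx;

		NET_EPOCH_ASSERT();	/* entered by taskqueue_run_locked() */
		/* ... process received packets for sc ... */
	}

	static void
	foo_attach_taskq(struct foo_softc *sc)
	{
		NET_TASK_INIT(&sc->fs_rxtask, 0, foo_rx_task, sc);
		sc->fs_tq = taskqueue_create("foo_tq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->fs_tq);
		taskqueue_start_threads(&sc->fs_tq, 1, PI_NET, "foo taskq");
	}

The batching is the point: epoch_enter_preempt()/epoch_exit_preempt() are not free, and toggling around every task would penalize queues that mix network and non-network work.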
Index: head/sys/kern/subr_taskqueue.c =================================================================== --- head/sys/kern/subr_taskqueue.c (revision 357770) +++ head/sys/kern/subr_taskqueue.c (revision 357771) @@ -1,869 +1,882 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> #include <sys/bus.h> #include <sys/cpuset.h> #include <sys/interrupt.h> #include <sys/kernel.h> #include <sys/kthread.h> #include <sys/libkern.h> #include <sys/limits.h> #include <sys/lock.h> #include <sys/malloc.h> #include <sys/mutex.h> #include <sys/proc.h> +#include <sys/epoch.h> #include <sys/sched.h> #include <sys/smp.h> #include <sys/taskqueue.h> #include <sys/unistd.h> #include <machine/stdarg.h> static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues"); static void *taskqueue_giant_ih; static void *taskqueue_ih; static void taskqueue_fast_enqueue(void *); static void taskqueue_swi_enqueue(void *); static void taskqueue_swi_giant_enqueue(void *); struct taskqueue_busy { struct task *tb_running; u_int tb_seq; LIST_ENTRY(taskqueue_busy) tb_link; }; struct taskqueue { STAILQ_HEAD(, task) tq_queue; LIST_HEAD(, taskqueue_busy) tq_active; struct task *tq_hint; u_int tq_seq; int tq_callouts; struct mtx_padalign tq_mutex; taskqueue_enqueue_fn tq_enqueue; void *tq_context; char *tq_name; struct thread **tq_threads; int tq_tcount; int tq_spin; int tq_flags; taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS]; void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS]; }; #define TQ_FLAGS_ACTIVE (1 << 0) #define TQ_FLAGS_BLOCKED (1 << 1) #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2) #define DT_CALLOUT_ARMED (1 << 0) #define DT_DRAIN_IN_PROGRESS (1 << 1) #define TQ_LOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_lock_spin(&(tq)->tq_mutex); \ else \ mtx_lock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED) #define TQ_UNLOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_unlock_spin(&(tq)->tq_mutex); \ else \ mtx_unlock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED) void _timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task, int priority, task_fn_t func, void *context) { TASK_INIT(&timeout_task->t, priority, func, context); callout_init_mtx(&timeout_task->c, &queue->tq_mutex, CALLOUT_RETURNUNLOCKED); timeout_task->q = queue; timeout_task->f = 0; } static __inline int TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm) { if (tq->tq_spin) return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0)); return (msleep(p, &tq->tq_mutex, 0, wm, 0)); } static struct taskqueue * _taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context, int mtxflags, const char *mtxname __unused) { struct taskqueue *queue; char *tq_name; tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO); if (tq_name == NULL) return (NULL); queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO); if (queue == NULL) { free(tq_name, M_TASKQUEUE); return (NULL); } snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ?
name : "taskqueue"); STAILQ_INIT(&queue->tq_queue); LIST_INIT(&queue->tq_active); queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_name = tq_name; queue->tq_spin = (mtxflags & MTX_SPIN) != 0; queue->tq_flags |= TQ_FLAGS_ACTIVE; if (enqueue == taskqueue_fast_enqueue || enqueue == taskqueue_swi_enqueue || enqueue == taskqueue_swi_giant_enqueue || enqueue == taskqueue_thread_enqueue) queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE; mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags); return (queue); } struct taskqueue * taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_DEF, name); } void taskqueue_set_callback(struct taskqueue *queue, enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback, void *context) { KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) && (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)), ("Callback type %d not valid, must be %d-%d", cb_type, TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX)); KASSERT((queue->tq_callbacks[cb_type] == NULL), ("Re-initialization of taskqueue callback?")); queue->tq_callbacks[cb_type] = callback; queue->tq_cb_contexts[cb_type] = context; } /* * Signal a taskqueue thread to terminate. */ static void taskqueue_terminate(struct thread **pp, struct taskqueue *tq) { while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { wakeup(tq); TQ_SLEEP(tq, pp, "tq_destroy"); } } void taskqueue_free(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_ACTIVE; taskqueue_terminate(queue->tq_threads, queue); KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?")); KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); mtx_destroy(&queue->tq_mutex); free(queue->tq_threads, M_TASKQUEUE); free(queue->tq_name, M_TASKQUEUE); free(queue, M_TASKQUEUE); } static int taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task) { struct task *ins; struct task *prev; KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func")); /* * Count multiple enqueues. */ if (task->ta_pending) { if (task->ta_pending < USHRT_MAX) task->ta_pending++; TQ_UNLOCK(queue); return (0); } /* * Optimise cases when all tasks use small set of priorities. * In case of only one priority we always insert at the end. * In case of two tq_hint typically gives the insertion point. * In case of more then two tq_hint should halve the search. */ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); if (!prev || prev->ta_priority >= task->ta_priority) { STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); } else { prev = queue->tq_hint; if (prev && prev->ta_priority >= task->ta_priority) { ins = STAILQ_NEXT(prev, ta_link); } else { prev = NULL; ins = STAILQ_FIRST(&queue->tq_queue); } for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link)) if (ins->ta_priority < task->ta_priority) break; if (prev) { STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); queue->tq_hint = task; } else STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); } task->ta_pending = 1; if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0) TQ_UNLOCK(queue); if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) queue->tq_enqueue(queue->tq_context); if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0) TQ_UNLOCK(queue); /* Return with lock released. */ return (0); } int taskqueue_enqueue(struct taskqueue *queue, struct task *task) { int res; TQ_LOCK(queue); res = taskqueue_enqueue_locked(queue, task); /* The lock is released inside. 
*/ return (res); } static void taskqueue_timeout_func(void *arg) { struct taskqueue *queue; struct timeout_task *timeout_task; timeout_task = arg; queue = timeout_task->q; KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout")); timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t); /* The lock is released inside. */ } int taskqueue_enqueue_timeout_sbt(struct taskqueue *queue, struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags) { int res; TQ_LOCK(queue); KASSERT(timeout_task->q == NULL || timeout_task->q == queue, ("Migrated queue")); KASSERT(!queue->tq_spin, ("Timeout for spin-queue")); timeout_task->q = queue; res = timeout_task->t.ta_pending; if (timeout_task->f & DT_DRAIN_IN_PROGRESS) { /* Do nothing */ TQ_UNLOCK(queue); res = -1; } else if (sbt == 0) { taskqueue_enqueue_locked(queue, &timeout_task->t); /* The lock is released inside. */ } else { if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { res++; } else { queue->tq_callouts++; timeout_task->f |= DT_CALLOUT_ARMED; if (sbt < 0) sbt = -sbt; /* Ignore overflow. */ } if (sbt > 0) { callout_reset_sbt(&timeout_task->c, sbt, pr, taskqueue_timeout_func, timeout_task, flags); } TQ_UNLOCK(queue); } return (res); } int taskqueue_enqueue_timeout(struct taskqueue *queue, struct timeout_task *ttask, int ticks) { return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt, 0, 0)); } static void taskqueue_task_nop_fn(void *context, int pending) { } /* * Block until all currently queued tasks in this taskqueue * have begun execution. Tasks queued during execution of * this function are ignored. */ static int taskqueue_drain_tq_queue(struct taskqueue *queue) { struct task t_barrier; if (STAILQ_EMPTY(&queue->tq_queue)) return (0); /* * Enqueue our barrier after all current tasks, but with * the highest priority so that newly queued tasks cannot * pass it. Because of the high priority, we can not use * taskqueue_enqueue_locked directly (which drops the lock * anyway) so just insert it at tail while we have the * queue lock. */ - TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier); + TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier); STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link); queue->tq_hint = &t_barrier; t_barrier.ta_pending = 1; /* * Once the barrier has executed, all previously queued tasks * have completed or are currently executing. */ while (t_barrier.ta_pending != 0) TQ_SLEEP(queue, &t_barrier, "tq_qdrain"); return (1); } /* * Block until all currently executing tasks for this taskqueue * complete. Tasks that begin execution during the execution * of this function are ignored. */ static int taskqueue_drain_tq_active(struct taskqueue *queue) { struct taskqueue_busy *tb; u_int seq; if (LIST_EMPTY(&queue->tq_active)) return (0); /* Block taskq_terminate().*/ queue->tq_callouts++; /* Wait for any active task with sequence from the past. */ seq = queue->tq_seq; restart: LIST_FOREACH(tb, &queue->tq_active, tb_link) { if ((int)(tb->tb_seq - seq) <= 0) { TQ_SLEEP(queue, tb->tb_running, "tq_adrain"); goto restart; } } /* Release taskqueue_terminate(). 
*/ queue->tq_callouts--; if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) wakeup_one(queue->tq_threads); return (1); } void taskqueue_block(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags |= TQ_FLAGS_BLOCKED; TQ_UNLOCK(queue); } void taskqueue_unblock(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_BLOCKED; if (!STAILQ_EMPTY(&queue->tq_queue)) queue->tq_enqueue(queue->tq_context); TQ_UNLOCK(queue); } static void taskqueue_run_locked(struct taskqueue *queue) { + struct epoch_tracker et; struct taskqueue_busy tb; struct task *task; + bool in_net_epoch; int pending; KASSERT(queue != NULL, ("tq is NULL")); TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link); + in_net_epoch = false; while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) { STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); if (queue->tq_hint == task) queue->tq_hint = NULL; pending = task->ta_pending; task->ta_pending = 0; tb.tb_running = task; tb.tb_seq = ++queue->tq_seq; TQ_UNLOCK(queue); KASSERT(task->ta_func != NULL, ("task->ta_func is NULL")); + if (!in_net_epoch && TASK_IS_NET(task)) { + in_net_epoch = true; + NET_EPOCH_ENTER(et); + } else if (in_net_epoch && !TASK_IS_NET(task)) { + NET_EPOCH_EXIT(et); + in_net_epoch = false; + } task->ta_func(task->ta_context, pending); TQ_LOCK(queue); wakeup(task); } + if (in_net_epoch) + NET_EPOCH_EXIT(et); LIST_REMOVE(&tb, tb_link); } void taskqueue_run(struct taskqueue *queue) { TQ_LOCK(queue); taskqueue_run_locked(queue); TQ_UNLOCK(queue); } static int task_is_running(struct taskqueue *queue, struct task *task) { struct taskqueue_busy *tb; TQ_ASSERT_LOCKED(queue); LIST_FOREACH(tb, &queue->tq_active, tb_link) { if (tb->tb_running == task) return (1); } return (0); } /* * Only use this function in single threaded contexts. It returns * non-zero if the given task is either pending or running. Else the * task is idle and can be queued again or freed. */ int taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task) { int retval; TQ_LOCK(queue); retval = task->ta_pending > 0 || task_is_running(queue, task); TQ_UNLOCK(queue); return (retval); } static int taskqueue_cancel_locked(struct taskqueue *queue, struct task *task, u_int *pendp) { if (task->ta_pending > 0) { STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link); if (queue->tq_hint == task) queue->tq_hint = NULL; } if (pendp != NULL) *pendp = task->ta_pending; task->ta_pending = 0; return (task_is_running(queue, task) ? 
EBUSY : 0); } int taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp) { int error; TQ_LOCK(queue); error = taskqueue_cancel_locked(queue, task, pendp); TQ_UNLOCK(queue); return (error); } int taskqueue_cancel_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, u_int *pendp) { u_int pending, pending1; int error; TQ_LOCK(queue); pending = !!(callout_stop(&timeout_task->c) > 0); error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1); if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; } TQ_UNLOCK(queue); if (pendp != NULL) *pendp = pending + pending1; return (error); } void taskqueue_drain(struct taskqueue *queue, struct task *task) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); while (task->ta_pending != 0 || task_is_running(queue, task)) TQ_SLEEP(queue, task, "tq_drain"); TQ_UNLOCK(queue); } void taskqueue_drain_all(struct taskqueue *queue) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); (void)taskqueue_drain_tq_queue(queue); (void)taskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } void taskqueue_drain_timeout(struct taskqueue *queue, struct timeout_task *timeout_task) { /* * Set flag to prevent timer from re-starting during drain: */ TQ_LOCK(queue); KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0, ("Drain already in progress")); timeout_task->f |= DT_DRAIN_IN_PROGRESS; TQ_UNLOCK(queue); callout_drain(&timeout_task->c); taskqueue_drain(queue, &timeout_task->t); /* * Clear flag to allow timer to re-start: */ TQ_LOCK(queue); timeout_task->f &= ~DT_DRAIN_IN_PROGRESS; TQ_UNLOCK(queue); } void taskqueue_quiesce(struct taskqueue *queue) { int ret; TQ_LOCK(queue); do { ret = taskqueue_drain_tq_queue(queue); if (ret == 0) ret = taskqueue_drain_tq_active(queue); } while (ret != 0); TQ_UNLOCK(queue); } static void taskqueue_swi_enqueue(void *context) { swi_sched(taskqueue_ih, 0); } static void taskqueue_swi_run(void *dummy) { taskqueue_run(taskqueue_swi); } static void taskqueue_swi_giant_enqueue(void *context) { swi_sched(taskqueue_giant_ih, 0); } static void taskqueue_swi_giant_run(void *dummy) { taskqueue_run(taskqueue_swi_giant); } static int _taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, struct proc *p, const char *name, va_list ap) { char ktname[MAXCOMLEN + 1]; struct thread *td; struct taskqueue *tq; int i, error; if (count <= 0) return (EINVAL); vsnprintf(ktname, sizeof(ktname), name, ap); tq = *tqp; tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE, M_NOWAIT | M_ZERO); if (tq->tq_threads == NULL) { printf("%s: no memory for %s threads\n", __func__, ktname); return (ENOMEM); } for (i = 0; i < count; i++) { if (count == 1) error = kthread_add(taskqueue_thread_loop, tqp, p, &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); else error = kthread_add(taskqueue_thread_loop, tqp, p, &tq->tq_threads[i], RFSTOPPED, 0, "%s_%d", ktname, i); if (error) { /* should be ok to continue, taskqueue_free will dtrt */ printf("%s: kthread_add(%s): error %d", __func__, ktname, error); tq->tq_threads[i] = NULL; /* paranoid */ } else tq->tq_tcount++; } if (tq->tq_tcount == 0) { free(tq->tq_threads, M_TASKQUEUE); tq->tq_threads = NULL; return (ENOMEM); } for (i = 0; i < count; i++) { if (tq->tq_threads[i] == NULL) continue; td = tq->tq_threads[i]; if (mask) { error = cpuset_setthread(td->td_tid, mask); /* * Failing to pin is rarely an actual fatal 
error; * it'll just affect performance. */ if (error) printf("%s: curthread=%llu: can't pin; " "error=%d\n", __func__, (unsigned long long) td->td_tid, error); } thread_lock(td); sched_prio(td, pri); sched_add(td, SRQ_BORING); } return (0); } int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap); va_end(ap); return (error); } int taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri, struct proc *proc, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap); va_end(ap); return (error); } int taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap); va_end(ap); return (error); } static inline void taskqueue_run_callback(struct taskqueue *tq, enum taskqueue_callback_type cb_type) { taskqueue_callback_fn tq_callback; TQ_ASSERT_UNLOCKED(tq); tq_callback = tq->tq_callbacks[cb_type]; if (tq_callback != NULL) tq_callback(tq->tq_cb_contexts[cb_type]); } void taskqueue_thread_loop(void *arg) { struct taskqueue **tqp, *tq; tqp = arg; tq = *tqp; taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT); TQ_LOCK(tq); while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { /* XXX ? */ taskqueue_run_locked(tq); /* * Because taskqueue_run() can drop tq_mutex, we need to * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the * meantime, which means we missed a wakeup. */ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) break; TQ_SLEEP(tq, tq, "-"); } taskqueue_run_locked(tq); /* * This thread is on its way out, so just drop the lock temporarily * in order to call the shutdown callback. This allows the callback * to look at the taskqueue, even just before it dies. 
*/ TQ_UNLOCK(tq); taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); TQ_LOCK(tq); /* rendezvous with thread that asked us to terminate */ tq->tq_tcount--; wakeup_one(tq->tq_threads); TQ_UNLOCK(tq); kthread_exit(); } void taskqueue_thread_enqueue(void *context) { struct taskqueue **tqp, *tq; tqp = context; tq = *tqp; wakeup_any(tq); } TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL, swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, INTR_MPSAFE, &taskqueue_ih)); TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL, swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run, NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); TASKQUEUE_DEFINE_THREAD(thread); struct taskqueue * taskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_SPIN, "fast_taskqueue"); } static void *taskqueue_fast_ih; static void taskqueue_fast_enqueue(void *context) { swi_sched(taskqueue_fast_ih, 0); } static void taskqueue_fast_run(void *dummy) { taskqueue_run(taskqueue_fast); } TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL, swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL, SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih)); int taskqueue_member(struct taskqueue *queue, struct thread *td) { int i, j, ret = 0; for (i = 0, j = 0; ; i++) { if (queue->tq_threads[i] == NULL) continue; if (queue->tq_threads[i] == td) { ret = 1; break; } if (++j >= queue->tq_tcount) break; } return (ret); } Index: head/sys/sys/_task.h =================================================================== --- head/sys/sys/_task.h (revision 357770) +++ head/sys/sys/_task.h (revision 357771) @@ -1,70 +1,77 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS__TASK_H_ #define _SYS__TASK_H_ #include <sys/queue.h> /* * Each task includes a function which is called from * taskqueue_run(). The first argument is taken from the 'ta_context' * field of struct task and the second argument is a count of how many * times the task was enqueued before the call to taskqueue_run().
* * List of locks * (c) const after init * (q) taskqueue lock */ typedef void task_fn_t(void *context, int pending); struct task { STAILQ_ENTRY(task) ta_link; /* (q) link for queue */ uint16_t ta_pending; /* (q) count times queued */ - u_short ta_priority; /* (c) Priority */ + uint8_t ta_priority; /* (c) Priority */ + uint8_t ta_flags; /* (c) Flags */ task_fn_t *ta_func; /* (c) task handler */ void *ta_context; /* (c) argument for handler */ }; + +#define TASK_ENQUEUED 0x1 +#define TASK_NOENQUEUE 0x2 +#define TASK_NETWORK 0x4 + +#define TASK_IS_NET(ta) ((ta)->ta_flags & TASK_NETWORK) #ifdef _KERNEL typedef void gtask_fn_t(void *context); struct gtask { STAILQ_ENTRY(gtask) ta_link; /* (q) link for queue */ uint16_t ta_flags; /* (q) state flags */ u_short ta_priority; /* (c) Priority */ gtask_fn_t *ta_func; /* (c) task handler */ void *ta_context; /* (c) argument for handler */ }; #endif /* _KERNEL */ #endif /* !_SYS__TASK_H_ */
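The struct task change above is the enabling tweak for the whole commit: ta_priority shrinks from u_short to uint8_t to free a byte for the new ta_flags field, and the flag values live here so that both struct task and struct gtask consumers share one namespace. The narrowing is also why the subr_taskqueue.c drain barrier above switched from USHRT_MAX to UCHAR_MAX, and why the taskqueue.h hunk below asserts that priorities fit in 0-255. Used directly (handler and argument names hypothetical):

	struct task t;

	/* Same effect as NET_TASK_INIT(&t, 0, foo_handler, foo_arg). */
	TASK_INIT_FLAGS(&t, 0, foo_handler, foo_arg, TASK_NETWORK);
	MPASS(TASK_IS_NET(&t));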
Index: head/sys/sys/epoch.h =================================================================== --- head/sys/sys/epoch.h (revision 357770) +++ head/sys/sys/epoch.h (revision 357771) @@ -1,109 +1,112 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_EPOCH_H_ #define _SYS_EPOCH_H_ struct epoch_context { void *data[2]; } __aligned(sizeof(void *)); typedef struct epoch_context *epoch_context_t; typedef void epoch_callback_t(epoch_context_t); #ifdef _KERNEL #include <sys/lock.h> #include <sys/pcpu.h> #include <ck_epoch.h> struct epoch; typedef struct epoch *epoch_t; #define EPOCH_PREEMPT 0x1 #define EPOCH_LOCKED 0x2 extern epoch_t global_epoch; extern epoch_t global_epoch_preempt; struct epoch_tracker { TAILQ_ENTRY(epoch_tracker) et_link; struct thread *et_td; ck_epoch_section_t et_section; #ifdef EPOCH_TRACE struct epoch *et_epoch; SLIST_ENTRY(epoch_tracker) et_tlink; const char *et_file; int et_line; #endif } __aligned(sizeof(void *)); typedef struct epoch_tracker *epoch_tracker_t; epoch_t epoch_alloc(const char *name, int flags); void epoch_free(epoch_t epoch); void epoch_wait(epoch_t epoch); void epoch_wait_preempt(epoch_t epoch); void epoch_drain_callbacks(epoch_t epoch); void epoch_call(epoch_t epoch, epoch_callback_t cb, epoch_context_t ctx); int in_epoch(epoch_t epoch); int in_epoch_verbose(epoch_t epoch, int dump_onfail); DPCPU_DECLARE(int, epoch_cb_count); DPCPU_DECLARE(struct grouptask, epoch_cb_task); #ifdef EPOCH_TRACE #define EPOCH_FILE_LINE , const char *file, int line #else #define EPOCH_FILE_LINE #endif void _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE); void _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE); #ifdef EPOCH_TRACE void epoch_trace_list(struct thread *); #define epoch_enter_preempt(epoch, et) _epoch_enter_preempt(epoch, et, __FILE__, __LINE__) #define epoch_exit_preempt(epoch, et) _epoch_exit_preempt(epoch, et, __FILE__, __LINE__) #else #define epoch_enter_preempt(epoch, et) _epoch_enter_preempt(epoch, et) #define epoch_exit_preempt(epoch, et) _epoch_exit_preempt(epoch, et) #endif void epoch_enter(epoch_t epoch); void epoch_exit(epoch_t epoch); /* * Globally recognized epochs in the FreeBSD kernel. */ /* Network preemptible epoch, declared in sys/net/if.c. */ extern epoch_t net_epoch_preempt; #define NET_EPOCH_ENTER(et) epoch_enter_preempt(net_epoch_preempt, &(et)) #define NET_EPOCH_EXIT(et) epoch_exit_preempt(net_epoch_preempt, &(et)) #define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt) #define NET_EPOCH_CALL(f, c) epoch_call(net_epoch_preempt, (f), (c)) #define NET_EPOCH_ASSERT() MPASS(in_epoch(net_epoch_preempt)) +#define NET_TASK_INIT(t, p, f, c) TASK_INIT_FLAGS(t, p, f, c, TASK_NETWORK) +#define NET_GROUPTASK_INIT(gtask, prio, func, ctx) \ + GTASK_INIT(&(gtask)->gt_task, TASK_NETWORK, (prio), (func), (ctx)) #endif /* _KERNEL */ #endif /* _SYS_EPOCH_H_ */
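NET_TASK_INIT() and NET_GROUPTASK_INIT() above are the public spellings for marking the two task flavors as network tasks; the grouptask variant simply routes TASK_NETWORK through GTASK_INIT()'s flags argument. A sketch of the grouptask side, with the foo_txq structure and names invented for illustration:

	struct foo_txq {
		struct grouptask	 ft_task;
		/* ... transmit ring state ... */
	};

	static void
	foo_txq_task(void *ctx)
	{
		struct foo_txq *txq = ctx;

		NET_EPOCH_ASSERT();	/* entered by gtaskqueue_run_locked() */
		/* ... drain txq ... */
	}

	static void
	foo_txq_setup(struct foo_txq *txq)
	{
		NET_GROUPTASK_INIT(&txq->ft_task, 0, foo_txq_task, txq);
		taskqgroup_attach(qgroup_softirq, &txq->ft_task, txq,
		    NULL, NULL, "foo txq");
		GROUPTASK_ENQUEUE(&txq->ft_task);
	}

Relatedly, the gtaskqueue.h hunk below drops its private TASK_ENQUEUED/TASK_SKIP_WAKEUP/TASK_NOENQUEUE definitions in favor of the shared flags now in _task.h, and GROUPTASK_INIT() passes 0 where it used to pass the retired TASK_SKIP_WAKEUP.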
Index: head/sys/sys/gtaskqueue.h =================================================================== --- head/sys/sys/gtaskqueue.h (revision 357770) +++ head/sys/sys/gtaskqueue.h (revision 357771) @@ -1,132 +1,128 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2014 Jeffrey Roberson * Copyright (c) 2016 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_GTASKQUEUE_H_ #define _SYS_GTASKQUEUE_H_ #ifndef _KERNEL #error "no user-serviceable parts inside" #endif #include <sys/_task.h> #include <sys/bus.h> #include <sys/taskqueue.h> #include <sys/types.h> struct gtaskqueue; /* * Taskqueue groups. Manages dynamic thread groups and irq binding for * device and other tasks. */ struct grouptask { struct gtask gt_task; void *gt_taskqueue; LIST_ENTRY(grouptask) gt_list; void *gt_uniq; #define GROUPTASK_NAMELEN 32 char gt_name[GROUPTASK_NAMELEN]; device_t gt_dev; struct resource *gt_irq; int gt_cpu; }; void gtaskqueue_block(struct gtaskqueue *queue); void gtaskqueue_unblock(struct gtaskqueue *queue); int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask); void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *task); void gtaskqueue_drain_all(struct gtaskqueue *queue); void grouptask_block(struct grouptask *grouptask); void grouptask_unblock(struct grouptask *grouptask); int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task); void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask, void *uniq, device_t dev, struct resource *irq, const char *name); int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask, void *uniq, int cpu, device_t dev, struct resource *irq, const char *name); void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask); struct taskqgroup *taskqgroup_create(const char *name); void taskqgroup_destroy(struct taskqgroup *qgroup); int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride); void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name); void taskqgroup_config_gtask_deinit(struct grouptask *gtask); -#define TASK_ENQUEUED 0x1 -#define TASK_SKIP_WAKEUP 0x2 -#define TASK_NOENQUEUE 0x4 - #define GTASK_INIT(gtask, flags, priority, func, context) do { \ (gtask)->ta_flags = flags; \ (gtask)->ta_priority = (priority); \ (gtask)->ta_func = (func); \ (gtask)->ta_context = (context); \ } while (0) #define GROUPTASK_INIT(gtask, priority, func, context) \ - GTASK_INIT(&(gtask)->gt_task, TASK_SKIP_WAKEUP, priority, func, context) + GTASK_INIT(&(gtask)->gt_task, 0, priority, func, context) #define GROUPTASK_ENQUEUE(gtask) \ grouptaskqueue_enqueue((gtask)->gt_taskqueue, &(gtask)->gt_task) #define TASKQGROUP_DECLARE(name) \ extern struct taskqgroup *qgroup_##name #define TASKQGROUP_DEFINE(name, cnt, stride) \ \ struct taskqgroup *qgroup_##name; \ \ static void \ taskqgroup_define_##name(void *arg) \ { \ qgroup_##name = taskqgroup_create(#name); \ } \ \ SYSINIT(taskqgroup_##name, SI_SUB_TASKQ, SI_ORDER_FIRST, \ taskqgroup_define_##name, NULL); \ \ static void \ taskqgroup_adjust_##name(void *arg) \ { \ taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \ } \ \
#define	TASKQGROUP_DECLARE(name)			\
extern struct taskqgroup *qgroup_##name

#define	TASKQGROUP_DEFINE(name, cnt, stride)				\
									\
struct taskqgroup *qgroup_##name;					\
									\
static void								\
taskqgroup_define_##name(void *arg)					\
{									\
	qgroup_##name = taskqgroup_create(#name);			\
}									\
									\
SYSINIT(taskqgroup_##name, SI_SUB_TASKQ, SI_ORDER_FIRST,		\
    taskqgroup_define_##name, NULL);					\
									\
static void								\
taskqgroup_adjust_##name(void *arg)					\
{									\
	taskqgroup_adjust(qgroup_##name, (cnt), (stride));		\
}									\
									\
SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY,		\
    taskqgroup_adjust_##name, NULL)

TASKQGROUP_DECLARE(net);
TASKQGROUP_DECLARE(softirq);

#endif /* !_SYS_GTASKQUEUE_H_ */
Index: head/sys/sys/taskqueue.h
===================================================================
--- head/sys/sys/taskqueue.h	(revision 357770)
+++ head/sys/sys/taskqueue.h	(revision 357771)
@@ -1,217 +1,223 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_TASKQUEUE_H_
#define _SYS_TASKQUEUE_H_

#ifndef _KERNEL
#error "no user-serviceable parts inside"
#endif

#include <sys/queue.h>
#include <sys/_task.h>
#include <sys/_callout.h>
#include <sys/_cpuset.h>

struct taskqueue;
struct taskqgroup;
struct proc;
struct thread;

struct timeout_task {
	struct taskqueue *q;
	struct task t;
	struct callout c;
	int    f;
};

enum taskqueue_callback_type {
	TASKQUEUE_CALLBACK_TYPE_INIT,
	TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
};
#define	TASKQUEUE_CALLBACK_TYPE_MIN	TASKQUEUE_CALLBACK_TYPE_INIT
#define	TASKQUEUE_CALLBACK_TYPE_MAX	TASKQUEUE_CALLBACK_TYPE_SHUTDOWN
#define	TASKQUEUE_NUM_CALLBACKS		TASKQUEUE_CALLBACK_TYPE_MAX + 1

#define	TASKQUEUE_NAMELEN		32

typedef void (*taskqueue_callback_fn)(void *context);

/*
 * A notification callback function which is called from
 * taskqueue_enqueue().  The context argument is given in the call to
 * taskqueue_create().  This function would normally be used to allow the
 * queue to arrange to run itself later (e.g., by scheduling a software
 * interrupt or waking a kernel thread).
 */
typedef void (*taskqueue_enqueue_fn)(void *context);
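The enqueue callback contract described in the comment above is easiest to see in the stock thread-backed pattern: the context handed to taskqueue_create() is a pointer to the queue pointer itself, which taskqueue_thread_enqueue() dereferences to find the service thread to wake. A minimal sketch, with hypothetical foo_* names:

/* Hypothetical private queue, not part of this change. */
static struct taskqueue *foo_tq;

static void
foo_tq_init(void *arg)
{
	/*
	 * Pass &foo_tq as the enqueue context so that
	 * taskqueue_thread_enqueue() can locate the queue whose
	 * service thread it must wake.
	 */
	foo_tq = taskqueue_create("foo_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &foo_tq);
	taskqueue_start_threads(&foo_tq, 1, PWAIT, "foo taskq");
}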
struct taskqueue *taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context);
int	taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
	    const char *name, ...) __printflike(4, 5);
int	taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count,
	    int pri, struct proc *p, const char *name, ...) __printflike(5, 6);
int	taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count,
	    int pri, cpuset_t *mask, const char *name, ...) __printflike(5, 6);
int	taskqueue_enqueue(struct taskqueue *queue, struct task *task);
int	taskqueue_enqueue_timeout(struct taskqueue *queue,
	    struct timeout_task *timeout_task, int ticks);
int	taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
	    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr,
	    int flags);
int	taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task);
int	taskqueue_cancel(struct taskqueue *queue, struct task *task,
	    u_int *pendp);
int	taskqueue_cancel_timeout(struct taskqueue *queue,
	    struct timeout_task *timeout_task, u_int *pendp);
void	taskqueue_drain(struct taskqueue *queue, struct task *task);
void	taskqueue_drain_timeout(struct taskqueue *queue,
	    struct timeout_task *timeout_task);
void	taskqueue_drain_all(struct taskqueue *queue);
void	taskqueue_quiesce(struct taskqueue *queue);
void	taskqueue_free(struct taskqueue *queue);
void	taskqueue_run(struct taskqueue *queue);
void	taskqueue_block(struct taskqueue *queue);
void	taskqueue_unblock(struct taskqueue *queue);
int	taskqueue_member(struct taskqueue *queue, struct thread *td);
void	taskqueue_set_callback(struct taskqueue *queue,
	    enum taskqueue_callback_type cb_type,
	    taskqueue_callback_fn callback, void *context);

#define	TASK_INITIALIZER(priority, func, context)	\
-	{ .ta_pending = 0,				\
-	  .ta_priority = (priority),			\
+	{ .ta_priority = (priority),			\
	  .ta_func = (func),				\
	  .ta_context = (context) }

/*
 * Functions for dedicated thread taskqueues
 */
void	taskqueue_thread_loop(void *arg);
void	taskqueue_thread_enqueue(void *context);

/*
 * Initialise a task structure.
 */
-#define TASK_INIT(task, priority, func, context) do {	\
-	(task)->ta_pending = 0;				\
-	(task)->ta_priority = (priority);		\
-	(task)->ta_func = (func);			\
-	(task)->ta_context = (context);			\
+#define	TASK_INIT_FLAGS(task, priority, func, context, flags) do {	\
+	MPASS((priority) >= 0 && (priority) <= 255);			\
+	(task)->ta_pending = 0;						\
+	(task)->ta_priority = (priority);				\
+	(task)->ta_flags = (flags);					\
+	(task)->ta_func = (func);					\
+	(task)->ta_context = (context);					\
} while (0)

+#define	TASK_INIT(t, p, f, c)	TASK_INIT_FLAGS(t, p, f, c, 0)
+
void _timeout_task_init(struct taskqueue *queue,
	    struct timeout_task *timeout_task, int priority, task_fn_t func,
	    void *context);
-#define	TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) \
-	_timeout_task_init(queue, timeout_task, priority, func, context);
+#define	TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) do { \
+	_Static_assert((priority) >= 0 && (priority) <= 255,	\
+	    "struct task priority is 8 bit in size");		\
+	_timeout_task_init(queue, timeout_task, priority, func, context); \
+} while (0)

/*
 * Declare a reference to a taskqueue.
 */
#define	TASKQUEUE_DECLARE(name)			\
extern struct taskqueue *taskqueue_##name

/*
 * Define and initialise a global taskqueue that uses sleep mutexes.
 */
#define	TASKQUEUE_DEFINE(name, enqueue, context, init)			\
									\
struct taskqueue *taskqueue_##name;					\
									\
static void								\
taskqueue_define_##name(void *arg)					\
{									\
	taskqueue_##name =						\
	    taskqueue_create(#name, M_WAITOK, (enqueue), (context));	\
	init;								\
}									\
									\
SYSINIT(taskqueue_##name, SI_SUB_TASKQ, SI_ORDER_SECOND,		\
    taskqueue_define_##name, NULL);					\
									\
struct __hack

#define	TASKQUEUE_DEFINE_THREAD(name)					\
TASKQUEUE_DEFINE(name, taskqueue_thread_enqueue, &taskqueue_##name,	\
	taskqueue_start_threads(&taskqueue_##name, 1, PWAIT,		\
	"%s taskq", #name))
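The new TASK_INIT_FLAGS() asserts at initialization time that the priority fits the 8-bit ta_priority field, and plain TASK_INIT() becomes the zero-flags case. A minimal consumer sketch, with hypothetical foo_* names:

/* Hypothetical consumer, not part of this change. */
static struct task foo_task;

static void
foo_handler(void *ctx, int pending)
{
	/* pending counts enqueues coalesced into this single run */
}

static void
foo_kick(void *ctx)
{
	TASK_INIT(&foo_task, 0, foo_handler, ctx);	/* flags = 0 */
	taskqueue_enqueue(taskqueue_thread, &foo_task);
}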
/*
 * Define and initialise a global taskqueue that uses spin mutexes.
 */
#define	TASKQUEUE_FAST_DEFINE(name, enqueue, context, init)		\
									\
struct taskqueue *taskqueue_##name;					\
									\
static void								\
taskqueue_define_##name(void *arg)					\
{									\
	taskqueue_##name =						\
	    taskqueue_create_fast(#name, M_WAITOK, (enqueue),		\
	    (context));							\
	init;								\
}									\
									\
SYSINIT(taskqueue_##name, SI_SUB_TASKQ, SI_ORDER_SECOND,		\
    taskqueue_define_##name, NULL);					\
									\
struct __hack

#define	TASKQUEUE_FAST_DEFINE_THREAD(name)				\
TASKQUEUE_FAST_DEFINE(name, taskqueue_thread_enqueue,			\
	&taskqueue_##name, taskqueue_start_threads(&taskqueue_##name,	\
	1, PWAIT, "%s taskq", #name))

/*
 * These queues are serviced by software interrupt handlers.  To enqueue
 * a task, call taskqueue_enqueue(taskqueue_swi, &task) or
 * taskqueue_enqueue(taskqueue_swi_giant, &task).
 */
TASKQUEUE_DECLARE(swi_giant);
TASKQUEUE_DECLARE(swi);

/*
 * This queue is serviced by a kernel thread.  To enqueue a task, call
 * taskqueue_enqueue(taskqueue_thread, &task).
 */
TASKQUEUE_DECLARE(thread);

/*
 * Queue for swi handlers dispatched from fast interrupt handlers.
 * These are necessarily different from the above because the queue
 * must be locked with spinlocks, since sleep mutexes cannot be used
 * from a fast interrupt handler context.
 */
TASKQUEUE_DECLARE(fast);
struct taskqueue *taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context);

#endif /* !_SYS_TASKQUEUE_H_ */
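For completeness, a minimal sketch of the fast (spin-mutex) variant defined above, which exists so that enqueueing is safe from a fast interrupt (filter) handler. The foo_* names are hypothetical and not part of this change; foo_intr_task is assumed to have been TASK_INIT()ed at attach time.

/* Hypothetical fast-queue consumer, not part of this change. */
TASKQUEUE_FAST_DEFINE_THREAD(foofast);	/* defines taskqueue_foofast */

static struct task foo_intr_task;

static int
foo_filter(void *arg)
{
	/* Spin-mutex queue: safe to enqueue from filter context. */
	taskqueue_enqueue(taskqueue_foofast, &foo_intr_task);
	return (FILTER_HANDLED);
}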