Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/subr_gtaskqueue.c
Show First 20 Lines • Show All 164 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { | while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { | ||||
wakeup(tq); | wakeup(tq); | ||||
TQ_SLEEP(tq, pp, "gtq_destroy"); | TQ_SLEEP(tq, pp, "gtq_destroy"); | ||||
} | } | ||||
} | } | ||||
static void | static void __unused | ||||
gtaskqueue_free(struct gtaskqueue *queue) | gtaskqueue_free(struct gtaskqueue *queue) | ||||
{ | { | ||||
TQ_LOCK(queue); | TQ_LOCK(queue); | ||||
queue->tq_flags &= ~TQ_FLAGS_ACTIVE; | queue->tq_flags &= ~TQ_FLAGS_ACTIVE; | ||||
gtaskqueue_terminate(queue->tq_threads, queue); | gtaskqueue_terminate(queue->tq_threads, queue); | ||||
KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?")); | KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?")); | ||||
KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); | KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); | ||||
▲ Show 20 Lines • Show All 404 Lines • ▼ Show 20 Lines | |||||
gtaskqueue_create_fast(const char *name, int mflags, | gtaskqueue_create_fast(const char *name, int mflags, | ||||
taskqueue_enqueue_fn enqueue, void *context) | taskqueue_enqueue_fn enqueue, void *context) | ||||
{ | { | ||||
return _gtaskqueue_create(name, mflags, enqueue, context, | return _gtaskqueue_create(name, mflags, enqueue, context, | ||||
MTX_SPIN, "fast_taskqueue"); | MTX_SPIN, "fast_taskqueue"); | ||||
} | } | ||||
struct taskqgroup_cpu { | struct taskqgroup_cpu { | ||||
LIST_HEAD(, grouptask) tgc_tasks; | LIST_HEAD(, grouptask) tgc_tasks; | ||||
struct gtaskqueue *tgc_taskq; | struct gtaskqueue *tgc_taskq; | ||||
int tgc_cnt; | int tgc_cnt; | ||||
int tgc_cpu; | int tgc_cpu; | ||||
}; | }; | ||||
struct taskqgroup { | struct taskqgroup { | ||||
struct taskqgroup_cpu tqg_queue[MAXCPU]; | struct taskqgroup_cpu tqg_queue[MAXCPU]; | ||||
struct mtx tqg_lock; | struct mtx tqg_lock; | ||||
const char * tqg_name; | const char * tqg_name; | ||||
int tqg_adjusting; | |||||
int tqg_stride; | |||||
int tqg_cnt; | int tqg_cnt; | ||||
}; | }; | ||||
struct taskq_bind_task { | struct taskq_bind_task { | ||||
struct gtask bt_task; | struct gtask bt_task; | ||||
int bt_cpuid; | int bt_cpuid; | ||||
}; | }; | ||||
static void | static void | ||||
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu) | taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu) | ||||
{ | { | ||||
struct taskqgroup_cpu *qcpu; | struct taskqgroup_cpu *qcpu; | ||||
qcpu = &qgroup->tqg_queue[idx]; | qcpu = &qgroup->tqg_queue[idx]; | ||||
LIST_INIT(&qcpu->tgc_tasks); | LIST_INIT(&qcpu->tgc_tasks); | ||||
qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK, | qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK, | ||||
taskqueue_thread_enqueue, &qcpu->tgc_taskq); | taskqueue_thread_enqueue, &qcpu->tgc_taskq); | ||||
gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT, | gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT, | ||||
"%s_%d", qgroup->tqg_name, idx); | "%s_%d", qgroup->tqg_name, idx); | ||||
qcpu->tgc_cpu = cpu; | qcpu->tgc_cpu = cpu; | ||||
} | } | ||||
static void | |||||
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx) | |||||
{ | |||||
gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq); | |||||
} | |||||
/* | /* | ||||
* Find the taskq with least # of tasks that doesn't currently have any | * Find the taskq with least # of tasks that doesn't currently have any | ||||
* other queues from the uniq identifier. | * other queues from the uniq identifier. | ||||
*/ | */ | ||||
static int | static int | ||||
taskqgroup_find(struct taskqgroup *qgroup, void *uniq) | taskqgroup_find(struct taskqgroup *qgroup, void *uniq) | ||||
{ | { | ||||
struct grouptask *n; | struct grouptask *n; | ||||
int i, idx, mincnt; | int i, idx, mincnt; | ||||
int strict; | int strict; | ||||
mtx_assert(&qgroup->tqg_lock, MA_OWNED); | mtx_assert(&qgroup->tqg_lock, MA_OWNED); | ||||
if (qgroup->tqg_cnt == 0) | KASSERT(qgroup->tqg_cnt != 0, | ||||
return (0); | ("qgroup %s has no queues", qgroup->tqg_name)); | ||||
idx = -1; | idx = -1; | ||||
mincnt = INT_MAX; | mincnt = INT_MAX; | ||||
/* | /* | ||||
* Two passes; First scan for a queue with the least tasks that | * Two passes; First scan for a queue with the least tasks that | ||||
* does not already service this uniq id. If that fails simply find | * does not already service this uniq id. If that fails simply find | ||||
* the queue with the least total tasks; | * the queue with the least total tasks; | ||||
*/ | */ | ||||
for (strict = 1; mincnt == INT_MAX; strict = 0) { | for (strict = 1; mincnt == INT_MAX; strict = 0) { | ||||
Show All 13 Lines | for (strict = 1; mincnt == INT_MAX; strict = 0) { | ||||
} | } | ||||
} | } | ||||
if (idx == -1) | if (idx == -1) | ||||
panic("%s: failed to pick a qid.", __func__); | panic("%s: failed to pick a qid.", __func__); | ||||
return (idx); | return (idx); | ||||
} | } | ||||
/*
 * smp_started is unusable since it is not set for UP kernels or even for
 * SMP kernels when there is 1 CPU.  This is usually handled by adding a
 * (mp_ncpus == 1) test, but that would be broken here since we need to
 * synchronize with the SI_SUB_SMP ordering.  Even in the pure SMP case
 * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
 *
 * So maintain our own flag.  It must be set after all CPUs are started
 * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
 * adjustment is properly delayed.  SI_ORDER_FOURTH is clearly before
 * SI_ORDER_ANY and unclearly after the CPUs are started.  It would be
 * simpler for adjustment to pass a flag indicating if it is delayed.
 */
static int tqg_smp_started;

/* SYSINIT hook: record that all CPUs have been started (see above). */
static void
tqg_record_smp_started(void *arg)
{
	tqg_smp_started = 1;
}
SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
    tqg_record_smp_started, NULL);
void | void | ||||
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask, | taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask, | ||||
void *uniq, device_t dev, struct resource *irq, const char *name) | void *uniq, device_t dev, struct resource *irq, const char *name) | ||||
{ | { | ||||
int cpu, qid, error; | int cpu, qid, error; | ||||
KASSERT(qgroup->tqg_cnt > 0, | |||||
("qgroup %s has no queues", qgroup->tqg_name)); | |||||
gtask->gt_uniq = uniq; | gtask->gt_uniq = uniq; | ||||
snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask"); | snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask"); | ||||
gtask->gt_dev = dev; | gtask->gt_dev = dev; | ||||
gtask->gt_irq = irq; | gtask->gt_irq = irq; | ||||
gtask->gt_cpu = -1; | gtask->gt_cpu = -1; | ||||
mtx_lock(&qgroup->tqg_lock); | mtx_lock(&qgroup->tqg_lock); | ||||
qid = taskqgroup_find(qgroup, uniq); | qid = taskqgroup_find(qgroup, uniq); | ||||
qgroup->tqg_queue[qid].tgc_cnt++; | qgroup->tqg_queue[qid].tgc_cnt++; | ||||
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | ||||
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | ||||
if (dev != NULL && irq != NULL && tqg_smp_started) { | if (dev != NULL && irq != NULL) { | ||||
cpu = qgroup->tqg_queue[qid].tgc_cpu; | cpu = qgroup->tqg_queue[qid].tgc_cpu; | ||||
gtask->gt_cpu = cpu; | gtask->gt_cpu = cpu; | ||||
mtx_unlock(&qgroup->tqg_lock); | mtx_unlock(&qgroup->tqg_lock); | ||||
error = bus_bind_intr(dev, irq, cpu); | error = bus_bind_intr(dev, irq, cpu); | ||||
if (error) | if (error) | ||||
printf("%s: binding interrupt failed for %s: %d\n", | printf("%s: binding interrupt failed for %s: %d\n", | ||||
__func__, gtask->gt_name, error); | __func__, gtask->gt_name, error); | ||||
} else | } else | ||||
mtx_unlock(&qgroup->tqg_lock); | mtx_unlock(&qgroup->tqg_lock); | ||||
} | } | ||||
static void | |||||
taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) | |||||
{ | |||||
int qid, cpu, error; | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
qid = taskqgroup_find(qgroup, gtask->gt_uniq); | |||||
cpu = qgroup->tqg_queue[qid].tgc_cpu; | |||||
if (gtask->gt_dev != NULL && gtask->gt_irq != NULL) { | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
error = bus_bind_intr(gtask->gt_dev, gtask->gt_irq, cpu); | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
if (error) | |||||
printf("%s: binding interrupt failed for %s: %d\n", | |||||
__func__, gtask->gt_name, error); | |||||
} | |||||
qgroup->tqg_queue[qid].tgc_cnt++; | |||||
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | |||||
MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); | |||||
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
} | |||||
int | int | ||||
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask, | taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask, | ||||
void *uniq, int cpu, device_t dev, struct resource *irq, const char *name) | void *uniq, int cpu, device_t dev, struct resource *irq, const char *name) | ||||
{ | { | ||||
int i, qid, error; | int i, qid, error; | ||||
qid = -1; | qid = -1; | ||||
gtask->gt_uniq = uniq; | gtask->gt_uniq = uniq; | ||||
snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask"); | snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask"); | ||||
gtask->gt_dev = dev; | gtask->gt_dev = dev; | ||||
gtask->gt_irq = irq; | gtask->gt_irq = irq; | ||||
gtask->gt_cpu = cpu; | gtask->gt_cpu = cpu; | ||||
mtx_lock(&qgroup->tqg_lock); | mtx_lock(&qgroup->tqg_lock); | ||||
if (tqg_smp_started) { | |||||
for (i = 0; i < qgroup->tqg_cnt; i++) | for (i = 0; i < qgroup->tqg_cnt; i++) | ||||
if (qgroup->tqg_queue[i].tgc_cpu == cpu) { | if (qgroup->tqg_queue[i].tgc_cpu == cpu) { | ||||
qid = i; | qid = i; | ||||
break; | break; | ||||
} | } | ||||
if (qid == -1) { | if (qid == -1) { | ||||
mtx_unlock(&qgroup->tqg_lock); | mtx_unlock(&qgroup->tqg_lock); | ||||
printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu); | printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
} else | |||||
qid = 0; | |||||
qgroup->tqg_queue[qid].tgc_cnt++; | qgroup->tqg_queue[qid].tgc_cnt++; | ||||
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | ||||
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | ||||
cpu = qgroup->tqg_queue[qid].tgc_cpu; | cpu = qgroup->tqg_queue[qid].tgc_cpu; | ||||
mtx_unlock(&qgroup->tqg_lock); | mtx_unlock(&qgroup->tqg_lock); | ||||
if (dev != NULL && irq != NULL && tqg_smp_started) { | |||||
error = bus_bind_intr(dev, irq, cpu); | |||||
if (error) | |||||
printf("%s: binding interrupt failed for %s: %d\n", | |||||
__func__, gtask->gt_name, error); | |||||
} | |||||
return (0); | |||||
} | |||||
static int | |||||
taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) | |||||
{ | |||||
device_t dev; | |||||
struct resource *irq; | |||||
int cpu, error, i, qid; | |||||
qid = -1; | |||||
dev = gtask->gt_dev; | |||||
irq = gtask->gt_irq; | |||||
cpu = gtask->gt_cpu; | |||||
MPASS(tqg_smp_started); | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
for (i = 0; i < qgroup->tqg_cnt; i++) | |||||
if (qgroup->tqg_queue[i].tgc_cpu == cpu) { | |||||
qid = i; | |||||
break; | |||||
} | |||||
if (qid == -1) { | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu); | |||||
return (EINVAL); | |||||
} | |||||
qgroup->tqg_queue[qid].tgc_cnt++; | |||||
LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); | |||||
MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); | |||||
gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
if (dev != NULL && irq != NULL) { | if (dev != NULL && irq != NULL) { | ||||
error = bus_bind_intr(dev, irq, cpu); | error = bus_bind_intr(dev, irq, cpu); | ||||
if (error) | if (error) | ||||
printf("%s: binding interrupt failed for %s: %d\n", | printf("%s: binding interrupt failed for %s: %d\n", | ||||
__func__, gtask->gt_name, error); | __func__, gtask->gt_name, error); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
Show All 15 Lines | taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask) | ||||
mtx_unlock(&qgroup->tqg_lock); | mtx_unlock(&qgroup->tqg_lock); | ||||
gtask->gt_taskqueue = NULL; | gtask->gt_taskqueue = NULL; | ||||
gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE; | gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE; | ||||
} | } | ||||
static void | static void | ||||
taskqgroup_binder(void *ctx) | taskqgroup_binder(void *ctx) | ||||
{ | { | ||||
struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx; | struct taskq_bind_task *gtask; | ||||
cpuset_t mask; | cpuset_t mask; | ||||
int error; | int error; | ||||
gtask = ctx; | |||||
CPU_ZERO(&mask); | CPU_ZERO(&mask); | ||||
CPU_SET(gtask->bt_cpuid, &mask); | CPU_SET(gtask->bt_cpuid, &mask); | ||||
error = cpuset_setthread(curthread->td_tid, &mask); | error = cpuset_setthread(curthread->td_tid, &mask); | ||||
thread_lock(curthread); | thread_lock(curthread); | ||||
sched_bind(curthread, gtask->bt_cpuid); | sched_bind(curthread, gtask->bt_cpuid); | ||||
thread_unlock(curthread); | thread_unlock(curthread); | ||||
if (error) | if (error) | ||||
printf("%s: binding curthread failed: %d\n", __func__, error); | printf("%s: binding curthread failed: %d\n", __func__, error); | ||||
free(gtask, M_DEVBUF); | free(gtask, M_DEVBUF); | ||||
} | } | ||||
static void | void | ||||
taskqgroup_bind(struct taskqgroup *qgroup) | taskqgroup_bind(struct taskqgroup *qgroup) | ||||
{ | { | ||||
struct taskq_bind_task *gtask; | struct taskq_bind_task *gtask; | ||||
int i; | int i; | ||||
/* | /* | ||||
* Bind taskqueue threads to specific CPUs, if they have been assigned | * Bind taskqueue threads to specific CPUs, if they have been assigned | ||||
* one. | * one. | ||||
*/ | */ | ||||
if (qgroup->tqg_cnt == 1) | if (qgroup->tqg_cnt == 1) | ||||
return; | return; | ||||
for (i = 0; i < qgroup->tqg_cnt; i++) { | for (i = 0; i < qgroup->tqg_cnt; i++) { | ||||
gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK); | gtask = malloc(sizeof(*gtask), M_DEVBUF, M_WAITOK); | ||||
GTASK_INIT(>ask->bt_task, 0, 0, taskqgroup_binder, gtask); | GTASK_INIT(>ask->bt_task, 0, 0, taskqgroup_binder, gtask); | ||||
gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu; | gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu; | ||||
grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq, | grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq, | ||||
>ask->bt_task); | >ask->bt_task); | ||||
} | } | ||||
} | } | ||||
static void | |||||
taskqgroup_config_init(void *arg) | |||||
{ | |||||
struct taskqgroup *qgroup = qgroup_config; | |||||
LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL); | |||||
LIST_SWAP(>ask_head, &qgroup->tqg_queue[0].tgc_tasks, | |||||
grouptask, gt_list); | |||||
qgroup->tqg_queue[0].tgc_cnt = 0; | |||||
taskqgroup_cpu_create(qgroup, 0, 0); | |||||
qgroup->tqg_cnt = 1; | |||||
qgroup->tqg_stride = 1; | |||||
} | |||||
SYSINIT(taskqgroup_config_init, SI_SUB_TASKQ, SI_ORDER_SECOND, | |||||
taskqgroup_config_init, NULL); | |||||
static int | |||||
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) | |||||
{ | |||||
LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL); | |||||
struct grouptask *gtask; | |||||
int i, k, old_cnt, old_cpu, cpu; | |||||
mtx_assert(&qgroup->tqg_lock, MA_OWNED); | |||||
if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) { | |||||
printf("%s: failed cnt: %d stride: %d " | |||||
"mp_ncpus: %d tqg_smp_started: %d\n", | |||||
__func__, cnt, stride, mp_ncpus, tqg_smp_started); | |||||
return (EINVAL); | |||||
} | |||||
if (qgroup->tqg_adjusting) { | |||||
printf("%s failed: adjusting\n", __func__); | |||||
return (EBUSY); | |||||
} | |||||
qgroup->tqg_adjusting = 1; | |||||
old_cnt = qgroup->tqg_cnt; | |||||
old_cpu = 0; | |||||
if (old_cnt < cnt) | |||||
old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu; | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
/* | |||||
* Set up queue for tasks added before boot. | |||||
*/ | |||||
if (old_cnt == 0) { | |||||
LIST_SWAP(>ask_head, &qgroup->tqg_queue[0].tgc_tasks, | |||||
grouptask, gt_list); | |||||
qgroup->tqg_queue[0].tgc_cnt = 0; | |||||
} | |||||
/* | |||||
* If new taskq threads have been added. | |||||
*/ | |||||
cpu = old_cpu; | |||||
for (i = old_cnt; i < cnt; i++) { | |||||
taskqgroup_cpu_create(qgroup, i, cpu); | |||||
for (k = 0; k < stride; k++) | |||||
cpu = CPU_NEXT(cpu); | |||||
} | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
qgroup->tqg_cnt = cnt; | |||||
qgroup->tqg_stride = stride; | |||||
/* | |||||
* Adjust drivers to use new taskqs. | |||||
*/ | |||||
for (i = 0; i < old_cnt; i++) { | |||||
while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) { | |||||
LIST_REMOVE(gtask, gt_list); | |||||
qgroup->tqg_queue[i].tgc_cnt--; | |||||
LIST_INSERT_HEAD(>ask_head, gtask, gt_list); | |||||
} | |||||
} | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
while ((gtask = LIST_FIRST(>ask_head))) { | |||||
LIST_REMOVE(gtask, gt_list); | |||||
if (gtask->gt_cpu == -1) | |||||
taskqgroup_attach_deferred(qgroup, gtask); | |||||
else if (taskqgroup_attach_cpu_deferred(qgroup, gtask)) | |||||
taskqgroup_attach_deferred(qgroup, gtask); | |||||
} | |||||
#ifdef INVARIANTS | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
for (i = 0; i < qgroup->tqg_cnt; i++) { | |||||
MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL); | |||||
LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) | |||||
MPASS(gtask->gt_taskqueue != NULL); | |||||
} | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
#endif | |||||
/* | |||||
* If taskq thread count has been reduced. | |||||
*/ | |||||
for (i = cnt; i < old_cnt; i++) | |||||
taskqgroup_cpu_remove(qgroup, i); | |||||
taskqgroup_bind(qgroup); | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
qgroup->tqg_adjusting = 0; | |||||
return (0); | |||||
} | |||||
int | |||||
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) | |||||
{ | |||||
int error; | |||||
mtx_lock(&qgroup->tqg_lock); | |||||
error = _taskqgroup_adjust(qgroup, cnt, stride); | |||||
mtx_unlock(&qgroup->tqg_lock); | |||||
return (error); | |||||
} | |||||
struct taskqgroup * | struct taskqgroup * | ||||
taskqgroup_create(const char *name) | taskqgroup_create(const char *name, int cnt, int stride) | ||||
{ | { | ||||
struct taskqgroup *qgroup; | struct taskqgroup *qgroup; | ||||
int cpu, i, j; | |||||
qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO); | qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO); | ||||
mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF); | mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF); | ||||
qgroup->tqg_name = name; | qgroup->tqg_name = name; | ||||
LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks); | qgroup->tqg_cnt = cnt; | ||||
for (cpu = i = 0; i < cnt; i++) { | |||||
taskqgroup_cpu_create(qgroup, i, cpu); | |||||
for (j = 0; j < stride; j++) | |||||
cpu = CPU_NEXT(cpu); | |||||
} | |||||
return (qgroup); | return (qgroup); | ||||
} | } | ||||
/*
 * Intentionally a no-op: group teardown is not implemented.  The mutex,
 * queues, and group allocation are leaked if this is ever called.
 */
void
taskqgroup_destroy(struct taskqgroup *qgroup)
{
}
Show All 16 Lines |