D7393.id.diff
Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files
+++ head/sys/conf/files
@@ -3349,6 +3349,7 @@
kern/subr_eventhandler.c standard
kern/subr_fattime.c standard
kern/subr_firmware.c optional firmware
+kern/subr_gtaskqueue.c standard
kern/subr_hash.c standard
kern/subr_hints.c standard
kern/subr_kdb.c standard
Index: head/sys/kern/subr_gtaskqueue.c
===================================================================
--- head/sys/kern/subr_gtaskqueue.c
+++ head/sys/kern/subr_gtaskqueue.c
@@ -0,0 +1,864 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * Copyright (c) 2014 Jeff Roberson
+ * Copyright (c) 2016 Matthew Macy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/libkern.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/gtaskqueue.h>
+#include <sys/unistd.h>
+#include <machine/stdarg.h>
+
+static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues");
+static void gtaskqueue_thread_enqueue(void *);
+static void gtaskqueue_thread_loop(void *arg);
+
+
+struct gtaskqueue_busy {
+ struct gtask *tb_running;
+ TAILQ_ENTRY(gtaskqueue_busy) tb_link;
+};
+
+static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
+
+struct gtaskqueue {
+ STAILQ_HEAD(, gtask) tq_queue;
+ gtaskqueue_enqueue_fn tq_enqueue;
+ void *tq_context;
+ char *tq_name;
+ TAILQ_HEAD(, gtaskqueue_busy) tq_active;
+ struct mtx tq_mutex;
+ struct thread **tq_threads;
+ int tq_tcount;
+ int tq_spin;
+ int tq_flags;
+ int tq_callouts;
+ taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
+ void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
+};
+
+#define TQ_FLAGS_ACTIVE (1 << 0)
+#define TQ_FLAGS_BLOCKED (1 << 1)
+#define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)
+
+#define DT_CALLOUT_ARMED (1 << 0)
+
+#define TQ_LOCK(tq) \
+ do { \
+ if ((tq)->tq_spin) \
+ mtx_lock_spin(&(tq)->tq_mutex); \
+ else \
+ mtx_lock(&(tq)->tq_mutex); \
+ } while (0)
+#define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED)
+
+#define TQ_UNLOCK(tq) \
+ do { \
+ if ((tq)->tq_spin) \
+ mtx_unlock_spin(&(tq)->tq_mutex); \
+ else \
+ mtx_unlock(&(tq)->tq_mutex); \
+ } while (0)
+#define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
+
+static __inline int
+TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
+ int t)
+{
+ if (tq->tq_spin)
+ return (msleep_spin(p, m, wm, t));
+ return (msleep(p, m, pri, wm, t));
+}
+
+static struct gtaskqueue *
+_gtaskqueue_create(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue, void *context,
+ int mtxflags, const char *mtxname __unused)
+{
+ struct gtaskqueue *queue;
+ char *tq_name;
+
+ tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
+ if (!tq_name)
+ return (NULL);
+
+ snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");
+
+ queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
+ if (!queue)
+ return (NULL);
+
+ STAILQ_INIT(&queue->tq_queue);
+ TAILQ_INIT(&queue->tq_active);
+ queue->tq_enqueue = enqueue;
+ queue->tq_context = context;
+ queue->tq_name = tq_name;
+ queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
+ queue->tq_flags |= TQ_FLAGS_ACTIVE;
+ if (enqueue == gtaskqueue_thread_enqueue)
+ queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
+ mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
+
+ return (queue);
+}
+
+
+/*
+ * Signal a taskqueue thread to terminate.
+ */
+static void
+gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
+{
+
+ while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
+ wakeup(tq);
+ TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
+ }
+}
+
+static void
+gtaskqueue_free(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
+ gtaskqueue_terminate(queue->tq_threads, queue);
+ KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
+ KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
+ mtx_destroy(&queue->tq_mutex);
+ free(queue->tq_threads, M_GTASKQUEUE);
+ free(queue->tq_name, M_GTASKQUEUE);
+ free(queue, M_GTASKQUEUE);
+}
+
+int
+grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ TQ_LOCK(queue);
+ if (gtask->ta_flags & TASK_ENQUEUED) {
+ TQ_UNLOCK(queue);
+ return (0);
+ }
+ STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
+ gtask->ta_flags |= TASK_ENQUEUED;
+ TQ_UNLOCK(queue);
+ if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
+ queue->tq_enqueue(queue->tq_context);
+ return (0);
+}
+
+static void
+gtaskqueue_task_nop_fn(void *context)
+{
+}
+
+/*
+ * Block until all currently queued tasks in this taskqueue
+ * have begun execution. Tasks queued during execution of
+ * this function are ignored.
+ */
+static void
+gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
+{
+ struct gtask t_barrier;
+
+ if (STAILQ_EMPTY(&queue->tq_queue))
+ return;
+
+ /*
+ * Enqueue our barrier after all current tasks, but with
+ * the highest priority so that newly queued tasks cannot
+ * pass it. Because of the high priority, we can not use
+ * taskqueue_enqueue_locked directly (which drops the lock
+ * anyway) so just insert it at tail while we have the
+ * queue lock.
+ */
+ GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
+ STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
+ t_barrier.ta_flags |= TASK_ENQUEUED;
+
+ /*
+ * Once the barrier has executed, all previously queued tasks
+ * have completed or are currently executing.
+ */
+ while (t_barrier.ta_flags & TASK_ENQUEUED)
+ TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
+}
+
+/*
+ * Block until all currently executing tasks for this taskqueue
+ * complete. Tasks that begin execution during the execution
+ * of this function are ignored.
+ */
+static void
+gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
+{
+ struct gtaskqueue_busy tb_marker, *tb_first;
+
+ if (TAILQ_EMPTY(&queue->tq_active))
+ return;
+
+ /* Block gtaskqueue_terminate(). */
+ queue->tq_callouts++;
+
+ /*
+ * Wait for all currently executing taskqueue threads
+ * to go idle.
+ */
+ tb_marker.tb_running = TB_DRAIN_WAITER;
+ TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
+ while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
+ TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
+ TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);
+
+ /*
+ * Wake up any other drain waiter that happened to queue up
+ * without any intervening active thread.
+ */
+ tb_first = TAILQ_FIRST(&queue->tq_active);
+ if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
+ wakeup(tb_first);
+
+ /* Release gtaskqueue_terminate(). */
+ queue->tq_callouts--;
+ if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
+ wakeup_one(queue->tq_threads);
+}
+
+void
+gtaskqueue_block(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags |= TQ_FLAGS_BLOCKED;
+ TQ_UNLOCK(queue);
+}
+
+void
+gtaskqueue_unblock(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
+ if (!STAILQ_EMPTY(&queue->tq_queue))
+ queue->tq_enqueue(queue->tq_context);
+ TQ_UNLOCK(queue);
+}
+
+static void
+gtaskqueue_run_locked(struct gtaskqueue *queue)
+{
+ struct gtaskqueue_busy tb;
+ struct gtaskqueue_busy *tb_first;
+ struct gtask *gtask;
+
+ KASSERT(queue != NULL, ("tq is NULL"));
+ TQ_ASSERT_LOCKED(queue);
+ tb.tb_running = NULL;
+
+ while (STAILQ_FIRST(&queue->tq_queue)) {
+ TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);
+
+ /*
+ * Carefully remove the first task from the queue and
+ * clear its TASK_ENQUEUED flag
+ */
+ gtask = STAILQ_FIRST(&queue->tq_queue);
+ KASSERT(gtask != NULL, ("task is NULL"));
+ STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
+ gtask->ta_flags &= ~TASK_ENQUEUED;
+ tb.tb_running = gtask;
+ TQ_UNLOCK(queue);
+
+ KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
+ gtask->ta_func(gtask->ta_context);
+
+ TQ_LOCK(queue);
+ tb.tb_running = NULL;
+ wakeup(gtask);
+
+ TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
+ tb_first = TAILQ_FIRST(&queue->tq_active);
+ if (tb_first != NULL &&
+ tb_first->tb_running == TB_DRAIN_WAITER)
+ wakeup(tb_first);
+ }
+}
+
+static int
+task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ struct gtaskqueue_busy *tb;
+
+ TQ_ASSERT_LOCKED(queue);
+ TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
+ if (tb->tb_running == gtask)
+ return (1);
+ }
+ return (0);
+}
+
+static int
+gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
+{
+
+ if (gtask->ta_flags & TASK_ENQUEUED)
+ STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
+ gtask->ta_flags &= ~TASK_ENQUEUED;
+ return (task_is_running(queue, gtask) ? EBUSY : 0);
+}
+
+int
+gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ int error;
+
+ TQ_LOCK(queue);
+ error = gtaskqueue_cancel_locked(queue, gtask);
+ TQ_UNLOCK(queue);
+
+ return (error);
+}
+
+void
+gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
+{
+
+ if (!queue->tq_spin)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+
+ TQ_LOCK(queue);
+ while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
+ TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
+ TQ_UNLOCK(queue);
+}
+
+void
+gtaskqueue_drain_all(struct gtaskqueue *queue)
+{
+
+ if (!queue->tq_spin)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+
+ TQ_LOCK(queue);
+ gtaskqueue_drain_tq_queue(queue);
+ gtaskqueue_drain_tq_active(queue);
+ TQ_UNLOCK(queue);
+}
+
+static int
+_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
+ cpuset_t *mask, const char *name, va_list ap)
+{
+ char ktname[MAXCOMLEN + 1];
+ struct thread *td;
+ struct gtaskqueue *tq;
+ int i, error;
+
+ if (count <= 0)
+ return (EINVAL);
+
+ vsnprintf(ktname, sizeof(ktname), name, ap);
+ tq = *tqp;
+
+ tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
+ M_NOWAIT | M_ZERO);
+ if (tq->tq_threads == NULL) {
+ printf("%s: no memory for %s threads\n", __func__, ktname);
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (count == 1)
+ error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
+ &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
+ else
+ error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
+ &tq->tq_threads[i], RFSTOPPED, 0,
+ "%s_%d", ktname, i);
+ if (error) {
+ /* should be ok to continue, gtaskqueue_free will do the right thing */
+ printf("%s: kthread_add(%s): error %d", __func__,
+ ktname, error);
+ tq->tq_threads[i] = NULL; /* paranoid */
+ } else
+ tq->tq_tcount++;
+ }
+ for (i = 0; i < count; i++) {
+ if (tq->tq_threads[i] == NULL)
+ continue;
+ td = tq->tq_threads[i];
+ if (mask) {
+ error = cpuset_setthread(td->td_tid, mask);
+ /*
+ * Failing to pin is rarely an actual fatal error;
+ * it'll just affect performance.
+ */
+ if (error)
+ printf("%s: curthread=%llu: can't pin; "
+ "error=%d\n",
+ __func__,
+ (unsigned long long) td->td_tid,
+ error);
+ }
+ thread_lock(td);
+ sched_prio(td, pri);
+ sched_add(td, SRQ_BORING);
+ thread_unlock(td);
+ }
+
+ return (0);
+}
+
+static int
+gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
+ const char *name, ...)
+{
+ va_list ap;
+ int error;
+
+ va_start(ap, name);
+ error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
+ va_end(ap);
+ return (error);
+}
+
+static inline void
+gtaskqueue_run_callback(struct gtaskqueue *tq,
+ enum taskqueue_callback_type cb_type)
+{
+ taskqueue_callback_fn tq_callback;
+
+ TQ_ASSERT_UNLOCKED(tq);
+ tq_callback = tq->tq_callbacks[cb_type];
+ if (tq_callback != NULL)
+ tq_callback(tq->tq_cb_contexts[cb_type]);
+}
+
+static void
+gtaskqueue_thread_loop(void *arg)
+{
+ struct gtaskqueue **tqp, *tq;
+
+ tqp = arg;
+ tq = *tqp;
+ gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
+ TQ_LOCK(tq);
+ while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
+ /* XXX ? */
+ gtaskqueue_run_locked(tq);
+ /*
+ * Because gtaskqueue_run_locked() can drop tq_mutex, we need to
+ * check whether the TQ_FLAGS_ACTIVE flag was removed in the
+ * meantime, which would mean we missed a wakeup.
+ */
+ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
+ break;
+ TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
+ }
+ gtaskqueue_run_locked(tq);
+ /*
+ * This thread is on its way out, so just drop the lock temporarily
+ * in order to call the shutdown callback. This allows the callback
+ * to look at the taskqueue, even just before it dies.
+ */
+ TQ_UNLOCK(tq);
+ gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
+ TQ_LOCK(tq);
+
+ /* rendezvous with thread that asked us to terminate */
+ tq->tq_tcount--;
+ wakeup_one(tq->tq_threads);
+ TQ_UNLOCK(tq);
+ kthread_exit();
+}
+
+static void
+gtaskqueue_thread_enqueue(void *context)
+{
+ struct gtaskqueue **tqp, *tq;
+
+ tqp = context;
+ tq = *tqp;
+ wakeup_one(tq);
+}
+
+
+static struct gtaskqueue *
+gtaskqueue_create_fast(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue, void *context)
+{
+ return _gtaskqueue_create(name, mflags, enqueue, context,
+ MTX_SPIN, "fast_taskqueue");
+}
+
+
+struct taskqgroup_cpu {
+ LIST_HEAD(, grouptask) tgc_tasks;
+ struct gtaskqueue *tgc_taskq;
+ int tgc_cnt;
+ int tgc_cpu;
+};
+
+struct taskqgroup {
+ struct taskqgroup_cpu tqg_queue[MAXCPU];
+ struct mtx tqg_lock;
+ char * tqg_name;
+ int tqg_adjusting;
+ int tqg_stride;
+ int tqg_cnt;
+};
+
+struct taskq_bind_task {
+ struct gtask bt_task;
+ int bt_cpuid;
+};
+
+static void
+taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
+{
+ struct taskqgroup_cpu *qcpu;
+
+ qcpu = &qgroup->tqg_queue[idx];
+ LIST_INIT(&qcpu->tgc_tasks);
+ qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
+ taskqueue_thread_enqueue, &qcpu->tgc_taskq);
+ gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
+ "%s_%d", qgroup->tqg_name, idx);
+ qcpu->tgc_cpu = idx * qgroup->tqg_stride;
+}
+
+static void
+taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
+{
+
+ gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
+}
+
+/*
+ * Find the taskq with the fewest tasks that is not already
+ * servicing this uniq identifier.
+ */
+static int
+taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
+{
+ struct grouptask *n;
+ int i, idx, mincnt;
+ int strict;
+
+ mtx_assert(&qgroup->tqg_lock, MA_OWNED);
+ if (qgroup->tqg_cnt == 0)
+ return (0);
+ idx = -1;
+ mincnt = INT_MAX;
+ /*
+ * Two passes: first scan for a queue with the fewest tasks that
+ * does not already service this uniq id. If that fails, simply
+ * find the queue with the fewest total tasks.
+ */
+ for (strict = 1; mincnt == INT_MAX; strict = 0) {
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
+ continue;
+ if (strict) {
+ LIST_FOREACH(n,
+ &qgroup->tqg_queue[i].tgc_tasks, gt_list)
+ if (n->gt_uniq == uniq)
+ break;
+ if (n != NULL)
+ continue;
+ }
+ mincnt = qgroup->tqg_queue[i].tgc_cnt;
+ idx = i;
+ }
+ }
+ if (idx == -1)
+ panic("taskqgroup_find: Failed to pick a qid.");
+
+ return (idx);
+}
+
+void
+taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
+ void *uniq, int irq, char *name)
+{
+ cpuset_t mask;
+ int qid;
+
+ gtask->gt_uniq = uniq;
+ gtask->gt_name = name;
+ gtask->gt_irq = irq;
+ gtask->gt_cpu = -1;
+ mtx_lock(&qgroup->tqg_lock);
+ qid = taskqgroup_find(qgroup, uniq);
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ if (irq != -1 && smp_started) {
+ CPU_ZERO(&mask);
+ CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
+ mtx_unlock(&qgroup->tqg_lock);
+ intr_setaffinity(irq, &mask);
+ } else
+ mtx_unlock(&qgroup->tqg_lock);
+}
+
+int
+taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
+ void *uniq, int cpu, int irq, char *name)
+{
+ cpuset_t mask;
+ int i, qid;
+
+ qid = -1;
+ gtask->gt_uniq = uniq;
+ gtask->gt_name = name;
+ gtask->gt_irq = irq;
+ gtask->gt_cpu = cpu;
+ mtx_lock(&qgroup->tqg_lock);
+ if (smp_started) {
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+ qid = i;
+ break;
+ }
+ if (qid == -1) {
+ mtx_unlock(&qgroup->tqg_lock);
+ return (EINVAL);
+ }
+ } else
+ qid = 0;
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ if (irq != -1 && smp_started) {
+ CPU_ZERO(&mask);
+ CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
+ mtx_unlock(&qgroup->tqg_lock);
+ intr_setaffinity(irq, &mask);
+ } else
+ mtx_unlock(&qgroup->tqg_lock);
+ return (0);
+}
+
+void
+taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+ int i;
+
+ mtx_lock(&qgroup->tqg_lock);
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
+ break;
+ if (i == qgroup->tqg_cnt)
+ panic("taskqgroup_detach: task not in group\n");
+ qgroup->tqg_queue[i].tgc_cnt--;
+ LIST_REMOVE(gtask, gt_list);
+ mtx_unlock(&qgroup->tqg_lock);
+ gtask->gt_taskqueue = NULL;
+}
+
+static void
+taskqgroup_binder(void *ctx)
+{
+ struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
+ cpuset_t mask;
+ int error;
+
+ CPU_ZERO(&mask);
+ CPU_SET(gtask->bt_cpuid, &mask);
+ error = cpuset_setthread(curthread->td_tid, &mask);
+ thread_lock(curthread);
+ sched_bind(curthread, gtask->bt_cpuid);
+ thread_unlock(curthread);
+
+ if (error)
+ printf("taskqgroup_binder: setaffinity failed: %d\n",
+ error);
+ free(gtask, M_DEVBUF);
+}
+
+static void
+taskqgroup_bind(struct taskqgroup *qgroup)
+{
+ struct taskq_bind_task *gtask;
+ int i;
+
+ /*
+ * Bind taskqueue threads to specific CPUs, if they have been assigned
+ * one.
+ */
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ gtask = malloc(sizeof (*gtask), M_DEVBUF, M_NOWAIT);
+ GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
+ gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
+ grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
+ &gtask->bt_task);
+ }
+}
+
+static int
+_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
+{
+ LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
+ cpuset_t mask;
+ struct grouptask *gtask;
+ int i, old_cnt, qid;
+
+ mtx_assert(&qgroup->tqg_lock, MA_OWNED);
+
+ if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
+ printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
+ cnt, stride, mp_ncpus, smp_started);
+ return (EINVAL);
+ }
+ if (qgroup->tqg_adjusting) {
+ printf("taskqgroup_adjust failed: adjusting\n");
+ return (EBUSY);
+ }
+ qgroup->tqg_adjusting = 1;
+ old_cnt = qgroup->tqg_cnt;
+ mtx_unlock(&qgroup->tqg_lock);
+ /*
+ * Set up queue for tasks added before boot.
+ */
+ if (old_cnt == 0) {
+ LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
+ grouptask, gt_list);
+ qgroup->tqg_queue[0].tgc_cnt = 0;
+ }
+
+ /*
+ * If new taskq threads have been added.
+ */
+ for (i = old_cnt; i < cnt; i++)
+ taskqgroup_cpu_create(qgroup, i);
+ mtx_lock(&qgroup->tqg_lock);
+ qgroup->tqg_cnt = cnt;
+ qgroup->tqg_stride = stride;
+
+ /*
+ * Adjust drivers to use new taskqs.
+ */
+ for (i = 0; i < old_cnt; i++) {
+ while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
+ LIST_REMOVE(gtask, gt_list);
+ qgroup->tqg_queue[i].tgc_cnt--;
+ LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
+ }
+ }
+
+ while ((gtask = LIST_FIRST(&gtask_head))) {
+ LIST_REMOVE(gtask, gt_list);
+ if (gtask->gt_cpu == -1)
+ qid = taskqgroup_find(qgroup, gtask->gt_uniq);
+ else {
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
+ qid = i;
+ break;
+ }
+ }
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
+ gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ }
+ /*
+ * Set new CPU and IRQ affinity
+ */
+ for (i = 0; i < cnt; i++) {
+ qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
+ CPU_ZERO(&mask);
+ CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
+ LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
+ if (gtask->gt_irq == -1)
+ continue;
+ intr_setaffinity(gtask->gt_irq, &mask);
+ }
+ }
+ mtx_unlock(&qgroup->tqg_lock);
+
+ /*
+ * If taskq thread count has been reduced.
+ */
+ for (i = cnt; i < old_cnt; i++)
+ taskqgroup_cpu_remove(qgroup, i);
+
+ mtx_lock(&qgroup->tqg_lock);
+ qgroup->tqg_adjusting = 0;
+
+ taskqgroup_bind(qgroup);
+
+ return (0);
+}
+
+int
+taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
+{
+ int error;
+
+ mtx_lock(&qgroup->tqg_lock);
+ error = _taskqgroup_adjust(qgroup, cpu, stride);
+ mtx_unlock(&qgroup->tqg_lock);
+
+ return (error);
+}
+
+struct taskqgroup *
+taskqgroup_create(char *name)
+{
+ struct taskqgroup *qgroup;
+
+ qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
+ mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
+ qgroup->tqg_name = name;
+ LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);
+
+ return (qgroup);
+}
+
+void
+taskqgroup_destroy(struct taskqgroup *qgroup)
+{
+
+}
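
For orientation, the grouptask API added above is consumed by drivers roughly as follows. This is a minimal sketch, not part of this diff; it assumes the GTASK_INIT and GROUPTASK_ENQUEUE helpers from the sys/gtaskqueue.h header added elsewhere in this change, and all my_* names are hypothetical.

    static struct taskqgroup *my_tqg;   /* e.g. taskqgroup_create("my_io") */
    static struct grouptask my_gt;

    static void
    my_deferred_fn(void *context)
    {
            /* Deferred work; runs on the gtaskqueue thread that
             * taskqgroup_attach() bound this grouptask to. */
    }

    static void
    my_attach(void *softc, int irq)
    {
            GTASK_INIT(&my_gt.gt_task, 0, 0, my_deferred_fn, softc);
            taskqgroup_attach(my_tqg, &my_gt, softc, irq, "my_gt");
    }

    static void
    my_intr(void *arg)
    {
            /* From the interrupt handler: defer to the bound thread. */
            GROUPTASK_ENQUEUE(&my_gt);
    }
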
Index: head/sys/kern/subr_taskqueue.c
===================================================================
--- head/sys/kern/subr_taskqueue.c
+++ head/sys/kern/subr_taskqueue.c
@@ -261,22 +261,6 @@
}
int
-grouptaskqueue_enqueue(struct taskqueue *queue, struct task *task)
-{
- TQ_LOCK(queue);
- if (task->ta_pending) {
- TQ_UNLOCK(queue);
- return (0);
- }
- STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
- task->ta_pending = 1;
- TQ_UNLOCK(queue);
- if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
- queue->tq_enqueue(queue->tq_context);
- return (0);
-}
-
-int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
int res;
@@ -806,347 +790,3 @@
}
return (ret);
}
-
-struct taskqgroup_cpu {
- LIST_HEAD(, grouptask) tgc_tasks;
- struct taskqueue *tgc_taskq;
- int tgc_cnt;
- int tgc_cpu;
-};
-
-struct taskqgroup {
- struct taskqgroup_cpu tqg_queue[MAXCPU];
- struct mtx tqg_lock;
- char * tqg_name;
- int tqg_adjusting;
- int tqg_stride;
- int tqg_cnt;
-};
-
-struct taskq_bind_task {
- struct task bt_task;
- int bt_cpuid;
-};
-
-static void
-taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
-{
- struct taskqgroup_cpu *qcpu;
- int i, j;
-
- qcpu = &qgroup->tqg_queue[idx];
- LIST_INIT(&qcpu->tgc_tasks);
- qcpu->tgc_taskq = taskqueue_create_fast(NULL, M_WAITOK,
- taskqueue_thread_enqueue, &qcpu->tgc_taskq);
- taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
- "%s_%d", qgroup->tqg_name, idx);
-
- for (i = CPU_FIRST(), j = 0; j < idx * qgroup->tqg_stride;
- j++, i = CPU_NEXT(i)) {
- /*
- * Wait: evaluate the idx * qgroup->tqg_stride'th CPU,
- * potentially wrapping the actual count
- */
- }
- qcpu->tgc_cpu = i;
-}
-
-static void
-taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
-{
-
- taskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
-}
-
-/*
- * Find the taskq with least # of tasks that doesn't currently have any
- * other queues from the uniq identifier.
- */
-static int
-taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
-{
- struct grouptask *n;
- int i, idx, mincnt;
- int strict;
-
- mtx_assert(&qgroup->tqg_lock, MA_OWNED);
- if (qgroup->tqg_cnt == 0)
- return (0);
- idx = -1;
- mincnt = INT_MAX;
- /*
- * Two passes; First scan for a queue with the least tasks that
- * does not already service this uniq id. If that fails simply find
- * the queue with the least total tasks;
- */
- for (strict = 1; mincnt == INT_MAX; strict = 0) {
- for (i = 0; i < qgroup->tqg_cnt; i++) {
- if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
- continue;
- if (strict) {
- LIST_FOREACH(n,
- &qgroup->tqg_queue[i].tgc_tasks, gt_list)
- if (n->gt_uniq == uniq)
- break;
- if (n != NULL)
- continue;
- }
- mincnt = qgroup->tqg_queue[i].tgc_cnt;
- idx = i;
- }
- }
- if (idx == -1)
- panic("taskqgroup_find: Failed to pick a qid.");
-
- return (idx);
-}
-
-void
-taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
- void *uniq, int irq, char *name)
-{
- cpuset_t mask;
- int qid;
-
- gtask->gt_uniq = uniq;
- gtask->gt_name = name;
- gtask->gt_irq = irq;
- gtask->gt_cpu = -1;
- mtx_lock(&qgroup->tqg_lock);
- qid = taskqgroup_find(qgroup, uniq);
- qgroup->tqg_queue[qid].tgc_cnt++;
- LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
- gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- if (irq != -1 && smp_started) {
- CPU_ZERO(&mask);
- CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
- mtx_unlock(&qgroup->tqg_lock);
- intr_setaffinity(irq, &mask);
- } else
- mtx_unlock(&qgroup->tqg_lock);
-}
-
-int
-taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
- void *uniq, int cpu, int irq, char *name)
-{
- cpuset_t mask;
- int i, qid;
-
- qid = -1;
- gtask->gt_uniq = uniq;
- gtask->gt_name = name;
- gtask->gt_irq = irq;
- gtask->gt_cpu = cpu;
- mtx_lock(&qgroup->tqg_lock);
- if (smp_started) {
- for (i = 0; i < qgroup->tqg_cnt; i++)
- if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
- qid = i;
- break;
- }
- if (qid == -1) {
- mtx_unlock(&qgroup->tqg_lock);
- return (EINVAL);
- }
- } else
- qid = 0;
- qgroup->tqg_queue[qid].tgc_cnt++;
- LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
- gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- if (irq != -1 && smp_started) {
- CPU_ZERO(&mask);
- CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
- mtx_unlock(&qgroup->tqg_lock);
- intr_setaffinity(irq, &mask);
- } else
- mtx_unlock(&qgroup->tqg_lock);
- return (0);
-}
-
-void
-taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
-{
- int i;
-
- mtx_lock(&qgroup->tqg_lock);
- for (i = 0; i < qgroup->tqg_cnt; i++)
- if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
- break;
- if (i == qgroup->tqg_cnt)
- panic("taskqgroup_detach: task not in group\n");
- qgroup->tqg_queue[i].tgc_cnt--;
- LIST_REMOVE(gtask, gt_list);
- mtx_unlock(&qgroup->tqg_lock);
- gtask->gt_taskqueue = NULL;
-}
-
-static void
-taskqgroup_binder(void *ctx, int pending)
-{
- struct taskq_bind_task *task = (struct taskq_bind_task *)ctx;
- cpuset_t mask;
- int error;
-
- CPU_ZERO(&mask);
- CPU_SET(task->bt_cpuid, &mask);
- error = cpuset_setthread(curthread->td_tid, &mask);
- thread_lock(curthread);
- sched_bind(curthread, task->bt_cpuid);
- thread_unlock(curthread);
-
- if (error)
- printf("taskqgroup_binder: setaffinity failed: %d\n",
- error);
- free(task, M_DEVBUF);
-}
-
-static void
-taskqgroup_bind(struct taskqgroup *qgroup)
-{
- struct taskq_bind_task *task;
- int i;
-
- /*
- * Bind taskqueue threads to specific CPUs, if they have been assigned
- * one.
- */
- for (i = 0; i < qgroup->tqg_cnt; i++) {
- task = malloc(sizeof (*task), M_DEVBUF, M_NOWAIT);
- TASK_INIT(&task->bt_task, 0, taskqgroup_binder, task);
- task->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
- taskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
- &task->bt_task);
- }
-}
-
-static int
-_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
-{
- LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
- cpuset_t mask;
- struct grouptask *gtask;
- int i, k, old_cnt, qid, cpu;
-
- mtx_assert(&qgroup->tqg_lock, MA_OWNED);
-
- if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
- printf("taskqgroup_adjust failed cnt: %d stride: %d "
- "mp_ncpus: %d smp_started: %d\n", cnt, stride, mp_ncpus,
- smp_started);
- return (EINVAL);
- }
- if (qgroup->tqg_adjusting) {
- printf("taskqgroup_adjust failed: adjusting\n");
- return (EBUSY);
- }
- qgroup->tqg_adjusting = 1;
- old_cnt = qgroup->tqg_cnt;
- mtx_unlock(&qgroup->tqg_lock);
- /*
- * Set up queue for tasks added before boot.
- */
- if (old_cnt == 0) {
- LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
- grouptask, gt_list);
- qgroup->tqg_queue[0].tgc_cnt = 0;
- }
-
- /*
- * If new taskq threads have been added.
- */
- for (i = old_cnt; i < cnt; i++)
- taskqgroup_cpu_create(qgroup, i);
- mtx_lock(&qgroup->tqg_lock);
- qgroup->tqg_cnt = cnt;
- qgroup->tqg_stride = stride;
-
- /*
- * Adjust drivers to use new taskqs.
- */
- for (i = 0; i < old_cnt; i++) {
- while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
- LIST_REMOVE(gtask, gt_list);
- qgroup->tqg_queue[i].tgc_cnt--;
- LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
- }
- }
-
- while ((gtask = LIST_FIRST(&gtask_head))) {
- LIST_REMOVE(gtask, gt_list);
- if (gtask->gt_cpu == -1)
- qid = taskqgroup_find(qgroup, gtask->gt_uniq);
- else {
- for (i = 0; i < qgroup->tqg_cnt; i++)
- if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
- qid = i;
- break;
- }
- }
- qgroup->tqg_queue[qid].tgc_cnt++;
- LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
- gt_list);
- gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
- }
- /*
- * Set new CPU and IRQ affinity
- */
- cpu = CPU_FIRST();
- for (i = 0; i < cnt; i++) {
- qgroup->tqg_queue[i].tgc_cpu = cpu;
- for (k = 0; k < qgroup->tqg_stride; k++)
- cpu = CPU_NEXT(cpu);
- CPU_ZERO(&mask);
- CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
- LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
- if (gtask->gt_irq == -1)
- continue;
- intr_setaffinity(gtask->gt_irq, &mask);
- }
- }
- mtx_unlock(&qgroup->tqg_lock);
-
- /*
- * If taskq thread count has been reduced.
- */
- for (i = cnt; i < old_cnt; i++)
- taskqgroup_cpu_remove(qgroup, i);
-
- mtx_lock(&qgroup->tqg_lock);
- qgroup->tqg_adjusting = 0;
-
- taskqgroup_bind(qgroup);
-
- return (0);
-}
-
-int
-taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
-{
- int error;
-
- mtx_lock(&qgroup->tqg_lock);
- error = _taskqgroup_adjust(qgroup, cpu, stride);
- mtx_unlock(&qgroup->tqg_lock);
-
- return (error);
-}
-
-struct taskqgroup *
-taskqgroup_create(char *name)
-{
- struct taskqgroup *qgroup;
-
- qgroup = malloc(sizeof(*qgroup), M_TASKQUEUE, M_WAITOK | M_ZERO);
- mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
- qgroup->tqg_name = name;
- LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);
-
- return (qgroup);
-}
-
-void
-taskqgroup_destroy(struct taskqgroup *qgroup)
-{
-
-}
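
One behavioral difference between the removed code above and its gtaskqueue replacement: the old taskqgroup_cpu_create() walked the online-CPU map with CPU_FIRST()/CPU_NEXT(), which tolerates sparse CPU numbering and wraps past the last CPU, while the new subr_gtaskqueue.c version computes the target directly as idx * stride and therefore assumes dense, zero-based CPU IDs. A side-by-side sketch of the two mappings:

    /* Removed mapping: pick the (idx * stride)'th online CPU, with wrap. */
    cpu = CPU_FIRST();
    for (j = 0; j < idx * qgroup->tqg_stride; j++)
            cpu = CPU_NEXT(cpu);
    qcpu->tgc_cpu = cpu;

    /* New mapping (taskqgroup_cpu_create in subr_gtaskqueue.c above). */
    qcpu->tgc_cpu = idx * qgroup->tqg_stride;
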
Index: head/sys/net/ifdi_if.m
===================================================================
--- head/sys/net/ifdi_if.m
+++ head/sys/net/ifdi_if.m
@@ -60,9 +60,10 @@
return (0);
}
- static void
+ static int
null_queue_intr_enable(if_ctx_t _ctx __unused, uint16_t _qid __unused)
{
+ return (ENOTSUP);
}
static void
@@ -194,7 +195,7 @@
if_ctx_t _ctx;
};
-METHOD void queue_intr_enable {
+METHOD int queue_intr_enable {
if_ctx_t _ctx;
uint16_t _qid;
} DEFAULT null_queue_intr_enable;
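
With queue_intr_enable now returning int and defaulting to ENOTSUP, an MSI-X capable driver must supply its own method; iflib's _task_fn_rx (later in this diff) KASSERTs that the result is not ENOTSUP. A hypothetical driver-side implementation, with a made-up register name:

    static int
    mydrv_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
    {
            struct mydrv_softc *sc = iflib_get_softc(ctx);

            /* Unmask the interrupt for this queue (register is illustrative). */
            MYDRV_WRITE(sc, MYDRV_EIMS, 1u << qid);
            return (0);
    }

The method would then be wired up through the driver's ifdi method table, e.g. DEVMETHOD(ifdi_queue_intr_enable, mydrv_queue_intr_enable).
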
Index: head/sys/net/iflib.h
===================================================================
--- head/sys/net/iflib.h
+++ head/sys/net/iflib.h
@@ -35,6 +35,7 @@
#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <sys/nv.h>
+#include <sys/gtaskqueue.h>
/*
@@ -63,12 +64,14 @@
typedef struct if_rxd_frag {
uint8_t irf_flid;
uint16_t irf_idx;
+ uint16_t irf_len;
} *if_rxd_frag_t;
typedef struct if_rxd_info {
/* set by iflib */
uint16_t iri_qsidx; /* qset index */
uint16_t iri_vtag; /* vlan tag - if flag set */
+ /* XXX redundant with the new irf_len field */
uint16_t iri_len; /* packet length */
uint16_t iri_cidx; /* consumer index of cq */
struct ifnet *iri_ifp; /* some drivers >1 interface per softc */
@@ -156,10 +159,11 @@
void (*ift_txd_flush) (void *, uint16_t, uint32_t);
int (*ift_txd_credits_update) (void *, uint16_t, uint32_t, bool);
- int (*ift_rxd_available) (void *, uint16_t qsidx, uint32_t pidx);
+ int (*ift_rxd_available) (void *, uint16_t qsidx, uint32_t pidx,
+ int budget);
int (*ift_rxd_pkt_get) (void *, if_rxd_info_t ri);
void (*ift_rxd_refill) (void * , uint16_t qsidx, uint8_t flidx, uint32_t pidx,
- uint64_t *paddrs, caddr_t *vaddrs, uint16_t count);
+ uint64_t *paddrs, caddr_t *vaddrs, uint16_t count, uint16_t buf_size);
void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, uint32_t pidx);
int (*ift_legacy_intr) (void *);
} *if_txrx_t;
@@ -170,11 +174,20 @@
int isc_ntxqsets;
int isc_msix_bar; /* can be model specific - initialize in attach_pre */
int isc_tx_nsegments; /* can be model specific - initialize in attach_pre */
+ int isc_ntxd[8];
+ int isc_nrxd[8];
+
+ uint32_t isc_txqsizes[8];
+ uint32_t isc_rxqsizes[8];
+ int isc_max_txqsets;
+ int isc_max_rxqsets;
int isc_tx_tso_segments_max;
int isc_tx_tso_size_max;
int isc_tx_tso_segsize_max;
int isc_rss_table_size;
int isc_rss_table_mask;
+ int isc_nrxqsets_max;
+ int isc_ntxqsets_max;
iflib_intr_mode_t isc_intr;
uint16_t isc_max_frame_size; /* set at init time by driver */
@@ -188,8 +201,6 @@
int isc_magic;
if_txrx_t isc_txrx;
driver_t *isc_driver;
- int isc_ntxd;
- int isc_nrxd;
int isc_nfl;
int isc_flags;
bus_size_t isc_q_align;
@@ -199,14 +210,11 @@
bus_size_t isc_rx_maxsegsize;
int isc_rx_nsegments;
int isc_rx_process_limit;
-
-
- uint32_t isc_txqsizes[8];
int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
- uint32_t isc_rxqsizes[8];
int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int isc_admin_intrcnt; /* # of admin/link interrupts */
+
int isc_tx_reclaim_thresh;
/* fields necessary for probe */
@@ -215,6 +223,12 @@
/* optional function to transform the read values to match the table*/
void (*isc_parse_devinfo) (uint16_t *device_id, uint16_t *subvendor_id,
uint16_t *subdevice_id, uint16_t *rev_id);
+ int isc_nrxd_min[8];
+ int isc_nrxd_default[8];
+ int isc_nrxd_max[8];
+ int isc_ntxd_min[8];
+ int isc_ntxd_default[8];
+ int isc_ntxd_max[8];
};
typedef struct iflib_dma_info {
@@ -240,9 +254,9 @@
/*
- * Interface has a separate command queue
+ * Interface has a separate command queue for RX
*/
-#define IFLIB_HAS_CQ 0x1
+#define IFLIB_HAS_RXCQ 0x1
/*
* Driver has already allocated vectors
*/
@@ -252,6 +266,10 @@
* Interface is a virtual function
*/
#define IFLIB_IS_VF 0x4
+/*
+ * Interface has a separate command queue for TX
+ */
+#define IFLIB_HAS_TXCQ 0x8
/*
@@ -308,7 +326,10 @@
void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name);
void iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask,
- task_fn_t *fn, char *name);
+ gtask_fn_t *fn, char *name);
+
+void iflib_config_gtask_deinit(struct grouptask *gtask);
+
void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid);
@@ -317,7 +338,7 @@
void iflib_iov_intr_deferred(if_ctx_t ctx);
-void iflib_link_state_change(if_ctx_t ctx, int linkstate);
+void iflib_link_state_change(if_ctx_t ctx, int linkstate, uint64_t baudrate);
int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags);
void iflib_dma_free(iflib_dma_info_t dma);
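
Because isc_ntxd and isc_nrxd are now per-queue arrays, a driver's static if_shared_ctx is expected to publish min/default/max descriptor bounds for each queue in a qset; iflib resolves the actual counts into the softc context (scctx->isc_ntxd[] / isc_nrxd[]), honoring the new per-queue sysctl overrides, in the iflib.c hunk at the end of this diff. A hypothetical single-queue driver might declare:

    static struct if_shared_ctx mydrv_sctx = {
            /* ... other fields elided ... */
            .isc_ntxqs = 1,                 /* tx queues per qset */
            .isc_nrxqs = 1,                 /* rx queues per qset */
            .isc_ntxd_min = {32},
            .isc_ntxd_default = {1024},
            .isc_ntxd_max = {4096},
            .isc_nrxd_min = {32},
            .isc_nrxd_default = {1024},
            .isc_nrxd_max = {4096},
    };
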
Index: head/sys/net/iflib.c
===================================================================
--- head/sys/net/iflib.c
+++ head/sys/net/iflib.c
@@ -49,6 +49,7 @@
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
+#include <sys/limits.h>
#include <net/if.h>
@@ -101,7 +102,6 @@
* Enable mbuf vectors for compressing long mbuf chains
*/
-
/*
* NB:
* - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
@@ -181,8 +181,10 @@
struct sysctl_oid *ifc_sysctl_node;
uint16_t ifc_sysctl_ntxqs;
uint16_t ifc_sysctl_nrxqs;
- uint16_t ifc_sysctl_ntxds;
- uint16_t ifc_sysctl_nrxds;
+ uint16_t ifc_sysctl_qs_eq_override;
+
+ uint16_t ifc_sysctl_ntxds[8];
+ uint16_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
@@ -294,10 +296,11 @@
#define IFLIB_RESTART_BUDGET 8
-#define IFC_LEGACY 0x1
-#define IFC_QFLUSH 0x2
-#define IFC_MULTISEG 0x4
-#define IFC_DMAR 0x8
+#define IFC_LEGACY 0x01
+#define IFC_QFLUSH 0x02
+#define IFC_MULTISEG 0x04
+#define IFC_DMAR 0x08
+#define IFC_SC_ALLOCATED 0x10
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
@@ -311,6 +314,7 @@
uint8_t ift_db_pending;
uint8_t ift_db_pending_queued;
uint8_t ift_npending;
+ uint8_t ift_br_offset;
/* implicit pad */
uint64_t ift_processed;
uint64_t ift_cleaned;
@@ -414,6 +418,7 @@
uint16_t ifr_cq_cidx;
uint16_t ifr_cq_pidx;
uint8_t ifr_cq_gen;
+ uint8_t ifr_fl_offset;
if_ctx_t ifr_ctx;
iflib_fl_t ifr_fl;
@@ -604,7 +609,7 @@
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
-static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx);
+static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
@@ -875,7 +880,7 @@
for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) {
nic_i = fl->ifl_cidx;
nm_i = netmap_idx_n2k(kring, nic_i);
- avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i);
+ avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i, INT_MAX);
for (n = 0; avail > 0; n++, avail--) {
error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
if (error)
@@ -930,7 +935,7 @@
/*
* XXX we should be batching this operation - TODO
*/
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1);
+ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size);
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map,
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);
@@ -958,6 +963,7 @@
iflib_netmap_attach(if_ctx_t ctx)
{
struct netmap_adapter na;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
bzero(&na, sizeof(na));
@@ -966,8 +972,8 @@
MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
- na.num_tx_desc = ctx->ifc_sctx->isc_ntxd;
- na.num_rx_desc = ctx->ifc_sctx->isc_ntxd;
+ na.num_tx_desc = scctx->isc_ntxd[0];
+ na.num_rx_desc = scctx->isc_nrxd[0];
na.nm_txsync = iflib_netmap_txsync;
na.nm_rxsync = iflib_netmap_rxsync;
na.nm_register = iflib_netmap_register;
@@ -986,7 +992,7 @@
if (slot == 0)
return;
- for (int i = 0; i < ctx->ifc_sctx->isc_ntxd; i++) {
+ for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
/*
* In netmap mode, set the map for the packet buffer.
@@ -1011,7 +1017,7 @@
if (slot == 0)
return;
sd = rxq->ifr_fl[0].ifl_sds;
- nrxd = ctx->ifc_sctx->isc_nrxd;
+ nrxd = ctx->ifc_softc_ctx.isc_nrxd[0];
for (int i = 0; i < nrxd; i++, sd++) {
int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i);
uint64_t paddr;
@@ -1021,7 +1027,7 @@
vaddr = addr = PNMB(na, slot + sj, &paddr);
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr);
/* Update descriptor and the cached value */
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1);
+ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size);
}
/* preserve queue */
if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) {
@@ -1236,7 +1242,8 @@
nsegments = scctx->isc_tx_nsegments;
ntsosegments = scctx->isc_tx_tso_segments_max;
- MPASS(sctx->isc_ntxd > 0);
+ MPASS(scctx->isc_ntxd[0] > 0);
+ MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
MPASS(nsegments > 0);
MPASS(ntsosegments > 0);
/*
@@ -1259,13 +1266,11 @@
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize);
goto fail;
}
-#ifdef INVARIANTS
+#ifdef IFLIB_DIAGNOSTICS
device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n",
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize);
+
#endif
- device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n",
- scctx->isc_tx_tso_size_max, ntsosegments,
- scctx->isc_tx_tso_segsize_max);
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1282,21 +1287,21 @@
goto fail;
}
-#ifdef INVARIANTS
+#ifdef IFLIB_DIAGNOSTICS
device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n",
scctx->isc_tx_tso_size_max, ntsosegments,
scctx->isc_tx_tso_segsize_max);
#endif
if (!(txq->ift_sds.ifsd_flags =
(uint8_t *) malloc(sizeof(uint8_t) *
- sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(txq->ift_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
- sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
@@ -1308,13 +1313,13 @@
return (0);
if (!(txq->ift_sds.ifsd_map =
- (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
- for (int i = 0; i < sctx->isc_ntxd; i++) {
+ for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
@@ -1348,9 +1353,8 @@
iflib_txq_destroy(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
- for (int i = 0; i < sctx->isc_ntxd; i++)
+ for (int i = 0; i < txq->ift_size; i++)
iflib_txsd_destroy(ctx, txq, i);
if (txq->ift_sds.ifsd_map != NULL) {
free(txq->ift_sds.ifsd_map, M_IFLIB);
@@ -1390,7 +1394,7 @@
bus_dmamap_unload(txq->ift_desc_tag,
txq->ift_sds.ifsd_map[i]);
}
- m_freem(*mp);
+ m_free(*mp);
DBG_COUNTER_INC(tx_frees);
*mp = NULL;
}
@@ -1399,7 +1403,7 @@
iflib_txq_setup(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
iflib_dma_info_t di;
int i;
@@ -1408,7 +1412,7 @@
/* Reset indices */
txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
- txq->ift_size = sctx->isc_ntxd;
+ txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
@@ -1433,22 +1437,25 @@
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
iflib_fl_t fl;
iflib_rxsd_t rxsd;
int err;
- MPASS(sctx->isc_nrxd > 0);
+ MPASS(scctx->isc_nrxd[0] > 0);
+ MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) *
- sctx->isc_nrxd, M_IFLIB, M_WAITOK | M_ZERO);
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB,
+ M_WAITOK | M_ZERO);
if (fl->ifl_sds == NULL) {
device_printf(dev, "Unable to allocate rx sw desc memory\n");
return (ENOMEM);
}
- fl->ifl_size = sctx->isc_nrxd; /* this isn't necessarily the same */
+ fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1468,7 +1475,7 @@
}
rxsd = fl->ifl_sds;
- for (int i = 0; i < sctx->isc_nrxd; i++, rxsd++) {
+ for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++, rxsd++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map);
if (err) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
@@ -1626,7 +1633,7 @@
}
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx,
- fl->ifl_bus_addrs, fl->ifl_vm_addrs, i);
+ fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size);
i = 0;
pidx = fl->ifl_pidx;
}
@@ -1854,7 +1861,11 @@
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
iflib_netmap_rxq_init(ctx, rxq);
}
+#ifdef INVARIANTS
+ i = if_getdrvflags(ifp);
+#endif
IFDI_INIT(ctx);
+ MPASS(if_getdrvflags(ifp) == i);
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
if (iflib_fl_setup(fl)) {
@@ -1902,7 +1913,6 @@
iflib_txq_t txq = ctx->ifc_txqs;
iflib_rxq_t rxq = ctx->ifc_rxqs;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
iflib_dma_info_t di;
iflib_fl_t fl;
int i, j;
@@ -1920,7 +1930,7 @@
/* clean any enqueued buffers */
iflib_txq_check_drain(txq, 0);
/* Free any existing tx buffers. */
- for (j = 0; j < sctx->isc_ntxd; j++) {
+ for (j = 0; j < txq->ift_size; j++) {
iflib_txsd_free(ctx, txq, j);
}
txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
@@ -1990,13 +2000,24 @@
caddr_t cl;
i = 0;
+ mh = NULL;
do {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE);
MPASS(sd->ifsd_cl != NULL);
MPASS(sd->ifsd_m != NULL);
+
+ /* Don't include zero-length frags */
+ if (ri->iri_frags[i].irf_len == 0) {
+ /* XXX we can save the cluster here, but not the mbuf */
+ m_init(sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
+ m_free(sd->ifsd_m);
+ sd->ifsd_m = NULL;
+ continue;
+ }
+
m = sd->ifsd_m;
- if (i == 0) {
+ if (mh == NULL) {
flags = M_PKTHDR|M_EXT;
mh = mt = m;
padlen = ri->iri_pad;
@@ -2019,14 +2040,12 @@
*/
m->m_data += padlen;
ri->iri_len -= padlen;
- m->m_len = ri->iri_len;
+ m->m_len = ri->iri_frags[i].irf_len;
} while (++i < ri->iri_nfrags);
return (mh);
}
-
-
/*
* Process one software descriptor
*/
@@ -2037,13 +2056,14 @@
iflib_rxsd_t sd;
/* should I merge this back in now that the two paths are basically duplicated? */
- if (ri->iri_len <= IFLIB_RX_COPY_THRESH) {
+ if (ri->iri_nfrags == 1 &&
+ ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE);
m = sd->ifsd_m;
sd->ifsd_m = NULL;
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
memcpy(m->m_data, sd->ifsd_cl, ri->iri_len);
- m->m_len = ri->iri_len;
+ m->m_len = ri->iri_frags[0].irf_len;
} else {
m = assemble_segments(rxq, ri);
}
@@ -2063,13 +2083,13 @@
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
int avail, i;
uint16_t *cidxp;
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
struct ifnet *ifp;
- struct lro_entry *queued;
int lro_enabled;
/*
* XXX early demux data packets so that if_input processing only handles
@@ -2084,11 +2104,11 @@
mh = mt = NULL;
MPASS(budget > 0);
rx_pkts = rx_bytes = 0;
- if (sctx->isc_flags & IFLIB_HAS_CQ)
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ)
cidxp = &rxq->ifr_cq_cidx;
else
cidxp = &rxq->ifr_fl[0].ifl_cidx;
- if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp)) == 0) {
+ if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
__iflib_fl_refill_lt(ctx, fl, budget + 8);
DBG_COUNTER_INC(rx_unavail);
@@ -2112,10 +2132,11 @@
/* in lieu of handling correctly - make sure it isn't being unhandled */
MPASS(err == 0);
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
- /* we know we consumed _one_ CQ entry */
- if (++rxq->ifr_cq_cidx == sctx->isc_nrxd) {
- rxq->ifr_cq_cidx = 0;
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
+ *cidxp = ri.iri_cidx;
+ /* Update our consumer index */
+ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
+ rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
rxq->ifr_cq_gen = 0;
}
/* was this only a completion queue message? */
@@ -2128,7 +2149,7 @@
/* will advance the cidx on the corresponding free lists */
m = iflib_rxd_pkt_get(rxq, &ri);
if (avail == 0 && budget_left)
- avail = iflib_rxd_avail(ctx, rxq, *cidxp);
+ avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
if (__predict_false(m == NULL)) {
DBG_COUNTER_INC(rx_mbuf_null);
@@ -2148,7 +2169,6 @@
ifp = ctx->ifc_ifp;
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
-
while (mh != NULL) {
m = mh;
mh = mh->m_nextpkt;
@@ -2162,32 +2182,33 @@
DBG_COUNTER_INC(rx_if_input);
ifp->if_input(ifp, m);
}
+
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
/*
* Flush any outstanding LRO work
*/
- while ((queued = LIST_FIRST(&rxq->ifr_lc.lro_active)) != NULL) {
- LIST_REMOVE(queued, next);
#if defined(INET6) || defined(INET)
- tcp_lro_flush(&rxq->ifr_lc, queued);
+ tcp_lro_flush_all(&rxq->ifr_lc);
#endif
- }
- return (iflib_rxd_avail(ctx, rxq, *cidxp));
+ if (avail)
+ return true;
+ return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
}
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
-#define TXQ_MAX_DB_DEFERRED(ctx) (ctx->ifc_sctx->isc_ntxd >> 5)
-#define TXQ_MAX_DB_CONSUMED(ctx) (ctx->ifc_sctx->isc_ntxd >> 4)
+#define TXQ_MAX_DB_DEFERRED(size) (size >> 5)
+#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
static __inline void
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring)
{
uint32_t dbval;
- if (ring || txq->ift_db_pending >= TXQ_MAX_DB_DEFERRED(ctx)) {
+ if (ring || txq->ift_db_pending >=
+ TXQ_MAX_DB_DEFERRED(txq->ift_size)) {
/* the lock will only ever be contended in the !min_latency case */
if (!TXDB_TRYLOCK(txq))
@@ -2233,9 +2254,9 @@
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
struct ether_vlan_header *eh;
- struct mbuf *m;
+ struct mbuf *m, *n;
- m = *mp;
+ n = m = *mp;
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@@ -2261,7 +2282,6 @@
{
struct ip *ip = NULL;
struct tcphdr *th = NULL;
- struct mbuf *n;
int minthlen;
minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
@@ -2403,37 +2423,31 @@
/*
* If dodgy hardware rejects the scatter gather chain we've handed it
- * we'll need to rebuild the mbuf chain before we can call m_defrag
+ * we'll need to remove the mbuf chain from ifsg_m[] before we can add the
+ * m_defrag'd mbufs
*/
static __noinline struct mbuf *
-iflib_rebuild_mbuf(iflib_txq_t txq)
+iflib_remove_mbuf(iflib_txq_t txq)
{
-
- int ntxd, mhlen, len, i, pidx;
+ int ntxd, i, pidx;
struct mbuf *m, *mh, **ifsd_m;
- if_shared_ctx_t sctx;
pidx = txq->ift_pidx;
ifsd_m = txq->ift_sds.ifsd_m;
- sctx = txq->ift_ctx->ifc_sctx;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
mh = m = ifsd_m[pidx];
ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
- len = m->m_len;
- mhlen = m->m_pkthdr.len;
i = 1;
- while (len < mhlen && (m->m_next == NULL)) {
- m->m_next = ifsd_m[(pidx + i) & (ntxd-1)];
+ while (m) {
ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
m = m->m_next;
- len += m->m_len;
i++;
}
return (mh);
@@ -2446,6 +2460,7 @@
{
if_ctx_t ctx;
if_shared_ctx_t sctx;
+ if_softc_ctx_t scctx;
int i, next, pidx, mask, err, maxsegsz, ntxd, count;
struct mbuf *m, *tmp, **ifsd_m, **mp;
@@ -2459,8 +2474,9 @@
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
+ scctx = &ctx->ifc_softc_ctx;
ifsd_m = txq->ift_sds.ifsd_m;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
pidx = txq->ift_pidx;
if (map != NULL) {
uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
@@ -2472,13 +2488,12 @@
ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
i = 0;
next = pidx;
- mask = (sctx->isc_ntxd-1);
+ mask = (txq->ift_size-1);
m = *m0;
do {
mp = &ifsd_m[next];
*mp = m;
m = m->m_next;
- (*mp)->m_next = NULL;
if (__predict_false((*mp)->m_len == 0)) {
m_free(*mp);
*mp = NULL;
@@ -2529,13 +2544,12 @@
count++;
tmp = m;
m = m->m_next;
- tmp->m_next = NULL;
} while (m != NULL);
*nsegs = i;
}
return (0);
err:
- *m0 = iflib_rebuild_mbuf(txq);
+ *m0 = iflib_remove_mbuf(txq);
return (EFBIG);
}
@@ -2558,7 +2572,7 @@
sctx = ctx->ifc_sctx;
scctx = &ctx->ifc_softc_ctx;
segs = txq->ift_segs;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
m_head = *m_headp;
map = NULL;
@@ -2645,14 +2659,14 @@
if (map != NULL)
bus_dmamap_unload(desc_tag, map);
DBG_COUNTER_INC(encap_txq_avail_fail);
- if (txq->ift_task.gt_task.ta_pending == 0)
+ if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
GROUPTASK_ENQUEUE(&txq->ift_task);
return (ENOBUFS);
}
pi.ipi_segs = segs;
pi.ipi_nsegs = nsegs;
- MPASS(pidx >= 0 && pidx < sctx->isc_ntxd);
+ MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
@@ -2661,11 +2675,12 @@
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DBG_COUNTER_INC(tx_encap);
- MPASS(pi.ipi_new_pidx >= 0 && pi.ipi_new_pidx < sctx->isc_ntxd);
+ MPASS(pi.ipi_new_pidx >= 0 &&
+ pi.ipi_new_pidx < txq->ift_size);
ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
if (pi.ipi_new_pidx < pi.ipi_pidx) {
- ndesc += sctx->isc_ntxd;
+ ndesc += txq->ift_size;
txq->ift_gen = 1;
}
MPASS(pi.ipi_new_pidx != pidx);
@@ -2678,7 +2693,7 @@
txq->ift_pidx = pi.ipi_new_pidx;
txq->ift_npending += pi.ipi_ndescs;
} else if (__predict_false(err == EFBIG && remap < 2)) {
- *m_headp = m_head = iflib_rebuild_mbuf(txq);
+ *m_headp = m_head = iflib_remove_mbuf(txq);
remap = 1;
txq->ift_txd_encap_efbig++;
goto defrag;
@@ -2700,7 +2715,7 @@
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
-#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NRXQSETS(ctx)) + FIRST_QSET(ctx))
+#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
@@ -2712,7 +2727,7 @@
*
* ORing with 2 assures that min occupancy is never less than 2 without any conditional logic
*/
-#define TXQ_MIN_OCCUPANCY(ctx) ((ctx->ifc_sctx->isc_ntxd >> 6)| 0x2)
+#define TXQ_MIN_OCCUPANCY(size) ((size >> 6)| 0x2)
static inline int
iflib_txq_min_occupancy(iflib_txq_t txq)
@@ -2720,7 +2735,9 @@
if_ctx_t ctx;
ctx = txq->ift_ctx;
- return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen) < TXQ_MIN_OCCUPANCY(ctx) + MAX_TX_DESC(ctx));
+ return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx,
+ txq->ift_gen) < TXQ_MIN_OCCUPANCY(txq->ift_size) +
+ MAX_TX_DESC(ctx));
}
static void
@@ -2734,7 +2751,7 @@
cidx = txq->ift_cidx;
gen = txq->ift_gen;
- qsize = txq->ift_ctx->ifc_sctx->isc_ntxd;
+ qsize = txq->ift_size;
mask = qsize-1;
hasmap = txq->ift_sds.ifsd_map != NULL;
ifsd_flags = txq->ift_sds.ifsd_flags;
@@ -2760,7 +2777,7 @@
/* XXX we don't support any drivers that batch packets yet */
MPASS(m->m_nextpkt == NULL);
- m_freem(m);
+ m_free(m);
ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
@@ -2856,7 +2873,7 @@
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
DBG_COUNTER_INC(txq_drain_flushing);
for (i = 0; i < avail; i++) {
- m_freem(r->items[(cidx + i) & (r->size-1)]);
+ m_free(r->items[(cidx + i) & (r->size-1)]);
r->items[(cidx + i) & (r->size-1)] = NULL;
}
return (avail);
@@ -2903,7 +2920,7 @@
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
break;
- if (desc_used > TXQ_MAX_DB_CONSUMED(ctx))
+ if (desc_used > TXQ_MAX_DB_CONSUMED(txq->ift_size))
break;
}
@@ -2924,7 +2941,7 @@
}
static void
-_task_fn_tx(void *context, int pending)
+_task_fn_tx(void *context)
{
iflib_txq_t txq = context;
if_ctx_t ctx = txq->ift_ctx;
@@ -2935,11 +2952,12 @@
}
static void
-_task_fn_rx(void *context, int pending)
+_task_fn_rx(void *context)
{
iflib_rxq_t rxq = context;
if_ctx_t ctx = rxq->ifr_ctx;
bool more;
+ int rc;
DBG_COUNTER_INC(task_fn_rxs);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
@@ -2950,7 +2968,8 @@
IFDI_INTR_ENABLE(ctx);
else {
DBG_COUNTER_INC(rx_intr_enables);
- IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ rc = IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but it is not implemented in the driver"));
}
}
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
@@ -2960,7 +2979,7 @@
}
static void
-_task_fn_admin(void *context, int pending)
+_task_fn_admin(void *context)
{
if_ctx_t ctx = context;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
@@ -2990,7 +3009,7 @@
static void
-_task_fn_iov(void *context, int pending)
+_task_fn_iov(void *context)
{
if_ctx_t ctx = context;
@@ -3049,8 +3068,7 @@
if_ctx_t ctx = if_getsoftc(ifp);
iflib_txq_t txq;
- struct mbuf *marr[8], **mp, *next;
- int err, i, count, qidx;
+ int err, qidx;
if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(tx_frees);
@@ -3058,6 +3076,7 @@
return (0);
}
+ MPASS(m->m_nextpkt == NULL);
qidx = 0;
if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
qidx = QIDX(ctx, m);
@@ -3077,6 +3096,7 @@
return (ENOBUFS);
}
#endif
+#ifdef notyet
qidx = count = 0;
mp = marr;
next = m;
@@ -3098,22 +3118,21 @@
next = next->m_nextpkt;
mp[i]->m_nextpkt = NULL;
}
+#endif
DBG_COUNTER_INC(tx_seen);
- err = ifmp_ring_enqueue(txq->ift_br[0], (void **)mp, count, TX_BATCH_SIZE);
+ err = ifmp_ring_enqueue(txq->ift_br[0], (void **)&m, 1, TX_BATCH_SIZE);
- if (iflib_txq_can_drain(txq->ift_br[0]))
- GROUPTASK_ENQUEUE(&txq->ift_task);
if (err) {
+ GROUPTASK_ENQUEUE(&txq->ift_task);
/* backpressure support forthcoming */
#ifdef DRIVER_BACKPRESSURE
txq->ift_closed = TRUE;
#endif
- for (i = 0; i < count; i++)
- m_freem(mp[i]);
ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
+ m_freem(m);
+ } else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) {
+ GROUPTASK_ENQUEUE(&txq->ift_task);
}
- if (count > nitems(marr))
- free(mp, M_IFLIB);
return (err);
}
@@ -3138,13 +3157,13 @@
if_qflush(ifp);
}
-#define IFCAP_REINIT (IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_TSO6|IFCAP_VLAN_HWTAGGING|IFCAP_VLAN_MTU | \
- IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
-#define IFCAP_FLAGS (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
+#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | \
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
+#define IFCAP_REINIT IFCAP_FLAGS
+
static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
@@ -3428,6 +3447,9 @@
if_ctx_t ctx;
if_t ifp;
if_softc_ctx_t scctx;
+ int i;
+ uint16_t main_txq;
+ uint16_t main_rxq;
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
@@ -3435,6 +3457,7 @@
if (sc == NULL) {
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
device_set_softc(dev, ctx);
+ ctx->ifc_flags |= IFC_SC_ALLOCATED;
}
ctx->ifc_sctx = sctx;
@@ -3447,28 +3470,112 @@
return (err);
}
iflib_add_device_sysctl_pre(ctx);
+
+ scctx = &ctx->ifc_softc_ctx;
+ /*
+ * XXX sanity check that ntxd & nrxd are a power of 2
+ */
+ if (ctx->ifc_sysctl_ntxqs != 0)
+ scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
+ if (ctx->ifc_sysctl_nrxqs != 0)
+ scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
+
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (ctx->ifc_sysctl_ntxds[i] != 0)
+ scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
+ else
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
+ }
+
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (ctx->ifc_sysctl_nrxds[i] != 0)
+ scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
+ else
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
+ }
+
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
+ device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
+ }
+ if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
+ device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
+ }
+ }
+
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
+ device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
+ }
+ if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
+ device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
+ }
+ }
+
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
return (err);
}
+ if (scctx->isc_ntxqsets_max)
+ scctx->isc_ntxqsets = min(scctx->isc_ntxqsets, scctx->isc_ntxqsets_max);
+ if (scctx->isc_nrxqsets_max)
+ scctx->isc_nrxqsets = min(scctx->isc_nrxqsets, scctx->isc_nrxqsets_max);
+
#ifdef ACPI_DMAR
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
ctx->ifc_flags |= IFC_DMAR;
#endif
- scctx = &ctx->ifc_softc_ctx;
msix_bar = scctx->isc_msix_bar;
- if (scctx->isc_tx_nsegments > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION)
- scctx->isc_tx_nsegments = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION);
- if (scctx->isc_tx_tso_segments_max > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION)
- scctx->isc_tx_tso_segments_max = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION);
-
ifp = ctx->ifc_ifp;
- /*
- * XXX sanity check that ntxd & nrxd are a power of 2
- */
+ if (sctx->isc_flags & IFLIB_HAS_TXCQ)
+ main_txq = 1;
+ else
+ main_txq = 0;
+
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ)
+ main_rxq = 1;
+ else
+ main_rxq = 0;
+
+ /* XXX change for per-queue sizes */
+ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
+ scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (!powerof2(scctx->isc_nrxd[i])) {
+ /* round down instead? */
+ device_printf(dev, "# rx descriptors must be a power of 2\n");
+ err = EINVAL;
+ goto fail;
+ }
+ }
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (!powerof2(scctx->isc_ntxd[i])) {
+ device_printf(dev,
+ "# tx descriptors must be a power of 2");
+ err = EINVAL;
+ goto fail;
+ }
+ }
+
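The power-of-2 requirement is not cosmetic: the ring code wraps indices with a bitmask rather than a modulo, as in the reclaim path earlier in this diff (mask = qsize-1). A minimal sketch of that wrap arithmetic, with made-up values:

	uint32_t qsize = 1024, mask = qsize - 1;	/* mask = 0x3ff */
	uint32_t cidx = 1020, ndesc = 8;
	uint32_t next = (cidx + ndesc) & mask;	/* 1028 & 0x3ff = 4 == 1028 % 1024 */

For any size that is not a power of 2 the mask no longer matches the modulo and the wrap silently lands on the wrong slot, hence the hard EINVAL here.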
+ if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION)
+ scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION);
+ if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION)
+ scctx->isc_tx_tso_segments_max = max(1,
+ scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
/*
* Protect the stack against modern hardware
@@ -3482,7 +3589,7 @@
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
if (scctx->isc_rss_table_size == 0)
scctx->isc_rss_table_size = 64;
- scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;;
+ scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
/*
** Now setup MSI or MSI/X, should
** return us the number of supported
@@ -3520,7 +3627,7 @@
MPASS(msix == 1);
rid = 1;
}
- if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx, &rid, "irq0")) != 0) {
+ if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
device_printf(dev, "iflib_legacy_setup failed %d\n", err);
goto fail_intr_free;
}
@@ -3536,6 +3643,7 @@
}
*ctxp = ctx;
+ if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
iflib_add_device_sysctl_post(ctx);
return (0);
fail_detach:
@@ -3599,7 +3707,7 @@
led_destroy(ctx->ifc_led_dev);
/* XXX drain any dependent tasks */
tqg = qgroup_if_io_tqg;
- for (txq = ctx->ifc_txqs, i = 0, rxq = ctx->ifc_rxqs; i < NTXQSETS(ctx); i++, txq++) {
+ for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
callout_drain(&txq->ift_timer);
callout_drain(&txq->ift_db_check);
if (txq->ift_task.gt_uniq != NULL)
@@ -3616,6 +3724,7 @@
taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
IFDI_DETACH(ctx);
+ device_set_softc(ctx->ifc_dev, NULL);
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
pci_release_msi(dev);
}
@@ -3633,6 +3742,9 @@
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
+ if (ctx->ifc_flags & IFC_SC_ALLOCATED)
+ free(ctx->ifc_softc, M_IFLIB);
+ free(ctx, M_IFLIB);
return (0);
}
@@ -3782,7 +3894,13 @@
MPASS(sctx->isc_txrx->ift_rxd_pkt_get);
MPASS(sctx->isc_txrx->ift_rxd_refill);
MPASS(sctx->isc_txrx->ift_rxd_flush);
- MPASS(sctx->isc_nrxd);
+
+ MPASS(sctx->isc_nrxd_min[0]);
+ MPASS(sctx->isc_nrxd_max[0]);
+ MPASS(sctx->isc_nrxd_default[0]);
+ MPASS(sctx->isc_ntxd_min[0]);
+ MPASS(sctx->isc_ntxd_max[0]);
+ MPASS(sctx->isc_ntxd_default[0]);
}
static int
@@ -3796,7 +3914,6 @@
_iflib_assert(sctx);
CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
- MPASS(ctx->ifc_flags == 0);
ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
if (ifp == NULL) {
@@ -3818,7 +3935,6 @@
if_setioctlfn(ifp, iflib_if_ioctl);
if_settransmitfn(ifp, iflib_if_transmit);
if_setqflushfn(ifp, iflib_if_qflush);
- if_setgetcounterfn(ifp, iflib_if_get_counter);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setcapabilities(ifp, 0);
@@ -3842,16 +3958,17 @@
iflib_queues_alloc(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
- int nrxqsets = ctx->ifc_softc_ctx.isc_nrxqsets;
- int ntxqsets = ctx->ifc_softc_ctx.isc_ntxqsets;
+ int nrxqsets = scctx->isc_nrxqsets;
+ int ntxqsets = scctx->isc_ntxqsets;
iflib_txq_t txq;
iflib_rxq_t rxq;
iflib_fl_t fl = NULL;
- int i, j, cpu, err, txconf, rxconf, fl_ifdi_offset;
+ int i, j, cpu, err, txconf, rxconf;
iflib_dma_info_t ifdip;
- uint32_t *rxqsizes = sctx->isc_rxqsizes;
- uint32_t *txqsizes = sctx->isc_txqsizes;
+ uint32_t *rxqsizes = scctx->isc_rxqsizes;
+ uint32_t *txqsizes = scctx->isc_txqsizes;
uint8_t nrxqs = sctx->isc_nrxqs;
uint8_t ntxqs = sctx->isc_ntxqs;
int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
@@ -3860,10 +3977,11 @@
struct ifmp_ring **brscp;
int nbuf_rings = 1; /* XXX determine dynamically */
- KASSERT(ntxqs > 0, ("number of queues must be at least 1"));
- KASSERT(nrxqs > 0, ("number of queues must be at least 1"));
+ KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
+ KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
brscp = NULL;
+ txq = NULL;
rxq = NULL;
/* Allocate the TX ring struct memory */
@@ -3891,8 +4009,6 @@
ctx->ifc_txqs = txq;
ctx->ifc_rxqs = rxq;
- txq = NULL;
- rxq = NULL;
/*
* XXX handle allocation failure
@@ -3916,6 +4032,11 @@
}
txq->ift_ctx = ctx;
txq->ift_id = i;
+ if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
+ txq->ift_br_offset = 1;
+ } else {
+ txq->ift_br_offset = 0;
+ }
/* XXX fix this */
txq->ift_timer.c_cpu = cpu;
txq->ift_db_check.c_cpu = cpu;
@@ -3970,10 +4091,10 @@
}
rxq->ifr_ctx = ctx;
rxq->ifr_id = i;
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
- fl_ifdi_offset = 1;
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
+ rxq->ifr_fl_offset = 1;
} else {
- fl_ifdi_offset = 0;
+ rxq->ifr_fl_offset = 0;
}
rxq->ifr_nfl = nfree_lists;
if (!(fl =
@@ -3986,7 +4107,8 @@
for (j = 0; j < nfree_lists; j++) {
rxq->ifr_fl[j].ifl_rxq = rxq;
rxq->ifr_fl[j].ifl_id = j;
- rxq->ifr_fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + fl_ifdi_offset];
+ rxq->ifr_fl[j].ifl_ifdi =
+ &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
}
/* Allocate receive buffers for the ring*/
if (iflib_rxsd_alloc(rxq)) {
@@ -4106,12 +4228,13 @@
for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
tcp_lro_free(&rxq->ifr_lc);
- if ((err = tcp_lro_init(&rxq->ifr_lc)) != 0) {
+ if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
+ TCP_LRO_ENTRIES, min(1024,
+ ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
goto fail;
}
rxq->ifr_lro_enabled = TRUE;
- rxq->ifr_lc.ifp = ctx->ifc_ifp;
#endif
IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
}
@@ -4142,7 +4265,7 @@
{
iflib_rxq_t rxq = ctx->ifc_rxqs;
- for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, rxq++) {
+ for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
iflib_rx_sds_free(rxq);
}
}
@@ -4193,7 +4316,7 @@
struct taskqgroup *tqg;
iflib_filter_info_t info;
cpuset_t cpus;
- task_fn_t *fn;
+ gtask_fn_t *fn;
int tqrid, err;
void *q;
@@ -4254,7 +4377,7 @@
{
struct grouptask *gtask;
struct taskqgroup *tqg;
- task_fn_t *fn;
+ gtask_fn_t *fn;
void *q;
switch (type) {
@@ -4310,7 +4433,7 @@
iflib_filter_info_t info;
struct grouptask *gtask;
struct taskqgroup *tqg;
- task_fn_t *fn;
+ gtask_fn_t *fn;
int tqrid;
void *q;
int err;
@@ -4385,7 +4508,7 @@
}
void
-iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, task_fn_t *fn,
+iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn,
char *name)
{
@@ -4394,14 +4517,21 @@
}
void
-iflib_link_state_change(if_ctx_t ctx, int link_state)
+iflib_config_gtask_deinit(struct grouptask *gtask)
+{
+
+ taskqgroup_detach(qgroup_if_config_tqg, gtask);
+}
+
+void
+iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
if_t ifp = ctx->ifc_ifp;
iflib_txq_t txq = ctx->ifc_txqs;
-#if 0
+
if_setbaudrate(ifp, baudrate);
-#endif
+
/* If link down, disable watchdog */
if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
@@ -4431,10 +4561,11 @@
}
static int
-iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx)
+iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget)
{
- return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx));
+ return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
+ budget));
}
void
@@ -4468,8 +4599,9 @@
int iflib_num_tx_queues, iflib_num_rx_queues;
int err, admincnt, bar;
- iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
- iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
+ iflib_num_tx_queues = scctx->isc_ntxqsets;
+ iflib_num_rx_queues = scctx->isc_nrxqsets;
+
bar = ctx->ifc_softc_ctx.isc_msix_bar;
admincnt = sctx->isc_admin_intrcnt;
/* Override by tuneable */
@@ -4549,18 +4681,31 @@
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
- if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queues)
- queues = rx_queues = iflib_num_rx_queues;
+ if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
+ rx_queues = iflib_num_rx_queues;
else
rx_queues = queues;
+ /*
+ * We want this to be all logical CPUs by default
+ */
if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
tx_queues = iflib_num_tx_queues;
else
- tx_queues = queues;
+ tx_queues = mp_ncpus;
+
+ if (ctx->ifc_sysctl_qs_eq_override == 0) {
+#ifdef INVARIANTS
+ if (tx_queues != rx_queues)
+ device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
+ min(rx_queues, tx_queues), min(rx_queues, tx_queues));
+#endif
+ tx_queues = min(rx_queues, tx_queues);
+ rx_queues = min(rx_queues, tx_queues);
+ }
device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
- vectors = queues + admincnt;
+ vectors = rx_queues + admincnt;
if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
device_printf(dev,
"Using MSIX interrupts with %d vectors\n", vectors);
@@ -4568,6 +4713,7 @@
scctx->isc_nrxqsets = rx_queues;
scctx->isc_ntxqsets = tx_queues;
scctx->isc_intr = IFLIB_INTR_MSIX;
+
return (vectors);
} else {
device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
@@ -4617,7 +4763,58 @@
return(rc);
}
+enum iflib_ndesc_handler {
+ IFLIB_NTXD_HANDLER,
+ IFLIB_NRXD_HANDLER,
+};
+
+static int
+mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ enum iflib_ndesc_handler type = arg2;
+ char buf[256] = {0};
+ uint16_t *ndesc;
+ char *p, *next;
+ int nqs, rc, i;
+
+ MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
+
+ nqs = 8;
+ switch (type) {
+ case IFLIB_NTXD_HANDLER:
+ ndesc = ctx->ifc_sysctl_ntxds;
+ if (ctx->ifc_sctx)
+ nqs = ctx->ifc_sctx->isc_ntxqs;
+ break;
+ case IFLIB_NRXD_HANDLER:
+ ndesc = ctx->ifc_sysctl_nrxds;
+ if (ctx->ifc_sctx)
+ nqs = ctx->ifc_sctx->isc_nrxqs;
+ break;
+ }
+ if (nqs == 0)
+ nqs = 8;
+
+ for (i = 0; i < 8; i++) {
+ if (i >= nqs)
+ break;
+ if (i)
+ strcat(buf, ",");
+ sprintf(strchr(buf, 0), "%d", ndesc[i]);
+ }
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return (rc);
+ for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
+ i++, p = strsep(&next, " ,")) {
+ ndesc[i] = strtoul(p, NULL, 10);
+ }
+
+ return (rc);
+}
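mp_ndesc_handler round-trips the per-queue descriptor counts through one comma-separated string: it prints the current values into buf, hands the buffer to sysctl_handle_string(), and on a write parses the new values back with strsep()/strtoul(). A minimal userland sketch of that parse (the buffer contents are invented):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int
	main(void)
	{
		char buf[] = "1024,2048";	/* hypothetical value written to the sysctl */
		char *next = buf, *p;
		int i = 0;

		/* same strsep()/strtoul() walk the handler performs on new values */
		while ((p = strsep(&next, " ,")) != NULL && i < 8) {
			printf("queue %d: %lu descriptors\n", i, strtoul(p, NULL, 10));
			i++;
		}
		return (0);
	}

In use this amounts to writing a value such as "1024,2048" to the override_ntxds or override_nrxds tunables registered below; the full sysctl path depends on the device node and is not shown in this diff.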
#define NAME_BUFLEN 32
static void
@@ -4634,19 +4831,29 @@
CTLFLAG_RD, NULL, "IFLIB fields");
oid_list = SYSCTL_CHILDREN(node);
+ SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
+ CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
+ "driver version");
+
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
"# of txqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
- "# of txqs to use, 0 => use default #");
- SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxds",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxds, 0,
- "# of tx descriptors to use, 0 => use default #");
- SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxds",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxds, 0,
- "# of rx descriptors to use, 0 => use default #");
-
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
+ "# of rxqs to use, 0 => use default #");
+ SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
+ "permit #txq != #rxq");
+
+ /* XXX change for per-queue sizes */
+ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
+ CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of tx descriptors to use, 0 = use default #");
+ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
+ CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of rx descriptors to use, 0 = use default #");
}
static void
@@ -4700,7 +4907,7 @@
&txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD,
- &txq->ift_mbuf_defrag_failed, "# of times no descriptors were available");
+ &txq->ift_no_desc_avail, "# of times no descriptors were available");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
CTLFLAG_RD,
&txq->ift_map_failed, "# of times dma map failed");
@@ -4763,7 +4970,7 @@
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
CTLFLAG_RD,
&rxq->ifr_cq_pidx, 1, "Producer Index");
Index: head/sys/sys/_task.h
===================================================================
--- head/sys/sys/_task.h
+++ head/sys/sys/_task.h
@@ -42,6 +42,7 @@
* (q) taskqueue lock
*/
typedef void task_fn_t(void *context, int pending);
+typedef void gtask_fn_t(void *context);
struct task {
STAILQ_ENTRY(task) ta_link; /* (q) link for queue */
@@ -51,8 +52,16 @@
void *ta_context; /* (c) argument for handler */
};
+struct gtask {
+ STAILQ_ENTRY(gtask) ta_link; /* (q) link for queue */
+ uint16_t ta_flags; /* (q) state flags */
+ u_short ta_priority; /* (c) Priority */
+ gtask_fn_t *ta_func; /* (c) task handler */
+ void *ta_context; /* (c) argument for handler */
+};
+
struct grouptask {
- struct task gt_task;
+ struct gtask gt_task;
void *gt_taskqueue;
LIST_ENTRY(grouptask) gt_list;
void *gt_uniq;
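Worth noting against struct task above: struct gtask replaces the ta_pending counter with a ta_flags word. TASK_ENQUEUED (defined in gtaskqueue.h below) marks a task already on a queue, which is why the iflib hunk at the top of this diff changes its ta_pending == 0 test to a flags test, and TASK_SKIP_WAKEUP lets a queue skip waking its service thread. The test-and-enqueue pattern becomes:

	if ((gt->gt_task.ta_flags & TASK_ENQUEUED) == 0)	/* not already queued */
		GROUPTASK_ENQUEUE(gt);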
Index: head/sys/sys/gtaskqueue.h
===================================================================
--- head/sys/sys/gtaskqueue.h
+++ head/sys/sys/gtaskqueue.h
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 2014 Jeffrey Roberson <jeff@freebsd.org>
+ * Copyright (c) 2016 Matthew Macy <mmacy@nextbsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_GTASKQUEUE_H_
+#define _SYS_GTASKQUEUE_H_
+#include <sys/taskqueue.h>
+
+#ifndef _KERNEL
+#error "no user-servicable parts inside"
+#endif
+
+struct gtaskqueue;
+typedef void (*gtaskqueue_enqueue_fn)(void *context);
+
+/*
+ * Taskqueue groups. Manages dynamic thread groups and irq binding for
+ * device and other tasks.
+ */
+
+void gtaskqueue_block(struct gtaskqueue *queue);
+void gtaskqueue_unblock(struct gtaskqueue *queue);
+
+int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask);
+void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *task);
+void gtaskqueue_drain_all(struct gtaskqueue *queue);
+
+int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task);
+void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask,
+ void *uniq, int irq, char *name);
+int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask,
+ void *uniq, int cpu, int irq, char *name);
+void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
+struct taskqgroup *taskqgroup_create(char *name);
+void taskqgroup_destroy(struct taskqgroup *qgroup);
+int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
+
+#define TASK_ENQUEUED 0x1
+#define TASK_SKIP_WAKEUP 0x2
+
+#define GTASK_INIT(task, flags, priority, func, context) do { \
+ (task)->ta_flags = flags; \
+ (task)->ta_priority = (priority); \
+ (task)->ta_func = (func); \
+ (task)->ta_context = (context); \
+} while (0)
+
+#define GROUPTASK_INIT(gtask, priority, func, context) \
+ GTASK_INIT(&(gtask)->gt_task, TASK_SKIP_WAKEUP, priority, func, context)
+
+#define GROUPTASK_ENQUEUE(gtask) \
+ grouptaskqueue_enqueue((gtask)->gt_taskqueue, &(gtask)->gt_task)
+
+#define TASKQGROUP_DECLARE(name) \
+extern struct taskqgroup *qgroup_##name
+
+#ifdef EARLY_AP_STARTUP
+#define TASKQGROUP_DEFINE(name, cnt, stride) \
+ \
+struct taskqgroup *qgroup_##name; \
+ \
+static void \
+taskqgroup_define_##name(void *arg) \
+{ \
+ qgroup_##name = taskqgroup_create(#name); \
+ taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
+} \
+ \
+SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
+ taskqgroup_define_##name, NULL)
+#else
+#define TASKQGROUP_DEFINE(name, cnt, stride) \
+ \
+struct taskqgroup *qgroup_##name; \
+ \
+static void \
+taskqgroup_define_##name(void *arg) \
+{ \
+ qgroup_##name = taskqgroup_create(#name); \
+} \
+ \
+SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
+ taskqgroup_define_##name, NULL); \
+ \
+static void \
+taskqgroup_adjust_##name(void *arg) \
+{ \
+ taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
+} \
+ \
+SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \
+ taskqgroup_adjust_##name, NULL); \
+ \
+struct __hack
+#endif
+TASKQGROUP_DECLARE(net);
+
+#endif /* !_SYS_GTASKQUEUE_H_ */
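A minimal usage sketch of the API above, not part of the patch: a hypothetical driver wires one grouptask per receive queue. Only GROUPTASK_INIT, taskqgroup_attach, GROUPTASK_ENQUEUE, and qgroup_net come from this header; struct my_rxq and the function names are invented.

	struct my_rxq {
		struct grouptask task;
		/* ... per-queue driver state ... */
	};

	static void
	my_rxq_service(void *context)	/* gtask_fn_t: note no "pending" argument */
	{
		struct my_rxq *rxq = context;

		/* drain completed descriptors, then re-enable the queue interrupt */
	}

	static void
	my_rxq_attach(struct my_rxq *rxq, int irq)
	{
		/* priority 0; GROUPTASK_INIT sets TASK_SKIP_WAKEUP for us */
		GROUPTASK_INIT(&rxq->task, 0, my_rxq_service, rxq);
		taskqgroup_attach(qgroup_net, &rxq->task, rxq, irq, "my_rxq");
	}

	/* From the interrupt filter: GROUPTASK_ENQUEUE(&rxq->task); */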
Index: head/sys/sys/taskqueue.h
===================================================================
--- head/sys/sys/taskqueue.h
+++ head/sys/sys/taskqueue.h
@@ -204,78 +204,4 @@
taskqueue_enqueue_fn enqueue,
void *context);
-/*
- * Taskqueue groups. Manages dynamic thread groups and irq binding for
- * device and other tasks.
- */
-int grouptaskqueue_enqueue(struct taskqueue *queue, struct task *task);
-void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
- void *uniq, int irq, char *name);
-int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
- void *uniq, int cpu, int irq, char *name);
-void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
-struct taskqgroup *taskqgroup_create(char *name);
-void taskqgroup_destroy(struct taskqgroup *qgroup);
-int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
-
-#define TASK_SKIP_WAKEUP 0x1
-
-#define GTASK_INIT(task, priority, func, context) do { \
- (task)->ta_pending = 0; \
- (task)->ta_priority = (priority); \
- (task)->ta_func = (func); \
- (task)->ta_context = (context); \
-} while (0)
-
-#define GROUPTASK_INIT(gtask, priority, func, context) \
- GTASK_INIT(&(gtask)->gt_task, priority, func, context)
-
-#define GROUPTASK_ENQUEUE(gtask) \
- grouptaskqueue_enqueue((gtask)->gt_taskqueue, &(gtask)->gt_task)
-
-#define TASKQGROUP_DECLARE(name) \
-extern struct taskqgroup *qgroup_##name
-
-#ifdef EARLY_AP_STARTUP
-#define TASKQGROUP_DEFINE(name, cnt, stride) \
- \
-struct taskqgroup *qgroup_##name; \
- \
-static void \
-taskqgroup_define_##name(void *arg) \
-{ \
- qgroup_##name = taskqgroup_create(#name); \
- taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
-} \
- \
-SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
- taskqgroup_define_##name, NULL)
-#else
-#define TASKQGROUP_DEFINE(name, cnt, stride) \
- \
-struct taskqgroup *qgroup_##name; \
- \
-static void \
-taskqgroup_define_##name(void *arg) \
-{ \
- qgroup_##name = taskqgroup_create(#name); \
-} \
- \
-SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \
- taskqgroup_define_##name, NULL); \
- \
-static void \
-taskqgroup_adjust_##name(void *arg) \
-{ \
- taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
-} \
- \
-SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \
- taskqgroup_adjust_##name, NULL); \
- \
-struct __hack
-#endif
-
-TASKQGROUP_DECLARE(net);
-
#endif /* !_SYS_TASKQUEUE_H_ */