Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F138060063
D20029.id56604.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
4 KB
Referenced Files
None
Subscribers
None
D20029.id56604.diff
View Options
Index: share/man/man4/iflib.4
===================================================================
--- share/man/man4/iflib.4
+++ share/man/man4/iflib.4
@@ -55,6 +55,14 @@
socket connected to the controller.
.It Va disable_msix
Disables MSI-X interrupts for the device.
+.It Va core_offset
+Specifies a starting core offset to assign queues to.
+If the value is unspecified or 65535, cores are assigned sequentially across controllers.
+.It Va separate_txrx
+Requests that RX and TX queues not be paired on the same core.
+If this is zero or not set, an RX and TX queue pair will be assigned to each core.
+When set to a non-zero value, TX queues are assigned to cores following the
+last RX queue.
.El
.Pp
These
Index: sys/net/iflib.c
===================================================================
--- sys/net/iflib.c
+++ sys/net/iflib.c
@@ -188,6 +188,9 @@
uint16_t ifc_sysctl_qs_eq_override;
uint16_t ifc_sysctl_rx_budget;
uint16_t ifc_sysctl_tx_abdicate;
+ uint16_t ifc_sysctl_core_offset;
+#define CORE_OFFSET_UNSPECIFIED 0xffff
+ uint8_t ifc_sysctl_separate_txrx;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
@@ -725,6 +728,17 @@
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif
+static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
+ SLIST_HEAD_INITIALIZER(cpu_offsets);
+struct cpu_offset {
+ SLIST_ENTRY(cpu_offset) entries;
+ cpuset_t set;
+ uint16_t offset;
+};
+static struct mtx cpu_offset_mtx;
+MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
+ MTX_DEF);
+
NETDUMP_DEFINE(iflib);
#ifdef DEV_NETMAP
@@ -4366,6 +4380,49 @@
}
}
+static uint16_t
+get_ctx_core_offset(if_ctx_t ctx)
+{
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
+ struct cpu_offset *op;
+ uint16_t qc;
+ uint16_t ret = ctx->ifc_sysctl_core_offset;
+
+ if (ret != CORE_OFFSET_UNSPECIFIED)
+ return (ret);
+
+ if (ctx->ifc_sysctl_separate_txrx)
+ qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets;
+ else
+ qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets);
+
+ mtx_lock(&cpu_offset_mtx);
+ SLIST_FOREACH(op, &cpu_offsets, entries) {
+ if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
+ ret = op->offset;
+ op->offset += qc;
+ break;
+ }
+ }
+ if (ret == CORE_OFFSET_UNSPECIFIED) {
+ ret = 0;
+ op = malloc(sizeof(struct cpu_offset), M_IFLIB,
+ M_NOWAIT | M_ZERO);
+ if (op == NULL) {
+ device_printf(ctx->ifc_dev,
+ "allocation for cpu offset failed.\n");
+ }
+ else {
+ op->offset = qc;
+ CPU_COPY(&ctx->ifc_cpus, &op->set);
+ SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
+ }
+ }
+ mtx_unlock(&cpu_offset_mtx);
+
+ return (ret);
+}
+
int
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
{
@@ -4518,6 +4575,11 @@
goto fail_queues;
/*
+ * Now that we know how many queues there are, get the core offset.
+ */
+ ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
+
+ /*
* Group taskqueues aren't properly set up until SMP is started,
* so we disable interrupts until we can handle them post
* SI_SUB_SMP.
@@ -5073,6 +5135,7 @@
return (err);
break;
case MOD_UNLOAD:
+ /* Be sure to free the cpu_offsets list on success */
return (EBUSY);
default:
return (EOPNOTSUPP);
@@ -5558,7 +5621,7 @@
* Find the nth "close" core to the specified core
* "close" is defined as the deepest level that shares
* at least an L2 cache. With threads, this will be
- * threads on the same core. If the sahred cache is L3
+ * threads on the same core. If the shared cache is L3
* or higher, simply returns the same core.
*/
static int
@@ -5642,10 +5705,13 @@
const char *name)
{
device_t dev;
- int err, cpuid, tid;
+ int co, cpuid, err, tid;
dev = ctx->ifc_dev;
- cpuid = find_nth(ctx, qid);
+ co = ctx->ifc_sysctl_core_offset;
+ if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
+ co += ctx->ifc_softc_ctx.isc_nrxqsets;
+ cpuid = find_nth(ctx, qid + co);
tid = get_core_offset(ctx, type, qid);
MPASS(tid >= 0);
cpuid = find_close_core(cpuid, tid);
@@ -6247,6 +6313,13 @@
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
"cause tx to abdicate instead of running to completion");
+ ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
+ SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
+ CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
+ "offset to start using cores at");
+ SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
+ CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
+ "use separate cores for TX and RX");
/* XXX change for per-queue sizes */
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Sat, Nov 29, 1:57 PM (10 h, 58 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
26338264
Default Alt Text
D20029.id56604.diff (4 KB)
Attached To
Mode
D20029: Better control over queue core assignment
Attached
Detach File
Event Timeline
Log In to Comment