D49427: gve: Add feature to adjust RX/TX queue counts
D49427.id152481.diff (11 KB)
diff --git a/share/man/man4/gve.4 b/share/man/man4/gve.4
--- a/share/man/man4/gve.4
+++ b/share/man/man4/gve.4
@@ -79,6 +79,13 @@
.It
0x1AE0:0x0042
.El
+.Sh EXAMPLES
+.Pp
+Change the TX queue count to 4 for the gve0 interface:
+.D1 sysctl dev.gve.0.num_tx_queues=4
+.Pp
+Change the RX queue count to 4 for the gve0 interface:
+.D1 sysctl dev.gve.0.num_rx_queues=4
.Sh DIAGNOSTICS
The following messages are recorded during driver initialization:
.Bl -diag
@@ -211,6 +218,30 @@
The software LRO stack in the kernel is always used.
This sysctl variable needs to be set before loading the driver, using
.Xr loader.conf 5 .
+.It Va dev.gve.X.num_rx_queues
+A run-time tunable that holds the number of RX queues currently in use.
+The default value is the maximum number of RX queues the device supports.
+.Pp
+Changing this value brings the interface down while the new queues are set up,
+which may cause packets to be dropped.
+The change can fail if the system cannot provide the driver with enough resources.
+In that case, the driver reverts to the previous number of RX queues.
+If that also fails, a device reset is triggered.
+.Pp
+Note: sysctl nodes for queue statistics remain available even after a queue is removed.
+.Pp
+.It Va dev.gve.X.num_tx_queues
+A run-time tunable that holds the number of TX queues currently in use.
+The default value is the maximum number of TX queues the device supports.
+.Pp
+Changing this value brings the interface down while the new queues are set up,
+which may cause packets to be dropped.
+The change can fail if the system cannot provide the driver with enough resources.
+In that case, the driver reverts to the previous number of TX queues.
+If that also fails, a device reset is triggered.
+.Pp
+Note: sysctl nodes for queue statistics remain available even after a queue is removed.
+.Pp
.El
.Sh LIMITATIONS
.Nm
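
A minimal userspace sketch (not part of this patch) of exercising the new
num_tx_queues node described above via sysctlbyname(3).  It assumes the first
gve(4) instance (dev.gve.0) and that the node is the 16-bit read-write sysctl
added by this change; error handling is kept to the bare minimum.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const char *node = "dev.gve.0.num_tx_queues";
	uint16_t cur, want = 4;
	size_t len = sizeof(cur);

	/* Read the number of TX queues currently in use. */
	if (sysctlbyname(node, &cur, &len, NULL, 0) == -1)
		err(1, "read %s", node);
	printf("%s: %u\n", node, (unsigned)cur);

	/* Request 4 TX queues; the driver rejects values above the maximum. */
	if (sysctlbyname(node, NULL, NULL, &want, sizeof(want)) == -1)
		err(1, "write %s", node);

	return (0);
}

The equivalent from the shell is the sysctl invocation shown in the EXAMPLES
section above; a write only succeeds for values between 1 and the device's
maximum queue count.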
diff --git a/sys/dev/gve/gve.h b/sys/dev/gve/gve.h
--- a/sys/dev/gve/gve.h
+++ b/sys/dev/gve/gve.h
@@ -620,6 +620,9 @@
/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);
+int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
+int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
+int gve_adjust_ring_sizes(struct gve_priv *priv, bool is_rx);
/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
@@ -636,8 +639,8 @@
void gve_mextadd_free(struct mbuf *mbuf);
/* TX functions defined in gve_tx.c */
-int gve_alloc_tx_rings(struct gve_priv *priv);
-void gve_free_tx_rings(struct gve_priv *priv);
+int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_tx_intr(void *arg);
@@ -656,8 +659,8 @@
void gve_tx_cleanup_tq_dqo(void *arg, int pending);
/* RX functions defined in gve_rx.c */
-int gve_alloc_rx_rings(struct gve_priv *priv);
-void gve_free_rx_rings(struct gve_priv *priv);
+int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -192,6 +192,74 @@
gve_schedule_reset(priv);
}
+int
+gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+ int err;
+
+ GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+ gve_down(priv);
+
+ if (new_queue_cnt < priv->rx_cfg.num_queues) {
+ /*
+ * Freeing a ring still preserves its ntfy_id,
+ * which is needed if we create the ring again.
+ */
+ gve_free_rx_rings(priv, new_queue_cnt, priv->rx_cfg.num_queues);
+ } else {
+ err = gve_alloc_rx_rings(priv, priv->rx_cfg.num_queues, new_queue_cnt);
+ if (err != 0) {
+			device_printf(priv->dev, "Failed to allocate new queues\n");
+			/* Failed to allocate rings, start back up with old ones. */
+ gve_up(priv);
+ return (err);
+
+ }
+ }
+ priv->rx_cfg.num_queues = new_queue_cnt;
+
+ err = gve_up(priv);
+ if (err != 0)
+ gve_schedule_reset(priv);
+
+ return (err);
+}
+
+int
+gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+ int err;
+
+ GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+ gve_down(priv);
+
+ if (new_queue_cnt < priv->tx_cfg.num_queues) {
+ /*
+ * Freeing a ring still preserves its ntfy_id,
+ * which is needed if we create the ring again.
+ */
+ gve_free_tx_rings(priv, new_queue_cnt, priv->tx_cfg.num_queues);
+ } else {
+ err = gve_alloc_tx_rings(priv, priv->tx_cfg.num_queues, new_queue_cnt);
+ if (err != 0) {
+			device_printf(priv->dev, "Failed to allocate new queues\n");
+			/* Failed to allocate rings, start back up with old ones. */
+ gve_up(priv);
+ return (err);
+
+ }
+ }
+ priv->tx_cfg.num_queues = new_queue_cnt;
+
+ err = gve_up(priv);
+ if (err != 0)
+ gve_schedule_reset(priv);
+
+ return (err);
+}
+
static int
gve_set_mtu(if_t ifp, uint32_t new_mtu)
{
@@ -480,8 +548,10 @@
gve_free_rings(struct gve_priv *priv)
{
gve_free_irqs(priv);
- gve_free_tx_rings(priv);
- gve_free_rx_rings(priv);
+ gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+ free(priv->tx, M_GVE);
+ gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ free(priv->rx, M_GVE);
}
static int
@@ -489,11 +559,15 @@
{
int err;
- err = gve_alloc_rx_rings(priv);
+ priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.max_queues,
+ M_GVE, M_WAITOK | M_ZERO);
+ err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.max_queues);
if (err != 0)
goto abort;
- err = gve_alloc_tx_rings(priv);
+ priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.max_queues,
+ M_GVE, M_WAITOK | M_ZERO);
+ err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.max_queues);
if (err != 0)
goto abort;
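
The adjust routines above, together with the reworked gve_alloc_*_rings() and
gve_free_*_rings() in gve_rx.c and gve_tx.c below, operate on half-open index
ranges: shrinking frees only the rings in [new_cnt, old_cnt), growing
allocates only [old_cnt, new_cnt), and a failed grow rolls back before the
interface is brought back up with the old count.  The following standalone
model illustrates that flow; alloc_ring() and free_ring() are hypothetical
stand-ins, not driver functions.

#include <stdbool.h>
#include <stdio.h>

static bool
alloc_ring(int i)
{
	printf("alloc ring %d\n", i);
	return (true);
}

static void
free_ring(int i)
{
	printf("free ring %d\n", i);
}

static int
adjust_queues(int *num_queues, int new_cnt)
{
	int i;

	if (new_cnt < *num_queues) {
		/* Shrink: free only the rings in [new_cnt, old_cnt). */
		for (i = new_cnt; i < *num_queues; i++)
			free_ring(i);
	} else {
		/* Grow: allocate only the rings in [old_cnt, new_cnt). */
		for (i = *num_queues; i < new_cnt; i++) {
			if (!alloc_ring(i)) {
				/* Roll back and keep the old queue count. */
				while (i-- > *num_queues)
					free_ring(i);
				return (-1);
			}
		}
	}
	*num_queues = new_cnt;
	return (0);
}

int
main(void)
{
	int n = 4;

	adjust_queues(&n, 8);	/* grow: allocates rings 4..7 */
	adjust_queues(&n, 2);	/* shrink: frees rings 2..7 */
	printf("now using %d queues\n", n);
	return (0);
}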
diff --git a/sys/dev/gve/gve_rx.c b/sys/dev/gve/gve_rx.c
--- a/sys/dev/gve/gve_rx.c
+++ b/sys/dev/gve/gve_rx.c
@@ -183,38 +183,32 @@
}
int
-gve_alloc_rx_rings(struct gve_priv *priv)
+gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
- int err = 0;
int i;
+ int err;
- priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues,
- M_GVE, M_WAITOK | M_ZERO);
+ KASSERT(priv->rx != NULL, ("priv->rx is NULL!"));
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ for (i = start_idx; i < stop_idx; i++) {
err = gve_rx_alloc_ring(priv, i);
if (err != 0)
goto free_rings;
}
return (0);
-
free_rings:
- while (i--)
- gve_rx_free_ring(priv, i);
- free(priv->rx, M_GVE);
+ gve_free_rx_rings(priv, start_idx, i);
return (err);
}
void
-gve_free_rx_rings(struct gve_priv *priv)
+gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (i = start_idx; i < stop_idx; i++)
gve_rx_free_ring(priv, i);
-
- free(priv->rx, M_GVE);
}
static void
diff --git a/sys/dev/gve/gve_sysctl.c b/sys/dev/gve/gve_sysctl.c
--- a/sys/dev/gve/gve_sysctl.c
+++ b/sys/dev/gve/gve_sysctl.c
@@ -285,6 +285,95 @@
&priv->reset_cnt, 0, "Times reset");
}
+static int gve_check_num_queues(struct gve_priv *priv, uint16_t val, bool is_rx)
+{
+ if (val < 1) {
+ device_printf(priv->dev,
+ "Requested num queues (%u) must be a positive integer\n", val);
+ return (EINVAL);
+ }
+
+ if (val > (is_rx ? priv->rx_cfg.max_queues : priv->tx_cfg.max_queues)) {
+ device_printf(priv->dev,
+ "Requested num queues (%u) is too large\n", val);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static int
+gve_sysctl_num_tx_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ uint16_t val;
+ int err;
+
+ val = priv->tx_cfg.num_queues;
+ err = sysctl_handle_16(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_num_queues(priv, val, /*is_rx=*/false);
+ if (err != 0)
+ return (err);
+
+ if (val != priv->tx_cfg.num_queues) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ gve_adjust_tx_queues(priv, val);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ } else {
+ device_printf(priv->dev,
+ "Requested num TX queues is the same as already used: %u\n",
+ priv->tx_cfg.num_queues);
+ }
+
+ return (0);
+}
+
+static int
+gve_sysctl_num_rx_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ uint16_t val;
+ int err;
+
+ val = priv->rx_cfg.num_queues;
+ err = sysctl_handle_16(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_num_queues(priv, val, /*is_rx=*/true);
+
+ if (err != 0)
+ return (err);
+
+ if (val != priv->rx_cfg.num_queues) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ gve_adjust_rx_queues(priv, val);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ } else {
+ device_printf(priv->dev,
+ "Requested num RX queues is the same as already used: %u\n",
+ priv->rx_cfg.num_queues);
+ }
+
+ return (0);
+}
+
+static void
+gve_setup_sysctl_writables(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child, struct gve_priv *priv)
+{
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_tx_queues",
+ CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_num_tx_queues, "I", "Number of TX Queues.");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_rx_queues",
+ CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_num_rx_queues, "I", "Number of RX Queues.");
+}
+
void gve_setup_sysctl(struct gve_priv *priv)
{
device_t dev;
@@ -300,6 +389,7 @@
gve_setup_queue_stat_sysctl(ctx, child, priv);
gve_setup_adminq_stat_sysctl(ctx, child, priv);
gve_setup_main_stat_sysctl(ctx, child, priv);
+ gve_setup_sysctl_writables(ctx, child, priv);
}
void
diff --git a/sys/dev/gve/gve_tx.c b/sys/dev/gve/gve_tx.c
--- a/sys/dev/gve/gve_tx.c
+++ b/sys/dev/gve/gve_tx.c
@@ -180,39 +180,32 @@
}
int
-gve_alloc_tx_rings(struct gve_priv *priv)
+gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
- int err = 0;
int i;
+ int err;
- priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.num_queues,
- M_GVE, M_WAITOK | M_ZERO);
+ KASSERT(priv->tx != NULL, ("priv->tx is NULL!"));
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = start_idx; i < stop_idx; i++) {
err = gve_tx_alloc_ring(priv, i);
if (err != 0)
goto free_rings;
-
}
return (0);
-
free_rings:
- while (i--)
- gve_tx_free_ring(priv, i);
- free(priv->tx, M_GVE);
+ gve_free_tx_rings(priv, start_idx, i);
return (err);
}
void
-gve_free_tx_rings(struct gve_priv *priv)
+gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++)
+ for (i = start_idx; i < stop_idx; i++)
gve_tx_free_ring(priv, i);
-
- free(priv->tx, M_GVE);
}
static void
diff --git a/sys/dev/gve/gve_utils.c b/sys/dev/gve/gve_utils.c
--- a/sys/dev/gve/gve_utils.c
+++ b/sys/dev/gve/gve_utils.c
@@ -234,7 +234,7 @@
return;
}
- num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;
+ num_irqs = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues + 1;
for (i = 0; i < num_irqs; i++) {
irq = &priv->irq_tbl[i];
@@ -268,8 +268,8 @@
int
gve_alloc_irqs(struct gve_priv *priv)
{
- int num_tx = priv->tx_cfg.num_queues;
- int num_rx = priv->rx_cfg.num_queues;
+ int num_tx = priv->tx_cfg.max_queues;
+ int num_rx = priv->rx_cfg.max_queues;
int req_nvecs = num_tx + num_rx + 1;
int got_nvecs = req_nvecs;
struct gve_irq *irq;