D48969.id151026.diff

diff --git a/sys/dev/gve/gve.h b/sys/dev/gve/gve.h
--- a/sys/dev/gve/gve.h
+++ b/sys/dev/gve/gve.h
@@ -303,7 +303,7 @@
SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;
/*
- * Only used in QPL mode. Pages refered to by if_input-ed mbufs
+ * Only used in QPL mode. Pages referred to by if_input-ed mbufs
* stay parked here till their wire count comes back to 1.
* Pages are moved here after there aren't any pending completions.
*/
@@ -450,7 +450,7 @@
/*
* The completion taskqueue moves pending-packet objects to this
* list after freeing the mbuf. The "_prd" denotes that this is
- * a producer list. The trasnmit taskqueue steals this list once
+ * a producer list. The transmit taskqueue steals this list once
* its consumer list, with the "_csm" suffix, is depleted.
*/
int32_t free_pending_pkts_prd;
@@ -458,7 +458,7 @@
/*
* The completion taskqueue moves the QPL pages corresponding to a
* completed packet into this list. It is only used in QPL mode.
- * The "_prd" denotes that this is a producer list. The trasnmit
+ * The "_prd" denotes that this is a producer list. The transmit
* taskqueue steals this list once its consumer list, with the "_csm"
* suffix, is depleted.
*
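
The comment above describes a single-producer/single-consumer hand-off between the completion and transmit taskqueues. A minimal sketch of how the transmit side could steal the producer list, assuming index-based lists where -1 means empty and using atomic_swap_32(9) from machine/atomic.h; the field names follow the header's "_prd"/"_csm" convention but the helper itself is hypothetical, not the driver's actual code:

    /*
     * Hypothetical sketch: refill the consumer list by atomically
     * stealing the entire producer list in one swap.
     */
    static inline void
    gve_tx_refill_csm(struct gve_tx_ring *tx)
    {
    	if (tx->free_pending_pkts_csm == -1)
    		tx->free_pending_pkts_csm = (int32_t)atomic_swap_32(
    		    (volatile uint32_t *)&tx->free_pending_pkts_prd,
    		    (uint32_t)-1);
    }
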
diff --git a/sys/dev/gve/gve_desc.h b/sys/dev/gve/gve_desc.h
--- a/sys/dev/gve/gve_desc.h
+++ b/sys/dev/gve/gve_desc.h
@@ -130,10 +130,10 @@
__be64 addr;
};
-/* GVE Recive Packet Descriptor Seq No */
+/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16toh(x) & 0x7)
-/* GVE Recive Packet Descriptor Flags */
+/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) htobe16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -32,10 +32,10 @@
#include "gve_adminq.h"
#include "gve_dqo.h"
-#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.1\n"
+#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.2\n"
#define GVE_VERSION_MAJOR 1
#define GVE_VERSION_MINOR 3
-#define GVE_VERSION_SUB 1
+#define GVE_VERSION_SUB 2
#define GVE_DEFAULT_RX_COPYBREAK 256
@@ -391,7 +391,7 @@
/*
* Set TSO limits, must match the arguments to bus_dma_tag_create
* when creating tx->dqo.buf_dmatag. Only applies to the RDA mode
- * because in QPL we copy the entire pakcet into the bounce buffer
+ * because in QPL we copy the entire packet into the bounce buffer
* and thus it does not matter how fragmented the mbuf is.
*/
if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
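
The constraint in that comment is worth making concrete: the TSO limits advertised to the stack must not exceed what the tag passed to bus_dma_tag_create() can load, or a maximally fragmented TSO chain would fail to map. A hedged sketch of the pairing, with placeholder GVE_TSO_* constants standing in for whatever values the driver actually uses:

    /* DMA tag and ifnet TSO limits must agree (placeholder constants). */
    error = bus_dma_tag_create(bus_get_dma_tag(priv->dev),
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR,          /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        GVE_TSO_MAXSIZE,            /* maxsize */
        GVE_TSO_MAXSEGS,            /* nsegments */
        GVE_TSO_MAXSEGSIZE,         /* maxsegsz */
        0, NULL, NULL,              /* flags, lockfunc, lockarg */
        &tx->dqo.buf_dmatag);
    if_sethwtsomax(ifp, GVE_TSO_MAXSIZE);
    if_sethwtsomaxsegcount(ifp, GVE_TSO_MAXSEGS);
    if_sethwtsomaxsegsize(ifp, GVE_TSO_MAXSEGSIZE);
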
diff --git a/sys/dev/gve/gve_rx.c b/sys/dev/gve/gve_rx.c
--- a/sys/dev/gve/gve_rx.c
+++ b/sys/dev/gve/gve_rx.c
@@ -706,7 +706,7 @@
* interrupt but they will still be handled by the enqueue below.
* Fragments received after the barrier WILL trigger an interrupt.
*/
- mb();
+ atomic_thread_fence_seq_cst();
if (gve_rx_work_pending(rx)) {
gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
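
For context on this change: mb() is a machine-dependent macro, while atomic_thread_fence_seq_cst() is the machine-independent equivalent from atomic(9). A full fence is required here because the ordering needed is store-load, which acquire/release fences do not provide. A schematic of the race it closes, with placeholder helpers rather than the driver's real calls:

    unmask_interrupt();             /* store: doorbell write re-enabling the IRQ */
    atomic_thread_fence_seq_cst();  /* store-load barrier; mb() on x86 */
    if (work_arrived_meanwhile())   /* load: fragments that raced the unmask */
        handle_inline();            /* handled here instead of by an interrupt */
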
diff --git a/sys/dev/gve/gve_rx_dqo.c b/sys/dev/gve/gve_rx_dqo.c
--- a/sys/dev/gve/gve_rx_dqo.c
+++ b/sys/dev/gve/gve_rx_dqo.c
@@ -972,7 +972,7 @@
* Prevent generation bit from being read after the rest of the
* descriptor.
*/
- rmb();
+ atomic_thread_fence_acq();
rx->cnt++;
rx->dqo.tail = (rx->dqo.tail + 1) & rx->dqo.mask;
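
Same idea as the gve_rx.c change, but here only acquire semantics are needed: the generation bit must be read before the rest of the descriptor, a load-load ordering that atomic_thread_fence_acq() provides and that the MD rmb() previously supplied. A sketch of the consume pattern, with placeholder names throughout except the fence itself:

    if (desc_gen_bit(compl_desc) != rx->dqo.cur_gen_bit)
        return;                     /* NIC has not finished writing this slot */
    atomic_thread_fence_acq();      /* gen bit read before descriptor body */
    process_descriptor(compl_desc); /* remaining fields now safe to read */
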
diff --git a/sys/dev/gve/gve_sysctl.c b/sys/dev/gve/gve_sysctl.c
--- a/sys/dev/gve/gve_sysctl.c
+++ b/sys/dev/gve/gve_sysctl.c
@@ -94,7 +94,7 @@
SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
"rx_mbuf_dmamap_err", CTLFLAG_RD,
&stats->rx_mbuf_dmamap_err,
- "Number of rx mbufs which couldnt be dma mapped");
+ "Number of rx mbufs which could not be dma mapped");
SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
"rx_mbuf_mclget_null", CTLFLAG_RD,
&stats->rx_mbuf_mclget_null,
@@ -170,7 +170,7 @@
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"tx_mbuf_collpase", CTLFLAG_RD,
&stats->tx_mbuf_collapse,
- "tx mbufs that had to be collpased");
+ "tx mbufs that had to be collapsed");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"tx_mbuf_defrag", CTLFLAG_RD,
&stats->tx_mbuf_defrag,
diff --git a/sys/dev/gve/gve_tx.c b/sys/dev/gve/gve_tx.c
--- a/sys/dev/gve/gve_tx.c
+++ b/sys/dev/gve/gve_tx.c
@@ -240,15 +240,16 @@
}
static void
-gve_start_tx_ring(struct gve_priv *priv, int i,
- void (cleanup) (void *arg, int pending))
+gve_start_tx_ring(struct gve_priv *priv, int i)
{
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
atomic_store_bool(&tx->stopped, false);
-
- NET_TASK_INIT(&com->cleanup_task, 0, cleanup, tx);
+ if (gve_is_gqi(priv))
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx);
+ else
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx);
com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK,
taskqueue_thread_enqueue, &com->cleanup_tq);
taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, "%s txq %d",
@@ -297,10 +298,7 @@
com->db_offset = 4 * be32toh(com->q_resources->db_index);
com->counter_idx = be32toh(com->q_resources->counter_index);
- if (gve_is_gqi(priv))
- gve_start_tx_ring(priv, i, gve_tx_cleanup_tq);
- else
- gve_start_tx_ring(priv, i, gve_tx_cleanup_tq_dqo);
+ gve_start_tx_ring(priv, i);
}
gve_set_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
@@ -421,7 +419,7 @@
* interrupt but they will still be handled by the enqueue below.
* Completions born after the barrier WILL trigger an interrupt.
*/
- mb();
+ atomic_thread_fence_seq_cst();
nic_done = gve_tx_load_event_counter(priv, tx);
todo = nic_done - tx->done;
diff --git a/sys/dev/gve/gve_tx_dqo.c b/sys/dev/gve/gve_tx_dqo.c
--- a/sys/dev/gve/gve_tx_dqo.c
+++ b/sys/dev/gve/gve_tx_dqo.c
@@ -1031,7 +1031,7 @@
* Prevent generation bit from being read after the rest of the
* descriptor.
*/
- rmb();
+ atomic_thread_fence_acq();
type = compl_desc->type;
if (type == GVE_COMPL_TYPE_DQO_DESC) {
