sys/dev/gve/gve.h
Show First 20 Lines • Show All 99 Lines • ▼ Show 20 Lines
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
    GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
    GVE_GQI_RDA_FORMAT = 0x1,
    GVE_GQI_QPL_FORMAT = 0x2,
    GVE_DQO_RDA_FORMAT = 0x3,
    GVE_DQO_QPL_FORMAT = 0x4,
};
enum gve_state_flags_bit {
    GVE_STATE_FLAG_ADMINQ_OK,
    GVE_STATE_FLAG_RESOURCES_OK,
    GVE_STATE_FLAG_QPLREG_OK,
    GVE_STATE_FLAG_RX_RINGS_OK,
    GVE_STATE_FLAG_TX_RINGS_OK,
▲ Show 20 Lines • Show All 105 Lines • ▼ Show 20 Lines
struct gve_rxq_stats {
    counter_u64_t rbytes;
    counter_u64_t rpackets;
    counter_u64_t rx_dropped_pkt;
    counter_u64_t rx_copybreak_cnt;
    counter_u64_t rx_frag_flip_cnt;
    counter_u64_t rx_frag_copy_cnt;
    counter_u64_t rx_dropped_pkt_desc_err;
    counter_u64_t rx_dropped_pkt_buf_post_fail;
    counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
    counter_u64_t rx_mbuf_dmamap_err;
    counter_u64_t rx_mbuf_mclget_null;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
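The NUM_RX_STATS trick above works because gve_rxq_stats is a homogeneous block of counter_u64_t members, so the whole struct can be walked as a flat array of counters, presumably so the driver can allocate and export them in a loop. Below is a minimal userspace sketch of that technique, with plain uint64_t values standing in for the kernel's counter_u64_t handles; the stat names and the attach_counter() helper are illustrative stand-ins, not driver API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of a stats block made only of same-typed counters. */
struct rxq_stats {
    uint64_t rbytes;
    uint64_t rpackets;
    uint64_t rx_dropped_pkt;
};
#define NUM_STATS (sizeof(struct rxq_stats) / sizeof(uint64_t))

/* Hypothetical per-counter hook; a driver would attach a sysctl or similar here. */
static void
attach_counter(const char *name, uint64_t *counter)
{
    printf("attaching %s (current value %llu)\n", name,
        (unsigned long long)*counter);
}

int
main(void)
{
    static const char *names[NUM_STATS] = {
        "rbytes", "rpackets", "rx_dropped_pkt"
    };
    struct rxq_stats stats = { .rpackets = 42 };
    /*
     * Treat the struct as an array of counters; like the driver's macro,
     * this relies on there being no padding between same-typed members.
     */
    uint64_t *counters = (uint64_t *)&stats;

    for (size_t i = 0; i < NUM_STATS; i++)
        attach_counter(names[i], &counters[i]);
    return (0);
}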
union gve_rx_qpl_buf_id_dqo {
    struct {
        uint16_t buf_id:11; /* Index into rx->dqo.bufs */
        uint8_t frag_num:5; /* Which frag in the QPL page */
    };
    uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
    "gve: bad dqo qpl rx buf id length");
struct gve_rx_buf_dqo {
    union {
        /* RDA */
        struct {
            struct mbuf *mbuf;
            bus_dmamap_t dmamap;
            uint64_t addr;
            bool mapped;
        };
        /* QPL */
        struct {
            uint8_t num_nic_frags; /* number of pending completions */
            uint8_t next_idx; /* index of the next frag to post */
            /* for chaining rx->dqo.used_bufs */
            STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
        };
    };
    /* for chaining rx->dqo.free_bufs */
    SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};
/* power-of-2 sized receive ring */
struct gve_rx_ring {
    struct gve_ring_com com;
    struct gve_dma_handle desc_ring_mem;

    uint32_t cnt; /* free-running total number of completed packets */
Show All 22 Lines

        struct {
            bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */

            uint32_t buf_cnt; /* Size of the bufs array */
            uint32_t mask; /* One less than the sizes of the desc and compl rings */
            uint32_t head; /* The index at which to post the next buffer */
            uint32_t tail; /* The index at which to receive the next compl */
            uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */

            SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;

            /*
             * Only used in QPL mode. Pages referred to by if_input-ed mbufs
             * stay parked here till their wire count comes back to 1.
             * Pages are moved here once they have no pending completions.
             */
            STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
        } dqo;
    };

    struct lro_ctrl lro;
    struct gve_rx_ctx ctx;
    struct gve_rxq_stats stats;
} __aligned(CACHE_LINE_SIZE);
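Per the used_bufs comment above, a QPL receive buffer cannot be reposted the moment the NIC is done with it: it is parked on used_bufs once it has no pending completions and only returns to free_bufs when the page's wire count drops back to 1, that is, when no if_input-ed mbuf still references it. The sketch below is a simplified userspace analogue of that life cycle, using sys/queue.h lists like the ring does and a plain page_refs field standing in for the wire count; the helper names are made up for illustration.

#include <stdio.h>
#include <sys/queue.h>

struct rx_buf {
    int page_refs;                /* stand-in for the QPL page wire count */
    int num_nic_frags;            /* fragments still owned by the NIC; 0 before parking */
    STAILQ_ENTRY(rx_buf) used_entry;
    SLIST_ENTRY(rx_buf) free_entry;
};

static SLIST_HEAD(, rx_buf) free_bufs = SLIST_HEAD_INITIALIZER(free_bufs);
static STAILQ_HEAD(, rx_buf) used_bufs = STAILQ_HEAD_INITIALIZER(used_bufs);

/* Called when the last NIC completion for this buffer has been processed. */
static void
buf_completions_done(struct rx_buf *buf)
{
    /* Park the buffer until the stack gives the page back (wire count 1). */
    STAILQ_INSERT_TAIL(&used_bufs, buf, used_entry);
}

/* Sweep from the head: recycle parked buffers whose wire count is back to 1. */
static void
recycle_used_bufs(void)
{
    struct rx_buf *buf;

    while ((buf = STAILQ_FIRST(&used_bufs)) != NULL && buf->page_refs == 1) {
        STAILQ_REMOVE_HEAD(&used_bufs, used_entry);
        SLIST_INSERT_HEAD(&free_bufs, buf, free_entry);
    }
}

int
main(void)
{
    struct rx_buf buf = { .page_refs = 2, .num_nic_frags = 0 };

    buf_completions_done(&buf);       /* NIC is done; stack still holds a ref */
    recycle_used_bufs();
    printf("free after first sweep: %s\n",
        SLIST_EMPTY(&free_bufs) ? "no" : "yes");

    buf.page_refs = 1;                /* if_input-ed mbufs have been freed */
    recycle_used_bufs();
    printf("free after second sweep: %s\n",
        SLIST_EMPTY(&free_bufs) ? "no" : "yes");
    return (0);
}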
Show All 21 Lines

struct gve_txq_stats {
    counter_u64_t tbytes;
    counter_u64_t tpackets;
    counter_u64_t tso_packet_cnt;
    counter_u64_t tx_dropped_pkt;
    counter_u64_t tx_dropped_pkt_nospace_device;
    counter_u64_t tx_dropped_pkt_nospace_bufring;
    counter_u64_t tx_delayed_pkt_nospace_descring;
    counter_u64_t tx_delayed_pkt_nospace_compring;
    counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
    counter_u64_t tx_delayed_pkt_tsoerr;
    counter_u64_t tx_dropped_pkt_vlan;
    counter_u64_t tx_mbuf_collapse;
    counter_u64_t tx_mbuf_defrag;
    counter_u64_t tx_mbuf_defrag_err;
    counter_u64_t tx_mbuf_dmamap_enomem_err;
    counter_u64_t tx_mbuf_dmamap_err;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
struct gve_tx_pending_pkt_dqo {
    struct mbuf *mbuf;
    union {
        /* RDA */
        bus_dmamap_t dmamap;
        /* QPL */
        struct {
            /*
             * A linked list of entries from qpl_bufs that served
             * as the bounce buffer for this packet.
             */
            int32_t qpl_buf_head;
            uint32_t num_qpl_bufs;
        };
    };
    uint8_t state; /* the gve_packet_state enum */
    int next; /* To chain the free_pending_pkts lists */
};
/* power-of-2 sized transmit ring */
struct gve_tx_ring {
    struct gve_ring_com com;
    struct gve_dma_handle desc_ring_mem;

Show All 34 Lines

            struct {
                /*
                 * The head index of a singly linked list containing pending packet objects
                 * to park mbufs till the NIC sends completions. Once this list is depleted,
                 * the "_prd" suffixed producer list, grown by the completion taskqueue,
                 * is stolen.
                 */
                int32_t free_pending_pkts_csm;

                /*
                 * The head index of a singly linked list representing QPL page fragments
                 * to copy mbuf payload into for the NIC to see. Once this list is depleted,
                 * the "_prd" suffixed producer list, grown by the completion taskqueue,
                 * is stolen.
                 *
                 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
                 */
                int32_t free_qpl_bufs_csm;
                uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
                uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */

                /* DMA params for mapping Tx mbufs. Only used in RDA mode. */
                bus_dma_tag_t buf_dmatag;
            } __aligned(CACHE_LINE_SIZE);
            /* Accessed when processing completions */
            struct {
                struct gve_tx_compl_desc_dqo *compl_ring;
                uint32_t compl_mask; /* masks head to the size of compl_ring */
                uint32_t compl_head; /* last completion read by driver */
                uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
                uint32_t hw_tx_head; /* last desc read by NIC */

                /*
                 * The completion taskqueue moves pending-packet objects to this
                 * list after freeing the mbuf. The "_prd" denotes that this is
                 * a producer list. The transmit taskqueue steals this list once
                 * its consumer list, with the "_csm" suffix, is depleted.
                 */
                int32_t free_pending_pkts_prd;

                /*
                 * The completion taskqueue moves the QPL pages corresponding to a
                 * completed packet into this list. The "_prd" denotes that this
                 * is a producer list. The transmit taskqueue steals this list
                 * once its consumer list, with the "_csm" suffix, is depleted.
                 *
                 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
                 */
                int32_t free_qpl_bufs_prd;
                uint32_t qpl_bufs_produced;
            } __aligned(CACHE_LINE_SIZE);
            /* Accessed by both the completion and xmit loops */
            struct {
                /* completion tags index into this array */
                struct gve_tx_pending_pkt_dqo *pending_pkts;
                uint16_t num_pending_pkts;

                /*
                 * Represents QPL page fragments. An index into this array
                 * always represents the same QPL page fragment. The value
                 * is also an index into this array and serves as a means
                 * to chain buffers into linked lists whose heads are
                 * either free_qpl_bufs_prd, free_qpl_bufs_csm, or a
                 * pending packet's qpl_buf_head.
                 */
                int32_t *qpl_bufs;
            } __aligned(CACHE_LINE_SIZE);
        } dqo;
    };

    struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
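The qpl_bufs array described above is itself the linked-list storage: qpl_bufs[i] holds the index of the next fragment in a chain, with -1 terminating it, and free_qpl_bufs_csm / free_qpl_bufs_prd are simply head indices into that array (int32_t because, as the comments note, atomic_swap_16 doesn't exist). When the xmit path's consumer chain runs dry, it steals the producer chain grown by the completion taskqueue in one atomic swap. The sketch below is a simplified userspace analogue: atomic_exchange stands in for the kernel's atomic_swap_32, the CAS-based qpl_buf_produce() is one safe way to grow a chain whose head can be stolen concurrently (not necessarily how the driver does it), and all function names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QPL_BUFS 8

/* qpl_bufs[i] = index of the next fragment in the chain, or -1 at the end. */
static int32_t qpl_bufs[NUM_QPL_BUFS];

static int32_t free_qpl_bufs_csm = -1;           /* consumer-owned chain */
static _Atomic int32_t free_qpl_bufs_prd = -1;   /* producer-grown chain */

/* Completion path: push a reclaimed fragment onto the producer chain. */
static void
qpl_buf_produce(int32_t idx)
{
    int32_t old_head = atomic_load(&free_qpl_bufs_prd);

    do {
        qpl_bufs[idx] = old_head;
    } while (!atomic_compare_exchange_weak(&free_qpl_bufs_prd,
        &old_head, idx));
}

/* Xmit path: pop one fragment, stealing the producer chain if needed. */
static int32_t
qpl_buf_pop(void)
{
    int32_t idx;

    if (free_qpl_bufs_csm == -1) {
        /* Steal everything the completion path has produced so far. */
        free_qpl_bufs_csm = atomic_exchange(&free_qpl_bufs_prd, -1);
        if (free_qpl_bufs_csm == -1)
            return (-1);                 /* nothing available */
    }
    idx = free_qpl_bufs_csm;
    free_qpl_bufs_csm = qpl_bufs[idx];   /* advance to the next link */
    return (idx);
}

int
main(void)
{
    /* Start with every fragment on the consumer chain: 0 -> 1 -> ... -> 7. */
    for (int32_t i = 0; i < NUM_QPL_BUFS; i++)
        qpl_bufs[i] = (i + 1 < NUM_QPL_BUFS) ? i + 1 : -1;
    free_qpl_bufs_csm = 0;

    for (int i = 0; i < NUM_QPL_BUFS; i++)
        printf("popped fragment %d\n", (int)qpl_buf_pop());

    qpl_buf_produce(3);                  /* completion returns a fragment */
    printf("after stealing, popped fragment %d\n", (int)qpl_buf_pop());
    return (0);
}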
enum gve_packet_state {
    /*
▲ Show 20 Lines • Show All 113 Lines • ▼ Show 20 Lines
}

static inline bool
gve_is_gqi(struct gve_priv *priv)
{
    return (priv->queue_format == GVE_GQI_QPL_FORMAT);
}

static inline bool
gve_is_qpl(struct gve_priv *priv)
{
    return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
        priv->queue_format == GVE_DQO_QPL_FORMAT);
}
/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);

/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);

/* QPL (Queue Page List) functions defined in gve_qpl.c */
int gve_alloc_qpls(struct gve_priv *priv);
void gve_free_qpls(struct gve_priv *priv);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);

/* TX functions defined in gve_tx.c */
int gve_alloc_tx_rings(struct gve_priv *priv);
void gve_free_tx_rings(struct gve_priv *priv);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);

/* TX functions defined in gve_tx_dqo.c */
int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
int gve_tx_intr_dqo(void *arg);
int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
void gve_tx_cleanup_tq_dqo(void *arg, int pending);

/* RX functions defined in gve_rx.c */
int gve_alloc_rx_rings(struct gve_priv *priv);
void gve_free_rx_rings(struct gve_priv *priv);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);

Show All 38 Lines