diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h index cb1a2907a443..9daa1235bd9c 100644 --- a/sys/dev/mlx5/driver.h +++ b/sys/dev/mlx5/driver.h @@ -1,1234 +1,1277 @@ /*- * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved. * Copyright (c) 2022 NVIDIA corporation & affiliates. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef MLX5_DRIVER_H #define MLX5_DRIVER_H #include "opt_ratelimit.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MLX5_QCOUNTER_SETS_NETDEV 64 #define MLX5_MAX_NUMBER_OF_VFS 128 #define MLX5_INVALID_QUEUE_HANDLE 0xffffffff +#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, }; enum { MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, }; enum { CMD_OWNER_SW = 0x0, CMD_OWNER_HW = 0x1, CMD_STATUS_SUCCESS = 0, }; enum mlx5_sqp_t { MLX5_SQP_SMI = 0, MLX5_SQP_GSI = 1, MLX5_SQP_IEEE_1588 = 2, MLX5_SQP_SNIFFER = 3, MLX5_SQP_SYNC_UMR = 4, }; enum { MLX5_MAX_PORTS = 2, }; enum { MLX5_EQ_VEC_PAGES = 0, MLX5_EQ_VEC_CMD = 1, MLX5_EQ_VEC_ASYNC = 2, MLX5_EQ_VEC_COMP_BASE, }; enum { MLX5_ATOMIC_MODE_OFF = 16, MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF, }; enum { MLX5_ATOMIC_MODE_DCT_OFF = 20, MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF, }; enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, 
MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2, MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3, }; enum { MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QPDP = 0x4007, MLX5_REG_QTCT = 0x400A, MLX5_REG_QPDPM = 0x4013, MLX5_REG_QHLL = 0x4016, MLX5_REG_QCAM = 0x4019, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_FPGA_CAP = 0x4022, MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_FPGA_ACCESS_REG = 0x4024, MLX5_REG_FPGA_SHELL_CNTR = 0x4025, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMLP = 0x5002, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, MLX5_REG_PUDE = 0x5009, MLX5_REG_PPTB = 0x500B, MLX5_REG_PBMC = 0x500C, MLX5_REG_PELC = 0x500E, MLX5_REG_PVLC = 0x500F, MLX5_REG_PMPE = 0x5010, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PPLM = 0x5023, MLX5_REG_PDDR = 0x5031, MLX5_REG_PBSR = 0x5038, MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MTMP = 0x900a, MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, MLX5_REG_MPCNT = 0x9051, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, }; enum dbg_rsc_type { MLX5_DBG_RSC_QP, MLX5_DBG_RSC_EQ, MLX5_DBG_RSC_CQ, }; enum { MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, MLX5_INTERFACE_NUMBER = 2, }; struct mlx5_field_desc { int i; }; struct mlx5_rsc_debug { struct mlx5_core_dev *dev; void *object; enum dbg_rsc_type type; struct mlx5_field_desc fields[0]; }; enum mlx5_dev_event { MLX5_DEV_EVENT_SYS_ERROR, MLX5_DEV_EVENT_PORT_UP, MLX5_DEV_EVENT_PORT_DOWN, MLX5_DEV_EVENT_PORT_INITIALIZED, MLX5_DEV_EVENT_LID_CHANGE, MLX5_DEV_EVENT_PKEY_CHANGE, MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, MLX5_DEV_EVENT_VPORT_CHANGE, MLX5_DEV_EVENT_ERROR_STATE_DCBX, MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE, MLX5_DEV_EVENT_LOCAL_OPER_CHANGE, MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE, }; enum mlx5_port_status { MLX5_PORT_UP = 1 << 0, MLX5_PORT_DOWN = 1 << 1, }; enum { MLX5_VSC_SPACE_SUPPORTED = 0x1, MLX5_VSC_SPACE_OFFSET = 0x4, MLX5_VSC_COUNTER_OFFSET = 0x8, MLX5_VSC_SEMA_OFFSET = 0xC, MLX5_VSC_ADDR_OFFSET = 0x10, MLX5_VSC_DATA_OFFSET = 0x14, MLX5_VSC_MAX_RETRIES = 0x1000, }; #define MLX5_PROT_MASK(link_mode) (1 << link_mode) struct mlx5_cmd_first { __be32 data[4]; }; struct cache_ent; struct mlx5_fw_page { union { struct rb_node rb_node; struct list_head list; }; struct mlx5_cmd_first first; struct mlx5_core_dev *dev; bus_dmamap_t dma_map; bus_addr_t dma_addr; void *virt_addr; struct cache_ent *cache; u32 numpages; u16 load_done; #define MLX5_LOAD_ST_NONE 0 #define MLX5_LOAD_ST_SUCCESS 1 #define MLX5_LOAD_ST_FAILURE 2 u16 func_id; }; #define mlx5_cmd_msg mlx5_fw_page struct mlx5_cmd_debug { void *in_msg; void *out_msg; u8 status; u16 inlen; u16 outlen; }; struct cache_ent { /* protect block chain allocations */ spinlock_t lock; struct list_head head; }; struct cmd_msg_cache { struct cache_ent large; struct cache_ent med; }; struct mlx5_traffic_counter { u64 packets; u64 octets; }; +struct mlx5_fc_pool { + struct mlx5_core_dev *dev; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_fcs; + int used_fcs; + int threshold; +}; + +struct mlx5_fc_stats { + spinlock_t counters_idr_lock; /* protects counters_idr */ + struct idr counters_idr; + struct list_head counters; + struct llist_head addlist; + struct llist_head dellist; + + struct workqueue_struct *wq; + struct delayed_work work; + 
unsigned long next_query; + unsigned long sampling_interval; /* jiffies */ + u32 *bulk_query_out; + int bulk_query_len; + size_t num_counters; + bool bulk_query_alloc_failed; + unsigned long next_bulk_query_alloc; + struct mlx5_fc_pool fc_pool; +}; + enum mlx5_cmd_mode { MLX5_CMD_MODE_POLLING, MLX5_CMD_MODE_EVENTS }; struct mlx5_cmd_stats { u64 sum; u64 n; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { struct mlx5_fw_page *cmd_page; bus_dma_tag_t dma_tag; struct sx dma_sx; struct mtx dma_mtx; #define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx) #define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx) #define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx) struct cv dma_cv; #define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv) #define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx) void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; u8 log_sz; u8 log_stride; int max_reg_cmds; int events; u32 __iomem *vector; /* protect command queue allocations */ spinlock_t alloc_lock; /* protect token allocations */ spinlock_t token_lock; u8 token; unsigned long bitmask; struct semaphore sem; struct semaphore pages_sem; enum mlx5_cmd_mode mode; struct mlx5_cmd_work_ent * volatile ent_arr[MLX5_MAX_COMMANDS]; volatile enum mlx5_cmd_mode ent_mode[MLX5_MAX_COMMANDS]; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; }; struct mlx5_port_caps { int gid_table_len; int pkey_table_len; u8 ext_port_cap; }; struct mlx5_buf { bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; struct mlx5_core_dev *dev; struct { void *buf; } direct; u64 *page_list; int npages; int size; u8 page_shift; u8 load_done; }; struct mlx5_frag_buf { struct mlx5_buf_list *frags; int npages; int size; u8 page_shift; }; struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; u32 cons_index; struct mlx5_buf buf; int size; u8 irqn; u8 eqn; int nent; u64 mask; struct list_head list; int index; struct mlx5_rsc_debug *dbg; }; struct mlx5_core_psv { u32 psv_idx; struct psv_layout { u32 pd; u16 syndrome; u16 reserved; u16 bg; u16 app_tag; u32 ref_tag; } psv; }; struct mlx5_core_sig_ctx { struct mlx5_core_psv psv_memory; struct mlx5_core_psv psv_wire; struct ib_sig_err err_item; bool sig_status_checked; bool sig_err_exists; u32 sigerr_count; }; enum { MLX5_MKEY_MR = 1, MLX5_MKEY_MW, MLX5_MKEY_INDIRECT_DEVX, }; struct mlx5_core_mkey { u64 iova; u64 size; u32 key; u32 pd; u32 type; }; enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_XRQ = 5, MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; atomic_t refcount; struct completion free; }; struct mlx5_uars_page { void __iomem *map; bool wc; u32 index; struct list_head list; unsigned int bfregs; unsigned long *reg_bitmap; /* for non fast path bf regs */ unsigned long *fp_bitmap; unsigned int reg_avail; unsigned int fp_avail; struct kref ref_count; struct mlx5_core_dev *mdev; }; struct mlx5_bfreg_head { /* protect blue flame registers allocations */ struct mutex lock; struct list_head list; }; struct mlx5_bfreg_data { struct mlx5_bfreg_head reg_head; struct mlx5_bfreg_head wc_head; }; struct mlx5_sq_bfreg { void __iomem *map; struct mlx5_uars_page *up; bool wc; u32 index; unsigned int offset; }; struct mlx5_core_srq { struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; 
int max; size_t max_gs; size_t max_avail_gather; int wqe_shift; void (*event)(struct mlx5_core_srq *, int); atomic_t refcount; struct completion free; }; struct mlx5_ib_dev; struct mlx5_eq_table { void __iomem *update_ci; void __iomem *update_arm_ci; struct list_head comp_eqs_list; struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; int num_comp_vectors; spinlock_t lock; /* protect EQs list */ struct mlx5_ib_dev *dev; /* for devx event notifier */ bool (*cb)(struct mlx5_core_dev *mdev, uint8_t event_type, void *data); }; struct mlx5_core_health { struct mlx5_health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; u32 prev; int miss_counter; u32 fatal_error; struct workqueue_struct *wq_watchdog; struct work_struct work_watchdog; /* wq spinlock to synchronize draining */ spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; struct work_struct work; struct delayed_work recover_work; unsigned int last_reset_req; struct work_struct work_cmd_completion; struct workqueue_struct *wq_cmd; }; #define MLX5_CQ_LINEAR_ARRAY_SIZE 1024 struct mlx5_cq_linear_array_entry { struct mlx5_core_cq * volatile cq; }; struct mlx5_cq_table { /* protect radix tree */ spinlock_t writerlock; atomic_t writercount; struct radix_tree_root tree; struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE]; }; struct mlx5_qp_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_srq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_mr_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; #ifdef RATELIMIT struct mlx5_rl_entry { u32 rate; u16 burst; u16 index; u32 qos_handle; /* schedule queue handle */ u32 refcount; }; struct mlx5_rl_table { struct mutex rl_lock; u16 max_size; u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; }; #endif struct mlx5_pme_stats { u64 status_counters[MLX5_MODULE_STATUS_NUM]; u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; }; struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); int disable_irqs; /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; s64 fw_pages; atomic_t reg_pages; s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS]; struct mlx5_core_health health; struct mlx5_srq_table srq_table; /* start: qp staff */ struct mlx5_qp_table qp_table; /* end: qp staff */ /* start: cq staff */ struct mlx5_cq_table cq_table; /* end: cq staff */ /* start: mr staff */ struct mlx5_mr_table mr_table; /* end: mr staff */ /* start: alloc staff */ int numa_node; struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ /* protect mkey key part */ spinlock_t mkey_lock; u8 mkey_key; struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; unsigned long pci_dev_data; #ifdef RATELIMIT struct mlx5_rl_table rl_table; #endif struct mlx5_pme_stats pme_stats; struct mlx5_eswitch *eswitch; struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; + struct mlx5_fc_stats fc_stats; }; enum mlx5_device_state { MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; enum mlx5_interface_state { MLX5_INTERFACE_STATE_UP = 0x1, MLX5_INTERFACE_STATE_TEARDOWN = 0x2, }; enum mlx5_pci_status { MLX5_PCI_STATUS_DISABLED, MLX5_PCI_STATUS_ENABLED, }; #define MLX5_MAX_RESERVED_GIDS 8 struct mlx5_rsvd_gids { unsigned int start; unsigned int count; struct ida ida; }; struct 
mlx5_special_contexts { int resd_lkey; }; struct mlx5_diag_cnt_id { u16 id; bool enabled; }; struct mlx5_diag_cnt { #define DIAG_LOCK(dc) mutex_lock(&(dc)->lock) #define DIAG_UNLOCK(dc) mutex_unlock(&(dc)->lock) struct mutex lock; struct sysctl_ctx_list sysctl_ctx; struct mlx5_diag_cnt_id *cnt_id; u16 num_of_samples; u16 sample_index; u8 num_cnt_id; u8 log_num_of_samples; u8 log_sample_period; u8 flag; u8 ready; }; struct mlx5_flow_root_namespace; struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; enum mlx5_pci_status pci_status; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; struct { u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; } caps; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; struct mlx5_diag_cnt diag_cnt; u32 vsc_addr; u32 issi; struct mlx5_special_contexts special_contexts; unsigned int module_status[MLX5_MAX_PORTS]; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; struct mlx5_flow_root_namespace *esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER]; struct mlx5_crspace_regmap *dump_rege; uint32_t *dump_data; unsigned dump_size; bool dump_valid; bool dump_copyout; struct mtx dump_lock; struct sysctl_ctx_list sysctl_ctx; int msix_eqvec; int pwr_status; int pwr_value; struct { struct mlx5_rsvd_gids reserved_gids; atomic_t roce_en; } roce; struct { spinlock_t spinlock; #define MLX5_MPFS_TABLE_MAX 32 long bitmap[BITS_TO_LONGS(MLX5_MPFS_TABLE_MAX)]; } mpfs; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif }; enum { MLX5_WOL_DISABLE = 0, MLX5_WOL_SECURED_MAGIC = 1 << 1, MLX5_WOL_MAGIC = 1 << 2, MLX5_WOL_ARP = 1 << 3, MLX5_WOL_BROADCAST = 1 << 4, MLX5_WOL_MULTICAST = 1 << 5, MLX5_WOL_UNICAST = 1 << 6, MLX5_WOL_PHY_ACTIVITY = 1 << 7, }; struct mlx5_db { __be32 *db; union { struct mlx5_db_pgdir *pgdir; struct mlx5_ib_user_db_page *user_page; } u; dma_addr_t dma; int index; }; struct mlx5_net_counters { u64 packets; u64 octets; }; struct mlx5_ptys_reg { u8 an_dis_admin; u8 an_dis_ap; u8 local_port; u8 proto_mask; u32 eth_proto_cap; u16 ib_link_width_cap; u16 ib_proto_cap; u32 eth_proto_admin; u16 ib_link_width_admin; u16 ib_proto_admin; u32 eth_proto_oper; u16 ib_link_width_oper; u16 ib_proto_oper; u32 eth_proto_lp_advertise; }; struct mlx5_pvlc_reg { u8 local_port; u8 vl_hw_cap; u8 vl_admin; u8 vl_operational; }; struct mlx5_pmtu_reg { u8 local_port; u16 max_mtu; u16 admin_mtu; u16 oper_mtu; }; struct mlx5_vport_counters { struct mlx5_net_counters received_errors; struct mlx5_net_counters transmit_errors; struct mlx5_net_counters received_ib_unicast; struct mlx5_net_counters transmitted_ib_unicast; struct mlx5_net_counters received_ib_multicast; struct mlx5_net_counters transmitted_ib_multicast; struct mlx5_net_counters received_eth_broadcast; struct 
mlx5_net_counters transmitted_eth_broadcast; struct mlx5_net_counters received_eth_unicast; struct mlx5_net_counters transmitted_eth_unicast; struct mlx5_net_counters received_eth_multicast; struct mlx5_net_counters transmitted_eth_multicast; }; enum { MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES, }; struct mlx5_core_dct { struct mlx5_core_rsc_common common; /* must be first */ void (*event)(struct mlx5_core_dct *, int); int dctn; struct completion drained; struct mlx5_rsc_debug *dbg; int pid; u16 uid; }; enum { MLX5_PTYS_IB = 1 << 0, MLX5_PTYS_EN = 1 << 2, }; struct mlx5_db_pgdir { struct list_head list; DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); struct mlx5_fw_page *fw_page; __be32 *db_page; dma_addr_t db_dma; }; typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; int uin_size; void *uout; int uout_size; mlx5_cmd_cbk_t callback; struct delayed_work cb_timeout_work; void *context; int idx; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; struct mlx5_cmd_layout *lay; int ret; int page_queue; u8 status; u8 token; u64 ts1; u64 ts2; u16 op; u8 busy; bool polling; }; struct mlx5_pas { u64 pa; u8 log_sz; }; enum port_state_policy { MLX5_POLICY_DOWN = 0, MLX5_POLICY_UP = 1, MLX5_POLICY_FOLLOW = 2, MLX5_POLICY_INVALID = 0xffffffff }; static inline void * mlx5_buf_offset(struct mlx5_buf *buf, int offset) { return ((char *)buf->direct.buf + offset); } extern struct workqueue_struct *mlx5_core_wq; #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) { return pci_get_drvdata(pdev); } static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) & 0xffff; } static inline u16 fw_rev_min(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) >> 16; } static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n"); return 0; } return 8 * (1 << param); } static inline void *mlx5_vzalloc(unsigned long size) { void *rtn; rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); return rtn; } static inline void *mlx5_vmalloc(unsigned long size) { void *rtn; rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!rtn) rtn = vmalloc(size); return rtn; } static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; } int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); struct mlx5_async_ctx { struct mlx5_core_dev *dev; atomic_t num_inflight; struct wait_queue_head wait; }; struct mlx5_async_work; typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; }; void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx); void 
mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
    void *out, int out_size, mlx5_async_cbk_t callback,
    struct mlx5_async_work *work);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
    int out_size);
+#define	mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)			\
+	({								\
+		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
+		    MLX5_ST_SZ_BYTES(ifc_cmd##_out));			\
+	})
+
+#define	mlx5_cmd_exec_in(dev, ifc_cmd, in)				\
+	({								\
+		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};		\
+		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);		\
+	})
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
    void *out, int out_size);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
    bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
    struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    struct mlx5_srq_attr *out);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
    struct mlx5_core_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in,
    int inlen, u32 *out, int outlen, mlx5_async_cbk_t callback,
    struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
    struct mlx5_core_mkey *mr, u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
    struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev,
    struct mlx5_core_mkey *mkey, u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev,
    struct mlx5_core_mkey *mr, u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn, u16 uid);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
    u16 opmod, u8 port);
void mlx5_fwp_flush(struct mlx5_fw_page *fwp);
void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp);
struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags,
    unsigned num);
void mlx5_fwp_free(struct mlx5_fw_page *fwp);
u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset);
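/*
 * Reviewer note, not part of the patch: a minimal usage sketch for the
 * two helper macros above.  example_dealloc_fc() is a hypothetical
 * caller; the command mirrors mlx5_cmd_fc_free() added later in this
 * patch.  Both mailbox sizes are derived from the mlx5_ifc layout
 * names, so the command name is spelled out only once.
 */
static inline int
example_dealloc_fc(struct mlx5_core_dev *dev, u32 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
	    MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, counter_id);
	/* The output buffer and both lengths are supplied by the macro. */
	return (mlx5_cmd_exec_in(dev, dealloc_flow_counter, in));
}

void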
*mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset); void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vector, enum mlx5_cmd_mode mode); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable, u64 addr); int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u32 *out, int outlen); int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); static inline struct domainset * mlx5_dev_domainset(struct mlx5_core_dev *mdev) { return (linux_get_vm_domain_set(mdev->priv.numa_node)); } const char *mlx5_command_str(int command); int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev); int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode); int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout); int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout); int mlx5_query_wol(struct mlx5_core_dev *dev, u8 
*wol_mode); int mlx5_core_access_pvlc(struct mlx5_core_dev *dev, struct mlx5_pvlc_reg *pvlc, int write); int mlx5_core_access_ptys(struct mlx5_core_dev *dev, struct mlx5_ptys_reg *ptys, int write); int mlx5_core_access_pmtu(struct mlx5_core_dev *dev, struct mlx5_pmtu_reg *pmtu, int write); int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port); int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port); int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int *is_enable); int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int enable); int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol, void *out, int out_size); int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear, void *out, int out_size); int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev, u8 num_of_samples, u16 sample_index, void *out, int out_size); int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev); int mlx5_vsc_lock(struct mlx5_core_dev *mdev); void mlx5_vsc_unlock(struct mlx5_core_dev *mdev); int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space); int mlx5_vsc_wait_on_flag(struct mlx5_core_dev *mdev, u32 expected); int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, const u32 *data); int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data); int mlx5_vsc_lock_addr_space(struct mlx5_core_dev *mdev, u32 addr); int mlx5_vsc_unlock_addr_space(struct mlx5_core_dev *mdev, u32 addr); int mlx5_pci_read_power_status(struct mlx5_core_dev *mdev, u16 *p_power, u8 *p_status); static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; } static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) { return mkey_idx << 8; } static inline u8 mlx5_mkey_variant(u32 mkey) { return mkey & 0xff; } enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, }; enum { MAX_MR_CACHE_ENTRIES = 15, }; struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); void * (*get_dev)(void *context); int protocol; struct list_head list; }; void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id); struct mlx5_profile { u64 mask; u8 log_max_qp; struct { int size; int limit; } mr_cache[MAX_MR_CACHE_ENTRIES]; }; enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) { return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } #ifdef RATELIMIT int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst, u16 *index); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst); bool mlx5_rl_is_in_range(const struct mlx5_core_dev *dev, u32 rate, u32 burst); int 
mlx5e_query_rate_limit_cmd(struct mlx5_core_dev *dev, u16 index, u32 *scq_handle); static inline u32 mlx5_rl_get_scq_handle(struct mlx5_core_dev *dev, uint16_t index) { KASSERT(index > 0, ("invalid rate index for sq remap, failed retrieving SCQ handle")); return (dev->priv.rl_table.rl_entry[index - 1].qos_handle); } static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) { return !!(dev->priv.rl_table.max_size); } #endif void mlx5_disable_interrupts(struct mlx5_core_dev *); void mlx5_poll_interrupts(struct mlx5_core_dev *); static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { return !MLX5_CAP_ROCE(dev, qp_ts_format) ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; } static inline int mlx5_get_rq_default_ts(struct mlx5_core_dev *dev) { return !MLX5_CAP_GEN(dev, rq_ts_format) ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING : MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; } static inline int mlx5_get_sq_default_ts(struct mlx5_core_dev *dev) { return !MLX5_CAP_GEN(dev, sq_ts_format) ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING : MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; } #endif /* MLX5_DRIVER_H */ diff --git a/sys/dev/mlx5/fs.h b/sys/dev/mlx5/fs.h index f62716d806d0..65d38b9ee67a 100644 --- a/sys/dev/mlx5/fs.h +++ b/sys/dev/mlx5/fs.h @@ -1,278 +1,292 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#ifndef _MLX5_FS_
#define _MLX5_FS_

#include
#include
#include
#include

enum {
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};

/* Flow tag */
enum {
	MLX5_FS_DEFAULT_FLOW_TAG = 0xFFFFFF,
	MLX5_FS_ETH_FLOW_TAG = 0xFFFFFE,
	MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};

enum mlx5_rule_fwd_action {
	MLX5_FLOW_RULE_FWD_ACTION_ALLOW = 0x1,
	MLX5_FLOW_RULE_FWD_ACTION_DROP = 0x2,
	MLX5_FLOW_RULE_FWD_ACTION_DEST = 0x4,
};

enum {
	MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};

#define FS_MAX_TYPES 10
#define FS_MAX_ENTRIES 32000U
#define FS_REFORMAT_KEYWORD "_reformat"

enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_BYPASS,
	MLX5_FLOW_NAMESPACE_OFFLOADS,
	MLX5_FLOW_NAMESPACE_KERNEL,
	MLX5_FLOW_NAMESPACE_LEFTOVERS,
	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
	MLX5_FLOW_NAMESPACE_FDB,
	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};

struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;

struct mlx5_flow_spec {
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};

struct mlx5_flow_destination {
	u32 type;
	union {
		u32 tir_num;
		struct mlx5_flow_table *ft;
		u32 vport_num;
	};
};

enum mlx5_flow_act_actions {
	MLX5_FLOW_ACT_ACTIONS_FLOW_TAG = 1 << 0,
	MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR = 1 << 1,
	MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT = 1 << 2,
};

enum MLX5_FLOW_ACT_FLAGS {
	MLX5_FLOW_ACT_NO_APPEND = 1 << 0,
};

struct mlx5_flow_act {
	u32 actions; /* See enum mlx5_flow_act_actions */
	u32 flags;
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
};

#define FT_NAME_STR_SZ 20
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(char *name,
    unsigned int *priority, int *n_ent, int *n_grp)
{
	snprintf(name, FT_NAME_STR_SZ, "leftovers");
	*priority = 0; /* Priority of leftovers_prio-0 */
	*n_ent = LEFTOVERS_RULE_NUM + 1; /* 1: star rules */
	*n_grp = LEFTOVERS_RULE_NUM;
}

static inline bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_ST_SZ_BYTES(fte_match_param);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
	    outer_headers);

	return outer_headers_c[0] == 0 &&
	    !memcmp(outer_headers_c, outer_headers_c + 1, size - 1);
}

struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
    enum mlx5_flow_namespace_type type);

/* The underlying implementation creates two more entries for
 * chaining flow tables.  The user should be aware that passing
 * max_num_ftes as 2^N will result in a flow table of doubled size.
 */
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
    int prio, const char *name, int num_flow_table_entries,
    int max_num_groups, int num_reserved_entries);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, u16 vport,
    int prio, const char *name, int num_flow_table_entries);

struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio,
    const char *name, int num_flow_table_entries);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);

/* inbox should be set with the following values:
 * start_flow_index
 * end_flow_index
 * match_criteria_enable
 * match_criteria
 */
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);

/* Single destination per rule.
 * Group ID is implied by the match criteria.
 */
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft, u8 match_criteria_enable,
    u32 *match_criteria, u32 *match_value, u32 sw_action,
    struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule **);

/* The following API is for the sniffer */
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule, bool ctx_changed,
    void *client_data, void *context);

struct mlx5_flow_handler;
struct flow_client_priv_data;

void mlx5e_sniffer_roce_mode_notify(struct mlx5_core_dev *mdev, int action);

int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
    struct mlx5_flow_handler *handler, void *client_data);

struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
    enum mlx5_flow_namespace_type ns_type, rule_event_fn add_cb,
    rule_event_fn del_cb, void *context);
void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler);
void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
    rule_event_fn cb, void *context);
void mlx5_get_match_criteria(u32 *match_criteria, struct mlx5_flow_rule *rule);
void mlx5_get_match_value(u32 *match_value, struct mlx5_flow_rule *rule);
u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule);
struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode);
void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list);

struct mlx5_flow_rules_list {
	struct list_head head;
};

struct mlx5_flow_rule_node {
	struct list_head list;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
	u8 match_criteria_enable;
};

struct mlx5_core_fs_mask {
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};

bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
    void *val1, void *val2);
bool fs_match_exact_mask(u8 match_criteria_enable1,
    u8 match_criteria_enable2, void *mask1, void *mask2);
/**********end API for sniffer**********/
-
struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
    enum mlx5_flow_namespace_type ns_type, u8 num_actions,
    void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
    struct mlx5_modify_hdr *modify_hdr);

struct mlx5_pkt_reformat_params {
	int type;
	u8 param_0;
	u8 param_1;
	size_t size;
	void *data;
};

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
    struct mlx5_pkt_reformat_params *params,
    enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
    struct mlx5_pkt_reformat *pkt_reformat);

+/********** Flow counters API **********/
+struct mlx5_fc;
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
+/* Same as mlx5_fc_create(), but does not queue the periodic stats
+ * refresh work. */
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+			  u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+		  u64 *packets, u64 *bytes);
+u32 mlx5_fc_id(struct mlx5_fc *counter);
+/******* End of Flow counters API ******/
#endif
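Reviewer note, not part of the patch: a minimal sketch of the counter
lifecycle implied by the API above. example_fc_usage() is a hypothetical
caller, and it assumes the ERR_PTR/IS_ERR convention of the driver's
linuxkpi layer; printf() is only for illustration.

	static int
	example_fc_usage(struct mlx5_core_dev *dev)
	{
		struct mlx5_fc *counter;
		u64 bytes, packets, lastuse;
		int err;

		/* Allocate a counter that participates in aging/bulk refresh. */
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return (PTR_ERR(counter));

		/* The HW id is what steering commands reference. */
		printf("flow counter id %u\n", mlx5_fc_id(counter));

		/* Cached values; refreshed by the stats work, not this call. */
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

		/* Synchronous device query, bypassing the cache. */
		err = mlx5_fc_query(dev, counter, &packets, &bytes);

		mlx5_fc_destroy(dev, counter);
		return (err);
	}

diff --git a/sys/dev/mlx5/mlx5_core/fs_core.h b/sys/dev/mlx5/mlx5_core/fs_core.h
index a9273fdab61c..05757f493469 100644
--- a/sys/dev/mlx5/mlx5_core/fs_core.h
+++ b/sys/dev/mlx5/mlx5_core/fs_core.h
@@ -1,326 +1,331 @@
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.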
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _MLX5_FS_CORE_ #define _MLX5_FS_CORE_ #include #include #include #include enum fs_type { FS_TYPE_NAMESPACE, FS_TYPE_PRIO, FS_TYPE_FLOW_TABLE, FS_TYPE_FLOW_GROUP, FS_TYPE_FLOW_ENTRY, FS_TYPE_FLOW_DEST }; enum fs_ft_type { FS_FT_NIC_RX = 0x0, FS_FT_ESW_EGRESS_ACL = 0x2, FS_FT_ESW_INGRESS_ACL = 0x3, FS_FT_FDB = 0X4, FS_FT_SNIFFER_RX = 0x5, FS_FT_SNIFFER_TX = 0x6 }; enum fs_fte_status { FS_FTE_STATUS_EXISTING = 1UL << 0, }; /* Should always be the first variable in the struct */ struct fs_base { struct list_head list; struct fs_base *parent; enum fs_type type; struct kref refcount; /* lock the node for writing and traversing */ struct mutex lock; struct completion complete; atomic_t users_refcount; const char *name; }; struct mlx5_flow_rule { struct fs_base base; struct mlx5_flow_destination dest_attr; struct list_head clients_data; /*protect clients lits*/ struct mutex clients_lock; }; struct fs_fte { struct fs_base base; u32 val[MLX5_ST_SZ_DW(fte_match_param)]; uint32_t dests_size; struct list_head dests; uint32_t index; /* index in ft */ struct mlx5_flow_act flow_act; u32 sw_action; /* enum mlx5_rule_fwd_action */ enum fs_fte_status status; }; struct fs_star_rule { struct mlx5_flow_group *fg; struct fs_fte *fte; }; struct mlx5_flow_table { struct fs_base base; /* sorted list by start_index */ struct list_head fgs; struct { bool active; unsigned int max_types; unsigned int group_size; unsigned int num_types; unsigned int max_fte; } autogroup; unsigned int max_fte; unsigned int level; uint32_t id; u16 vport; enum fs_ft_type type; struct fs_star_rule star_rule; unsigned int shared_refcount; }; enum fs_prio_flags { MLX5_CORE_FS_PRIO_SHARED = 1 }; struct fs_prio { struct fs_base base; struct list_head objs; /* each object is a namespace or ft */ unsigned int max_ft; unsigned int num_ft; unsigned int max_ns; unsigned int prio; /*When create shared flow table, this lock should be taken*/ struct mutex shared_lock; u8 flags; }; struct mlx5_flow_namespace { /* parent == NULL => root ns */ struct fs_base base; /* sorted by priority number */ struct list_head prios; /* list of fs_prios */ struct list_head list_notifiers; struct rw_semaphore notifiers_rw_sem; struct rw_semaphore dests_rw_sem; }; struct mlx5_flow_root_namespace { struct 
mlx5_flow_namespace ns; struct mlx5_flow_table *ft_level_0; enum fs_ft_type table_type; struct mlx5_core_dev *dev; struct mlx5_flow_table *root_ft; /* When chaining flow-tables, this lock should be taken */ struct mutex fs_chain_lock; }; struct mlx5_flow_group { struct fs_base base; struct list_head ftes; struct mlx5_core_fs_mask mask; uint32_t start_index; uint32_t max_ftes; uint32_t num_ftes; uint32_t id; }; struct mlx5_flow_handler { struct list_head list; rule_event_fn add_dst_cb; rule_event_fn del_dst_cb; void *client_context; struct mlx5_flow_namespace *ns; }; struct fs_client_priv_data { struct mlx5_flow_handler *fs_handler; struct list_head list; void *client_dst_data; }; struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; u32 id; }; struct mlx5_pkt_reformat { enum mlx5_flow_namespace_type ns_type; int reformat_type; /* from mlx5_ifc */ u32 id; }; void _fs_remove_node(struct kref *kref); #define fs_get_obj(v, _base) {v = container_of((_base), typeof(*v), base); } #define fs_get_parent(v, child) {v = (child)->base.parent ? \ container_of((child)->base.parent, \ typeof(*v), base) : NULL; } #define fs_list_for_each_entry(pos, cond, root) \ list_for_each_entry(pos, root, base.list) \ if (!(cond)) {} else #define fs_list_for_each_entry_continue(pos, cond, root) \ list_for_each_entry_continue(pos, root, base.list) \ if (!(cond)) {} else #define fs_list_for_each_entry_reverse(pos, cond, root) \ list_for_each_entry_reverse(pos, root, base.list) \ if (!(cond)) {} else #define fs_list_for_each_entry_continue_reverse(pos, cond, root) \ list_for_each_entry_continue_reverse(pos, root, base.list) \ if (!(cond)) {} else #define fs_for_each_ft(pos, prio) \ fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_TABLE, \ &(prio)->objs) #define fs_for_each_ft_reverse(pos, prio) \ fs_list_for_each_entry_reverse(pos, \ (pos)->base.type == FS_TYPE_FLOW_TABLE, \ &(prio)->objs) #define fs_for_each_ns(pos, prio) \ fs_list_for_each_entry(pos, \ (pos)->base.type == FS_TYPE_NAMESPACE, \ &(prio)->objs) #define fs_for_each_ns_or_ft_reverse(pos, prio) \ list_for_each_entry_reverse(pos, &(prio)->objs, list) \ if (!((pos)->type == FS_TYPE_NAMESPACE || \ (pos)->type == FS_TYPE_FLOW_TABLE)) {} else #define fs_for_each_ns_or_ft(pos, prio) \ list_for_each_entry(pos, &(prio)->objs, list) \ if (!((pos)->type == FS_TYPE_NAMESPACE || \ (pos)->type == FS_TYPE_FLOW_TABLE)) {} else #define fs_for_each_ns_or_ft_continue_reverse(pos, prio) \ list_for_each_entry_continue_reverse(pos, &(prio)->objs, list) \ if (!((pos)->type == FS_TYPE_NAMESPACE || \ (pos)->type == FS_TYPE_FLOW_TABLE)) {} else #define fs_for_each_ns_or_ft_continue(pos, prio) \ list_for_each_entry_continue(pos, &(prio)->objs, list) \ if (!((pos)->type == FS_TYPE_NAMESPACE || \ (pos)->type == FS_TYPE_FLOW_TABLE)) {} else #define fs_for_each_prio(pos, ns) \ fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_PRIO, \ &(ns)->prios) #define fs_for_each_prio_reverse(pos, ns) \ fs_list_for_each_entry_reverse(pos, (pos)->base.type == FS_TYPE_PRIO, \ &(ns)->prios) #define fs_for_each_prio_continue(pos, ns) \ fs_list_for_each_entry_continue(pos, (pos)->base.type == FS_TYPE_PRIO, \ &(ns)->prios) #define fs_for_each_prio_continue_reverse(pos, ns) \ fs_list_for_each_entry_continue_reverse(pos, \ (pos)->base.type == FS_TYPE_PRIO, \ &(ns)->prios) #define fs_for_each_fg(pos, ft) \ fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_GROUP, \ &(ft)->fgs) #define fs_for_each_fte(pos, fg) \ fs_list_for_each_entry(pos, (pos)->base.type == 
FS_TYPE_FLOW_ENTRY, \ &(fg)->ftes) #define fs_for_each_dst(pos, fte) \ fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_DEST, \ &(fte)->dests) int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev, u16 vport, enum fs_ft_type type, unsigned int level, unsigned int log_size, const char *name, unsigned int *table_id); int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev, u16 vport, enum fs_ft_type type, unsigned int table_id); int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev, u32 *in, u16 vport, enum fs_ft_type type, unsigned int table_id, unsigned int *group_id); int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev, u16 vport, enum fs_ft_type type, unsigned int table_id, unsigned int group_id); int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev, u16 vport, enum fs_fte_status *fte_status, u32 *match_val, enum fs_ft_type type, unsigned int table_id, unsigned int index, unsigned int group_id, struct mlx5_flow_act *flow_act, u32 sw_action, int dest_size, struct list_head *dests); /* mlx5_flow_desination */ int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev, u16 vport, enum fs_fte_status *fte_status, enum fs_ft_type type, unsigned int table_id, unsigned int index); int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, enum fs_ft_type type, unsigned int id); int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, unsigned long interval); int mlx5_cmd_modify_header_alloc(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type namespace, u8 num_actions, void *modify_actions, struct mlx5_modify_hdr *modify_hdr); void mlx5_cmd_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); int mlx5_cmd_packet_reformat_alloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat_params *params, enum mlx5_flow_namespace_type namespace, struct mlx5_pkt_reformat *pkt_reformat); void mlx5_cmd_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *pkt_reformat); +int mlx5_init_fc_stats(struct mlx5_core_dev *dev); +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev); +void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, + struct delayed_work *dwork, + unsigned long delay); #endif diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c new file mode 100644 index 000000000000..f3410249e67f --- /dev/null +++ b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c @@ -0,0 +1,102 @@ +/*- + * Copyright (c) 2022 NVIDIA corporation & affiliates. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include + +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {}; + int err; + + MLX5_SET(alloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER); + MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask); + + err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out); + if (!err) + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + return err; +} + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) +{ + return mlx5_cmd_fc_bulk_alloc(dev, 0, id); +} + +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {}; + + MLX5_SET(dealloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); + MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); + return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in); +} + +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, + u64 *packets, u64 *bytes) +{ + u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter)] = {}; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {}; + void *stats; + int err = 0; + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, op_mod, 0); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); + *packets = MLX5_GET64(traffic_counter, stats, packets); + *bytes = MLX5_GET64(traffic_counter, stats, octets); + return 0; +} + +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out) +{ + int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len); + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {}; + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id); + MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); +} + diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h new file mode 100644 index 000000000000..3adebb3ca94c --- /dev/null +++ b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023, NVIDIA Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _MLX5_FC_CMD_ +#define _MLX5_FC_CMD_ + +#include "fs_core.h" + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, + u64 *packets, u64 *bytes); + +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out); +static inline int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len) +{ + return MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len; +} + +#endif diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c new file mode 100644 index 000000000000..7214c5256388 --- /dev/null +++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c @@ -0,0 +1,758 @@ +/*- + * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2022 NVIDIA corporation & affiliates. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include +#include + +#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000) +/* Max number of counters to query in bulk read is 32K */ +#define MLX5_SW_MAX_COUNTERS_BULK BIT(15) +#define MLX5_INIT_COUNTERS_BULK 8 +#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) +#define MLX5_FC_POOL_USED_BUFF_RATIO 10 + +struct mlx5_fc_cache { + u64 packets; + u64 bytes; + u64 lastuse; +}; + +struct mlx5_fc { + struct list_head list; + struct llist_node addlist; + struct llist_node dellist; + + /* last{packets,bytes} members are used when calculating the delta since + * last reading + */ + u64 lastpackets; + u64 lastbytes; + + struct mlx5_fc_bulk *bulk; + u32 id; + bool aging; + + struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; +}; + +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); +static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); +static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); + +/* locking scheme: + * + * It is the responsibility of the user to prevent concurrent calls or bad + * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference + * to struct mlx5_fc. + * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a + * dump (access to struct mlx5_fc) after a counter is destroyed. + * + * access to counter list: + * - create (user context) + * - mlx5_fc_create() only adds to an addlist to be used by + * mlx5_fc_stats_work(). addlist is a lockless single linked list + * that doesn't require any additional synchronization when adding single + * node. + * - spawn thread to do the actual destroy + * + * - destroy (user context) + * - add a counter to lockless dellist + * - spawn thread to do the actual del + * + * - dump (user context) + * user should not call dump after destroy + * + * - query (single thread workqueue context) + * destroy/dump - no conflict (see destroy) + * query/dump - packets and bytes might be inconsistent (since update is not + * atomic) + * query/create - no conflict (see create) + * since every create/destroy spawn the work, only after necessary time has + * elapsed, the thread will actually query the hardware. + */ + +static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev, + u32 id) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + int next_id = id + 1; + + rcu_read_lock(); + /* skip counters that are in idr, but not yet in counters list */ + while ((counter = idr_get_next(&fc_stats->counters_idr, &next_id)) != NULL && + list_empty(&counter->list)) + next_id++; + rcu_read_unlock(); + + return counter ? 
&counter->list : &fc_stats->counters; +} + +static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id); + + list_add_tail(&counter->list, next); +} + +static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + list_del(&counter->list); + + spin_lock(&fc_stats->counters_idr_lock); + WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id)); + spin_unlock(&fc_stats->counters_idr_lock); +} + +static int get_init_bulk_query_len(struct mlx5_core_dev *dev) +{ + return min_t(int, MLX5_INIT_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} + +static int get_max_bulk_query_len(struct mlx5_core_dev *dev) +{ + return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} + +static void update_counter_cache(int index, u32 *bulk_raw_data, + struct mlx5_fc_cache *cache) +{ + void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data, + flow_statistics[index]); + u64 packets = MLX5_GET64(traffic_counter, stats, packets); + u64 bytes = MLX5_GET64(traffic_counter, stats, octets); + + if (cache->packets == packets) + return; + + cache->packets = packets; + cache->bytes = bytes; + cache->lastuse = jiffies; +} + +static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, + struct mlx5_fc *first, + u32 last_id) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + bool query_more_counters = (first->id <= last_id); + int cur_bulk_len = fc_stats->bulk_query_len; + u32 *data = fc_stats->bulk_query_out; + struct mlx5_fc *counter = first; + u32 bulk_base_id; + int bulk_len; + int err; + + while (query_more_counters) { + /* first id must be aligned to 4 when using bulk query */ + bulk_base_id = counter->id & ~0x3; + + /* number of counters to query inc. 
the last counter */ + bulk_len = min_t(int, cur_bulk_len, + ALIGN(last_id - bulk_base_id + 1, 4)); + + err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, + data); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + return; + } + query_more_counters = false; + + list_for_each_entry_from(counter, &fc_stats->counters, list) { + int counter_index = counter->id - bulk_base_id; + struct mlx5_fc_cache *cache = &counter->cache; + + if (counter->id >= bulk_base_id + bulk_len) { + query_more_counters = true; + break; + } + + update_counter_cache(counter_index, data, cache); + } + } +} + +static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); +} + +static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (counter->bulk) + mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); + else + mlx5_fc_free(dev, counter); +} + +static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int max_bulk_len = get_max_bulk_query_len(dev); + unsigned long now = jiffies; + u32 *bulk_query_out_tmp; + int max_out_len; + + if (fc_stats->bulk_query_alloc_failed && + time_before(now, fc_stats->next_bulk_query_alloc)) + return; + + max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); + bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL); + if (!bulk_query_out_tmp) { + mlx5_core_warn(dev, + "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = true; + fc_stats->next_bulk_query_alloc = + now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD; + return; + } + + kfree(fc_stats->bulk_query_out); + fc_stats->bulk_query_out = bulk_query_out_tmp; + fc_stats->bulk_query_len = max_bulk_len; + if (fc_stats->bulk_query_alloc_failed) { + mlx5_core_info(dev, + "Flow counters bulk query buffer size increased, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = false; + } +} + +static void mlx5_fc_stats_work(struct work_struct *work) +{ + struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, + priv.fc_stats.work.work); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + /* Take dellist first to ensure that counters cannot be deleted before + * they are inserted. 
+ */ + struct llist_node *dellist = llist_del_all(&fc_stats->dellist); + struct llist_node *addlist = llist_del_all(&fc_stats->addlist); + struct mlx5_fc *counter = NULL, *last = NULL, *tmp; + unsigned long now = jiffies; + + if (addlist || !list_empty(&fc_stats->counters)) + queue_delayed_work(fc_stats->wq, &fc_stats->work, + fc_stats->sampling_interval); + + llist_for_each_entry(counter, addlist, addlist) { + mlx5_fc_stats_insert(dev, counter); + fc_stats->num_counters++; + } + + llist_for_each_entry_safe(counter, tmp, dellist, dellist) { + mlx5_fc_stats_remove(dev, counter); + + mlx5_fc_release(dev, counter); + fc_stats->num_counters--; + } + + if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) && + fc_stats->num_counters > get_init_bulk_query_len(dev)) + mlx5_fc_stats_bulk_query_size_increase(dev); + + if (time_before(now, fc_stats->next_query) || + list_empty(&fc_stats->counters)) + return; + last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list); + + counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, + list); + if (counter) + mlx5_fc_stats_query_counter_range(dev, counter, last->id); + + fc_stats->next_query = now + fc_stats->sampling_interval; +} + +static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev) +{ + struct mlx5_fc *counter; + int err; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + + err = mlx5_cmd_fc_alloc(dev, &counter->id); + if (err) { + kfree(counter); + return ERR_PTR(err); + } + + return counter; +} + +static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + + if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) { + counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool); + if (!IS_ERR(counter)) + return counter; + } + + return mlx5_fc_single_alloc(dev); +} + +struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int err = 0; + + if (IS_ERR(counter)) + return counter; + + INIT_LIST_HEAD(&counter->list); + counter->aging = aging; + + if (aging) { + u32 id = counter->id; + + counter->cache.lastuse = jiffies; + counter->lastbytes = counter->cache.bytes; + counter->lastpackets = counter->cache.packets; + + idr_preload(GFP_KERNEL); + spin_lock(&fc_stats->counters_idr_lock); + + err = idr_alloc(&fc_stats->counters_idr, counter, id, id + 1, + GFP_NOWAIT); + + spin_unlock(&fc_stats->counters_idr_lock); + idr_preload_end(); + if (err < 0 || err != id) + goto err_out_alloc; + + llist_add(&counter->addlist, &fc_stats->addlist); + } + + return counter; + +err_out_alloc: + mlx5_fc_release(dev, counter); + return ERR_PTR(err); +} + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (aging) + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + return counter; +} +EXPORT_SYMBOL(mlx5_fc_create); + +u32 mlx5_fc_id(struct mlx5_fc *counter) +{ + return counter->id; +} +EXPORT_SYMBOL(mlx5_fc_id); + +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (!counter) + return; + + if (counter->aging) { + llist_add(&counter->dellist, &fc_stats->dellist); + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + 
return; + } + + mlx5_fc_release(dev, counter); +} +EXPORT_SYMBOL(mlx5_fc_destroy); + +int mlx5_init_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int init_bulk_len; + int init_out_len; + + spin_lock_init(&fc_stats->counters_idr_lock); + idr_init(&fc_stats->counters_idr); + INIT_LIST_HEAD(&fc_stats->counters); + init_llist_head(&fc_stats->addlist); + init_llist_head(&fc_stats->dellist); + + init_bulk_len = get_init_bulk_query_len(dev); + init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len); + fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL); + if (!fc_stats->bulk_query_out) + return -ENOMEM; + fc_stats->bulk_query_len = init_bulk_len; + + fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); + if (!fc_stats->wq) + goto err_wq_create; + + fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD; + INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); + + mlx5_fc_pool_init(&fc_stats->fc_pool, dev); + return 0; + +err_wq_create: + kfree(fc_stats->bulk_query_out); + return -ENOMEM; +} + +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist; + struct mlx5_fc *counter; + struct mlx5_fc *tmp; + + if (!dev->priv.fc_stats.wq) + return; + + cancel_delayed_work_sync(&dev->priv.fc_stats.work); + destroy_workqueue(dev->priv.fc_stats.wq); + dev->priv.fc_stats.wq = NULL; + + tmplist = llist_del_all(&fc_stats->addlist); + llist_for_each_entry_safe(counter, tmp, tmplist, addlist) + mlx5_fc_release(dev, counter); + + list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) + mlx5_fc_release(dev, counter); + + mlx5_fc_pool_cleanup(&fc_stats->fc_pool); + idr_destroy(&fc_stats->counters_idr); + kfree(fc_stats->bulk_query_out); +} + +int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, + u64 *packets, u64 *bytes) +{ + return mlx5_cmd_fc_query(dev, counter->id, packets, bytes); +} +EXPORT_SYMBOL(mlx5_fc_query); + +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter) +{ + return counter->cache.lastuse; +} + +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse) +{ + struct mlx5_fc_cache c; + + c = counter->cache; + + *bytes = c.bytes - counter->lastbytes; + *packets = c.packets - counter->lastpackets; + *lastuse = c.lastuse; + + counter->lastbytes = c.bytes; + counter->lastpackets = c.packets; +} + +void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, + struct delayed_work *dwork, + unsigned long delay) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + queue_delayed_work(fc_stats->wq, dwork, delay); +} + +void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, + unsigned long interval) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + fc_stats->sampling_interval = min_t(unsigned long, interval, + fc_stats->sampling_interval); +} + +/* Flow counter bulks */ + +struct mlx5_fc_bulk { + struct list_head pool_list; + u32 base_id; + int bulk_len; + unsigned long *bitmask; + struct mlx5_fc fcs[]; +}; + +static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, + u32 id) +{ + counter->bulk = bulk; + counter->id = id; +} + +static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) +{ + return bitmap_weight(bulk->bitmask, bulk->bulk_len); +} + +static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) +{ + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask; + struct mlx5_fc_bulk *bulk; + int err = -ENOMEM; + int
bulk_len; + u32 base_id; + int i; + + alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); + bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; + + bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); + if (!bulk) + goto err_alloc_bulk; + + bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); + if (!bulk->bitmask) + goto err_alloc_bitmask; + + err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id); + if (err) + goto err_mlx5_cmd_bulk_alloc; + + bulk->base_id = base_id; + bulk->bulk_len = bulk_len; + for (i = 0; i < bulk_len; i++) { + mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i); + set_bit(i, bulk->bitmask); + } + + return bulk; + +err_mlx5_cmd_bulk_alloc: + kvfree(bulk->bitmask); +err_alloc_bitmask: + kvfree(bulk); +err_alloc_bulk: + return ERR_PTR(err); +} + +static int +mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) +{ + if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) { + mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); + return -EBUSY; + } + + mlx5_cmd_fc_free(dev, bulk->base_id); + kvfree(bulk->bitmask); + kvfree(bulk); + + return 0; +} + +static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) +{ + int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len); + + if (free_fc_index >= bulk->bulk_len) + return ERR_PTR(-ENOSPC); + + clear_bit(free_fc_index, bulk->bitmask); + return &bulk->fcs[free_fc_index]; +} + +static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) +{ + int fc_index = fc->id - bulk->base_id; + + if (test_bit(fc_index, bulk->bitmask)) + return -EINVAL; + + set_bit(fc_index, bulk->bitmask); + return 0; +} + +/* Flow counters pool API */ + +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) +{ + fc_pool->dev = dev; + mutex_init(&fc_pool->pool_lock); + INIT_LIST_HEAD(&fc_pool->fully_used); + INIT_LIST_HEAD(&fc_pool->partially_used); + INIT_LIST_HEAD(&fc_pool->unused); + fc_pool->available_fcs = 0; + fc_pool->used_fcs = 0; + fc_pool->threshold = 0; +} + +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk; + struct mlx5_fc_bulk *tmp; + + list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); +} + +static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool) +{ + fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, + fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO); +} + +static struct mlx5_fc_bulk * +mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *new_bulk; + + new_bulk = mlx5_fc_bulk_create(dev); + if (!IS_ERR(new_bulk)) + fc_pool->available_fcs += new_bulk->bulk_len; + mlx5_fc_pool_update_threshold(fc_pool); + return new_bulk; +} + +static void +mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + + fc_pool->available_fcs -= bulk->bulk_len; + mlx5_fc_bulk_destroy(dev, bulk); + mlx5_fc_pool_update_threshold(fc_pool); +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_from_list(struct list_head *src_list, + struct list_head *next_list, 
+ bool move_non_full_bulk) +{ + struct mlx5_fc_bulk *bulk; + struct mlx5_fc *fc; + + if (list_empty(src_list)) + return ERR_PTR(-ENODATA); + + bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list); + fc = mlx5_fc_bulk_acquire_fc(bulk); + if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0) + list_move(&bulk->pool_list, next_list); + return fc; +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_fc_bulk *new_bulk; + struct mlx5_fc *fc; + + mutex_lock(&fc_pool->pool_lock); + + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used, + &fc_pool->fully_used, false); + if (IS_ERR(fc)) + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused, + &fc_pool->partially_used, + true); + if (IS_ERR(fc)) { + new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool); + if (IS_ERR(new_bulk)) { + fc = ERR_CAST(new_bulk); + goto out; + } + fc = mlx5_fc_bulk_acquire_fc(new_bulk); + list_add(&new_bulk->pool_list, &fc_pool->partially_used); + } + fc_pool->available_fcs--; + fc_pool->used_fcs++; + +out: + mutex_unlock(&fc_pool->pool_lock); + return fc; +} + +static void +mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk = fc->bulk; + int bulk_free_fcs_amount; + + mutex_lock(&fc_pool->pool_lock); + + if (mlx5_fc_bulk_release_fc(bulk, fc)) { + mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); + goto unlock; + } + + fc_pool->available_fcs++; + fc_pool->used_fcs--; + + bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk); + if (bulk_free_fcs_amount == 1) + list_move_tail(&bulk->pool_list, &fc_pool->partially_used); + if (bulk_free_fcs_amount == bulk->bulk_len) { + list_del(&bulk->pool_list); + if (fc_pool->available_fcs > fc_pool->threshold) + mlx5_fc_pool_free_bulk(fc_pool, bulk); + else + list_add(&bulk->pool_list, &fc_pool->unused); + } + +unlock: + mutex_unlock(&fc_pool->pool_lock); +} diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c index c4edc940b1bc..91543d3878ef 100644 --- a/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c +++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c @@ -1,2862 +1,2867 @@ /*- * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_rss.h" #include "opt_ratelimit.h" #include #include #include #include #include #include #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) #define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \ ...) {.type = FS_TYPE_PRIO,\ .name = name_val,\ .min_ft_level = min_level_val,\ .flags = flags_val,\ .max_ft = max_ft_val,\ .caps = caps_val,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } #define ADD_FT_PRIO(name_val, flags_val, max_ft_val, ...)\ ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\ __VA_ARGS__)\ #define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\ .name = name_val,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\ sizeof(long)) #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap)) #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ .caps = (long[]) {__VA_ARGS__}} /* Flowtable sizes: */ #define BYPASS_MAX_FT 5 #define BYPASS_PRIO_MAX_FT 1 #define OFFLOADS_MAX_FT 2 #define KERNEL_MAX_FT 5 #define LEFTOVER_MAX_FT 1 /* Flowtable levels: */ #define OFFLOADS_MIN_LEVEL 3 #define KERNEL_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1) #define LEFTOVER_MIN_LEVEL (KERNEL_MIN_LEVEL + 1) #define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL) struct node_caps { size_t arr_sz; long *caps; }; struct init_tree_node { enum fs_type type; const char *name; struct init_tree_node *children; int ar_size; struct node_caps caps; u8 flags; int min_ft_level; int prio; int max_ft; } root_fs = { .type = FS_TYPE_NAMESPACE, .name = "root", .ar_size = 4, .children = (struct init_tree_node[]) { ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CAP(flow_table_properties_nic_receive.modify_root)), ADD_NS("by_pass_ns", ADD_FT_PRIO("prio0", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio1", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio2", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio3", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio4", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio5", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio6", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio7", 0, BYPASS_PRIO_MAX_FT), ADD_FT_PRIO("prio-mcast", 0, BYPASS_PRIO_MAX_FT))), ADD_PRIO("offloads_prio", 0, OFFLOADS_MIN_LEVEL, 0, {}, ADD_NS("offloads_ns", ADD_FT_PRIO("prio_offloads-0", 0, OFFLOADS_MAX_FT))), ADD_PRIO("kernel_prio", 0, KERNEL_MIN_LEVEL, 0, {}, ADD_NS("kernel_ns", ADD_FT_PRIO("prio_kernel-0", 0, KERNEL_MAX_FT))), ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED, LEFTOVER_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CAP(flow_table_properties_nic_receive.modify_root)), ADD_NS("leftover_ns", ADD_FT_PRIO("leftovers_prio-0", MLX5_CORE_FS_PRIO_SHARED, LEFTOVER_MAX_FT))) } }; /* Tree creation functions */ 
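/*
 * Editor's note: the following is an illustrative expansion, not part of the
 * patch. The ADD_PRIO/ADD_NS/ADD_FT_PRIO macros above build the static
 * root_fs tree out of nested struct init_tree_node initializers. Assuming
 * the macro definitions shown above, a leaf such as
 * ADD_FT_PRIO("prio0", 0, BYPASS_PRIO_MAX_FT) roughly expands to:
 *
 *   { .type = FS_TYPE_PRIO, .name = "prio0", .min_ft_level = 0, .flags = 0,
 *     .max_ft = BYPASS_PRIO_MAX_FT, .caps = {},
 *     .children = (struct init_tree_node[]) {}, .ar_size = 0 }
 *
 * The minimum-level defines chain arithmetically: OFFLOADS_MIN_LEVEL is 3,
 * so KERNEL_MIN_LEVEL = 4, LEFTOVER_MIN_LEVEL = 5 and
 * BYPASS_MIN_LEVEL = MLX5_NUM_BYPASS_FTS + 5.
 */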
static struct mlx5_flow_root_namespace *find_root(struct fs_base *node) { struct fs_base *parent; /* Make sure we only read it once while we go up the tree */ while ((parent = node->parent)) node = parent; if (node->type != FS_TYPE_NAMESPACE) { return NULL; } return container_of(container_of(node, struct mlx5_flow_namespace, base), struct mlx5_flow_root_namespace, ns); } static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node) { struct mlx5_flow_root_namespace *root = find_root(node); if (root) return root->dev; return NULL; } static void fs_init_node(struct fs_base *node, unsigned int refcount) { kref_init(&node->refcount); atomic_set(&node->users_refcount, refcount); init_completion(&node->complete); INIT_LIST_HEAD(&node->list); mutex_init(&node->lock); } static void _fs_add_node(struct fs_base *node, const char *name, struct fs_base *parent) { if (parent) atomic_inc(&parent->users_refcount); node->name = kstrdup_const(name, GFP_KERNEL); node->parent = parent; } static void fs_add_node(struct fs_base *node, struct fs_base *parent, const char *name, unsigned int refcount) { fs_init_node(node, refcount); _fs_add_node(node, name, parent); } static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref), bool parent_locked); static void fs_del_dst(struct mlx5_flow_rule *dst); static void _fs_del_ft(struct mlx5_flow_table *ft); static void fs_del_fg(struct mlx5_flow_group *fg); static void fs_del_fte(struct fs_fte *fte); static void cmd_remove_node(struct fs_base *base) { switch (base->type) { case FS_TYPE_FLOW_DEST: fs_del_dst(container_of(base, struct mlx5_flow_rule, base)); break; case FS_TYPE_FLOW_TABLE: _fs_del_ft(container_of(base, struct mlx5_flow_table, base)); break; case FS_TYPE_FLOW_GROUP: fs_del_fg(container_of(base, struct mlx5_flow_group, base)); break; case FS_TYPE_FLOW_ENTRY: fs_del_fte(container_of(base, struct fs_fte, base)); break; default: break; } } static void __fs_remove_node(struct kref *kref) { struct fs_base *node = container_of(kref, struct fs_base, refcount); if (node->parent) mutex_lock(&node->parent->lock); mutex_lock(&node->lock); cmd_remove_node(node); mutex_unlock(&node->lock); complete(&node->complete); if (node->parent) { mutex_unlock(&node->parent->lock); _fs_put(node->parent, _fs_remove_node, false); } } void _fs_remove_node(struct kref *kref) { struct fs_base *node = container_of(kref, struct fs_base, refcount); __fs_remove_node(kref); kfree_const(node->name); kfree(node); } static void fs_get(struct fs_base *node) { atomic_inc(&node->users_refcount); } static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref), bool parent_locked) { struct fs_base *parent_node = node->parent; if (parent_node && !parent_locked) mutex_lock(&parent_node->lock); if (atomic_dec_and_test(&node->users_refcount)) { if (parent_node) { /*remove from parent's list*/ list_del_init(&node->list); mutex_unlock(&parent_node->lock); } kref_put(&node->refcount, kref_cb); if (parent_node && parent_locked) mutex_lock(&parent_node->lock); } else if (parent_node && !parent_locked) { mutex_unlock(&parent_node->lock); } } static void fs_put(struct fs_base *node) { _fs_put(node, __fs_remove_node, false); } static void fs_put_parent_locked(struct fs_base *node) { _fs_put(node, __fs_remove_node, true); } static void fs_remove_node(struct fs_base *node) { fs_put(node); wait_for_completion(&node->complete); kfree_const(node->name); kfree(node); } static void fs_remove_node_parent_locked(struct fs_base *node) { fs_put_parent_locked(node); 
wait_for_completion(&node->complete); kfree_const(node->name); kfree(node); } static struct fs_fte *fs_alloc_fte(u32 sw_action, struct mlx5_flow_act *flow_act, u32 *match_value, unsigned int index) { struct fs_fte *fte; fte = kzalloc(sizeof(*fte), GFP_KERNEL); if (!fte) return ERR_PTR(-ENOMEM); memcpy(fte->val, match_value, sizeof(fte->val)); fte->base.type = FS_TYPE_FLOW_ENTRY; fte->dests_size = 0; fte->index = index; INIT_LIST_HEAD(&fte->dests); fte->flow_act = *flow_act; fte->sw_action = sw_action; return fte; } static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg, u32 *match_value, unsigned int index) { int err; struct fs_fte *fte; struct mlx5_flow_rule *dst; struct mlx5_flow_act flow_act = { .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG, .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, }; if (fg->num_ftes == fg->max_ftes) return ERR_PTR(-ENOSPC); fte = fs_alloc_fte(MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, match_value, index); if (IS_ERR(fte)) return fte; /*create dst*/ dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) { err = -ENOMEM; goto free_fte; } fte->base.parent = &fg->base; fte->dests_size = 1; dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE; dst->base.parent = &fte->base; list_add(&dst->base.list, &fte->dests); /* assumed that the callee creates the star rules sorted by index */ list_add_tail(&fte->base.list, &fg->ftes); fg->num_ftes++; return fte; free_fte: kfree(fte); return ERR_PTR(err); } /* assume that fte can't be changed */ static void free_star_fte_entry(struct fs_fte *fte) { struct mlx5_flow_group *fg; struct mlx5_flow_rule *dst, *temp; fs_get_parent(fg, fte); list_for_each_entry_safe(dst, temp, &fte->dests, base.list) { fte->dests_size--; list_del(&dst->base.list); kfree(dst); } list_del(&fte->base.list); fg->num_ftes--; kfree(fte); } static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in) { struct mlx5_flow_group *fg; void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, create_fg_in, match_criteria); u8 match_criteria_enable = MLX5_GET(create_flow_group_in, create_fg_in, match_criteria_enable); fg = kzalloc(sizeof(*fg), GFP_KERNEL); if (!fg) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&fg->ftes); fg->mask.match_criteria_enable = match_criteria_enable; memcpy(&fg->mask.match_criteria, match_criteria, sizeof(fg->mask.match_criteria)); fg->base.type = FS_TYPE_FLOW_GROUP; fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in, start_flow_index); fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in, end_flow_index) - fg->start_index + 1; return fg; } static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio); static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr, struct fs_prio *prio); /* assumed src_ft and dst_ft can't be freed */ static int fs_set_star_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *src_ft, struct mlx5_flow_table *dst_ft) { struct mlx5_flow_rule *src_dst; struct fs_fte *src_fte; int err = 0; u32 *match_value; int match_len = MLX5_ST_SZ_BYTES(fte_match_param); src_dst = list_first_entry(&src_ft->star_rule.fte->dests, struct mlx5_flow_rule, base.list); match_value = mlx5_vzalloc(match_len); if (!match_value) { mlx5_core_warn(dev, "failed to allocate inbox\n"); return -ENOMEM; } /*Create match context*/ fs_get_parent(src_fte, src_dst); src_dst->dest_attr.ft = dst_ft; if (dst_ft) { err = mlx5_cmd_fs_set_fte(dev, src_ft->vport, &src_fte->status, match_value, src_ft->type, src_ft->id, src_fte->index, src_ft->star_rule.fg->id, &src_fte->flow_act, 
src_fte->sw_action, src_fte->dests_size, &src_fte->dests); if (err) goto free; fs_get(&dst_ft->base); } else { mlx5_cmd_fs_delete_fte(dev, src_ft->vport, &src_fte->status, src_ft->type, src_ft->id, src_fte->index); } free: kvfree(match_value); return err; } static int connect_prev_fts(struct fs_prio *locked_prio, struct fs_prio *prev_prio, struct mlx5_flow_table *next_ft) { struct mlx5_flow_table *iter; int err = 0; struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base); if (!dev) return -ENODEV; mutex_lock(&prev_prio->base.lock); fs_for_each_ft(iter, prev_prio) { struct mlx5_flow_rule *src_dst = list_first_entry(&iter->star_rule.fte->dests, struct mlx5_flow_rule, base.list); struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft; if (prev_ft == next_ft) continue; err = fs_set_star_rule(dev, iter, next_ft); if (err) { mlx5_core_warn(dev, "mlx5: flow steering can't connect prev and next\n"); goto unlock; } else { /* Assume ft's prio is locked */ if (prev_ft) { struct fs_prio *prio; fs_get_parent(prio, prev_ft); if (prio == locked_prio) fs_put_parent_locked(&prev_ft->base); else fs_put(&prev_ft->base); } } } unlock: mutex_unlock(&prev_prio->base.lock); return 0; } static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio) { struct mlx5_flow_group *fg; int err; u32 *fg_in; u32 *match_value; struct mlx5_flow_table *next_ft; struct mlx5_flow_table *prev_ft; struct mlx5_flow_root_namespace *root = find_root(&prio->base); int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int match_len = MLX5_ST_SZ_BYTES(fte_match_param); fg_in = mlx5_vzalloc(fg_inlen); if (!fg_in) { mlx5_core_warn(root->dev, "failed to allocate inbox\n"); return -ENOMEM; } match_value = mlx5_vzalloc(match_len); if (!match_value) { mlx5_core_warn(root->dev, "failed to allocate inbox\n"); kvfree(fg_in); return -ENOMEM; } MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte); MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte); fg = fs_alloc_fg(fg_in); if (IS_ERR(fg)) { err = PTR_ERR(fg); goto out; } ft->star_rule.fg = fg; err = mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base), fg_in, ft->vport, ft->type, ft->id, &fg->id); if (err) goto free_fg; ft->star_rule.fte = alloc_star_ft_entry(ft, fg, match_value, ft->max_fte); if (IS_ERR(ft->star_rule.fte)) goto free_star_rule; mutex_lock(&root->fs_chain_lock); next_ft = find_next_ft(prio); err = fs_set_star_rule(root->dev, ft, next_ft); if (err) { mutex_unlock(&root->fs_chain_lock); goto free_star_rule; } if (next_ft) { struct fs_prio *parent; fs_get_parent(parent, next_ft); fs_put(&next_ft->base); } prev_ft = find_prev_ft(ft, prio); if (prev_ft) { struct fs_prio *prev_parent; fs_get_parent(prev_parent, prev_ft); err = connect_prev_fts(NULL, prev_parent, ft); if (err) { mutex_unlock(&root->fs_chain_lock); goto destroy_chained_star_rule; } fs_put(&prev_ft->base); } mutex_unlock(&root->fs_chain_lock); kvfree(fg_in); kvfree(match_value); return 0; destroy_chained_star_rule: fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL); if (next_ft) fs_put(&next_ft->base); free_star_rule: free_star_fte_entry(ft->star_rule.fte); mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport, ft->type, ft->id, fg->id); free_fg: kfree(fg); out: kvfree(fg_in); kvfree(match_value); return err; } static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio) { int err; struct mlx5_flow_root_namespace *root; struct mlx5_core_dev *dev = fs_get_dev(&prio->base); struct mlx5_flow_table *prev_ft, *next_ft; struct fs_prio *prev_prio; WARN_ON(!dev); 
root = find_root(&prio->base); if (!root) mlx5_core_err(dev, "flow steering failed to find root of priority %s", prio->base.name); /* In order to ensure atomic deletion, first update * prev ft to point on the next ft. */ mutex_lock(&root->fs_chain_lock); prev_ft = find_prev_ft(ft, prio); next_ft = find_next_ft(prio); if (prev_ft) { fs_get_parent(prev_prio, prev_ft); /*Prev is connected to ft, only if ft is the first(last) in the prio*/ err = connect_prev_fts(prio, prev_prio, next_ft); if (err) mlx5_core_warn(root->dev, "flow steering can't connect prev and next of flow table\n"); fs_put(&prev_ft->base); } err = fs_set_star_rule(root->dev, ft, NULL); /*One put is for fs_get in find next ft*/ if (next_ft) { fs_put(&next_ft->base); if (!err) fs_put(&next_ft->base); } mutex_unlock(&root->fs_chain_lock); err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id, ft->star_rule.fg->id); if (err) mlx5_core_warn(dev, "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index, ft->base.name); free_star_fte_entry(ft->star_rule.fte); kfree(ft->star_rule.fg); ft->star_rule.fg = NULL; } static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, unsigned int prio) { struct fs_prio *iter_prio; fs_for_each_prio(iter_prio, ns) { if (iter_prio->prio == prio) return iter_prio; } return NULL; } static unsigned int _alloc_new_level(struct fs_prio *prio, struct mlx5_flow_namespace *match); static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns, struct fs_prio *prio) { unsigned int level = 0; struct fs_prio *p; if (!ns) return 0; mutex_lock(&ns->base.lock); fs_for_each_prio(p, ns) { if (p != prio) level += p->max_ft; else break; } mutex_unlock(&ns->base.lock); fs_get_parent(prio, ns); if (prio) WARN_ON(prio->base.type != FS_TYPE_PRIO); return level + _alloc_new_level(prio, ns); } /* Called under lock of priority, hence locking all upper objects */ static unsigned int _alloc_new_level(struct fs_prio *prio, struct mlx5_flow_namespace *match) { struct mlx5_flow_namespace *ns; struct fs_base *it; unsigned int level = 0; if (!prio) return 0; mutex_lock(&prio->base.lock); fs_for_each_ns_or_ft_reverse(it, prio) { if (it->type == FS_TYPE_NAMESPACE) { struct fs_prio *p; fs_get_obj(ns, it); if (match != ns) { mutex_lock(&ns->base.lock); fs_for_each_prio(p, ns) level += p->max_ft; mutex_unlock(&ns->base.lock); } else { break; } } else { struct mlx5_flow_table *ft; fs_get_obj(ft, it); mutex_unlock(&prio->base.lock); return level + ft->level + 1; } } fs_get_parent(ns, prio); mutex_unlock(&prio->base.lock); return __alloc_new_level(ns, prio) + level; } static unsigned int alloc_new_level(struct fs_prio *prio) { return _alloc_new_level(prio, NULL); } static int update_root_ft_create(struct mlx5_flow_root_namespace *root, struct mlx5_flow_table *ft) { int err = 0; int min_level = INT_MAX; if (root->root_ft) min_level = root->root_ft->level; if (ft->level < min_level) err = mlx5_cmd_update_root_ft(root->dev, ft->type, ft->id); else return err; if (err) mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", ft->id); else root->root_ft = ft; return err; } static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns, u16 vport, struct fs_prio *fs_prio, int max_fte, const char *name) { struct mlx5_flow_table *ft; int err; int log_table_sz; int ft_size; char gen_name[20]; struct mlx5_flow_root_namespace *root = find_root(&ns->base); struct mlx5_core_dev *dev = fs_get_dev(&ns->base); if (!root) { mlx5_core_err(dev, "flow steering failed to 
find root of namespace %s", ns->base.name); return ERR_PTR(-ENODEV); } if (fs_prio->num_ft == fs_prio->max_ft) return ERR_PTR(-ENOSPC); ft = kzalloc(sizeof(*ft), GFP_KERNEL); if (!ft) return ERR_PTR(-ENOMEM); fs_init_node(&ft->base, 1); INIT_LIST_HEAD(&ft->fgs); /* Temporarily WA until we expose the level set in the API */ if (root->table_type == FS_FT_ESW_EGRESS_ACL || root->table_type == FS_FT_ESW_INGRESS_ACL) ft->level = 0; else ft->level = alloc_new_level(fs_prio); ft->base.type = FS_TYPE_FLOW_TABLE; ft->vport = vport; ft->type = root->table_type; /*Two entries are reserved for star rules*/ ft_size = roundup_pow_of_two(max_fte + 2); /*User isn't aware to those rules*/ ft->max_fte = ft_size - 2; log_table_sz = ilog2(ft_size); if (name == NULL || name[0] == '\0') { snprintf(gen_name, sizeof(gen_name), "flow_table_%u", ft->id); name = gen_name; } err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type, ft->level, log_table_sz, name, &ft->id); if (err) goto free_ft; err = create_star_rule(ft, fs_prio); if (err) goto del_ft; if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev, flow_table_properties_nic_receive.modify_root)) { err = update_root_ft_create(root, ft); if (err) goto destroy_star_rule; } _fs_add_node(&ft->base, name, &fs_prio->base); list_add_tail(&ft->base.list, &fs_prio->objs); fs_prio->num_ft++; return ft; destroy_star_rule: destroy_star_rule(ft, fs_prio); del_ft: mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id); free_ft: kfree(ft); return ERR_PTR(err); } static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns, u16 vport, unsigned int prio, int max_fte, const char *name) { struct fs_prio *fs_prio = NULL; fs_prio = find_prio(ns, prio); if (!fs_prio) return ERR_PTR(-EINVAL); return _create_ft_common(ns, vport, fs_prio, max_fte, name); } static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns, struct list_head *start); static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio, struct list_head *start); static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio) { struct mlx5_flow_table *ft; ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs); if (ft) { ft->shared_refcount++; return ft; } return NULL; } struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, const char *name, int num_flow_table_entries, int max_num_groups, int num_reserved_entries) { struct mlx5_flow_table *ft = NULL; struct fs_prio *fs_prio; bool is_shared_prio; if (max_num_groups > (num_flow_table_entries - num_reserved_entries)) return ERR_PTR(-EINVAL); if (num_reserved_entries > num_flow_table_entries) return ERR_PTR(-EINVAL); fs_prio = find_prio(ns, prio); if (!fs_prio) return ERR_PTR(-EINVAL); is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED; if (is_shared_prio) { mutex_lock(&fs_prio->shared_lock); ft = mlx5_create_autogrouped_shared_flow_table(fs_prio); } if (ft) goto return_ft; ft = create_ft_common(ns, 0, prio, num_flow_table_entries, name); if (IS_ERR(ft)) goto return_ft; ft->autogroup.active = true; ft->autogroup.max_types = max_num_groups; ft->autogroup.max_fte = num_flow_table_entries - num_reserved_entries; /* We save place for flow groups in addition to max types */ ft->autogroup.group_size = ft->autogroup.max_fte / (max_num_groups + 1); if (is_shared_prio) ft->shared_refcount = 1; return_ft: if (is_shared_prio) mutex_unlock(&fs_prio->shared_lock); return ft; } 
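/*
 * Editor's note: illustrative usage sketch, not part of the patch. A caller
 * holding a flow namespace pointer ns (obtained elsewhere in the driver)
 * could create an auto-grouped table with hypothetical sizes like these:
 *
 *   struct mlx5_flow_table *ft;
 *
 *   ft = mlx5_create_auto_grouped_flow_table(ns, 0, "example_ft", 64, 4, 0);
 *   if (IS_ERR(ft))
 *       return PTR_ERR(ft);
 *
 * Here num_flow_table_entries = 64, max_num_groups = 4 and
 * num_reserved_entries = 0 satisfy the sanity checks above (4 <= 64 - 0),
 * and autogroup.group_size becomes 64 / (4 + 1) = 12, i.e. each
 * automatically created flow group spans up to 12 flow entries.
 */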
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, u16 vport, int prio, const char *name, int num_flow_table_entries) { return create_ft_common(ns, vport, prio, num_flow_table_entries, name); } EXPORT_SYMBOL(mlx5_create_vport_flow_table); struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, const char *name, int num_flow_table_entries) { return create_ft_common(ns, 0, prio, num_flow_table_entries, name); } EXPORT_SYMBOL(mlx5_create_flow_table); static void _fs_del_ft(struct mlx5_flow_table *ft) { int err; struct mlx5_core_dev *dev = fs_get_dev(&ft->base); struct fs_prio *prio; err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft %s\n", ft->base.name); fs_get_parent(prio, ft); prio->num_ft--; } static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root, struct mlx5_flow_table *ft) { int err = 0; struct fs_prio *prio; struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *put_ft = NULL; if (root->root_ft != ft) return 0; fs_get_parent(prio, ft); /*Assuming objs contains only flow tables and * flow tables are sorted by level. */ if (!list_is_last(&ft->base.list, &prio->objs)) { next_ft = list_next_entry(ft, base.list); } else { next_ft = find_next_ft(prio); put_ft = next_ft; } if (next_ft) { err = mlx5_cmd_update_root_ft(root->dev, next_ft->type, next_ft->id); if (err) mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", ft->id); } if (!err) root->root_ft = next_ft; if (put_ft) fs_put(&put_ft->base); return err; } /*Objects in the same prio are destroyed in the reverse order they were created*/ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) { int err = 0; struct fs_prio *prio; struct mlx5_flow_root_namespace *root; bool is_shared_prio; struct mlx5_core_dev *dev; fs_get_parent(prio, ft); root = find_root(&prio->base); dev = fs_get_dev(&prio->base); if (!root) { mlx5_core_err(dev, "flow steering failed to find root of priority %s", prio->base.name); return -ENODEV; } is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED; if (is_shared_prio) { mutex_lock(&prio->shared_lock); if (ft->shared_refcount > 1) { --ft->shared_refcount; fs_put(&ft->base); mutex_unlock(&prio->shared_lock); return 0; } } mutex_lock(&prio->base.lock); mutex_lock(&ft->base.lock); err = update_root_ft_destroy(root, ft); if (err) goto unlock_ft; /* delete two last entries */ destroy_star_rule(ft, prio); mutex_unlock(&ft->base.lock); fs_remove_node_parent_locked(&ft->base); mutex_unlock(&prio->base.lock); if (is_shared_prio) mutex_unlock(&prio->shared_lock); return err; unlock_ft: mutex_unlock(&ft->base.lock); mutex_unlock(&prio->base.lock); if (is_shared_prio) mutex_unlock(&prio->shared_lock); return err; } EXPORT_SYMBOL(mlx5_destroy_flow_table); static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct list_head *prev, u32 *fg_in, int refcount) { struct mlx5_flow_group *fg; unsigned int group_size; int err; char name[20]; fg = fs_alloc_fg(fg_in); if (IS_ERR(fg)) return fg; group_size = MLX5_GET(create_flow_group_in, fg_in, end_flow_index) - MLX5_GET(create_flow_group_in, fg_in, start_flow_index) + 1; err = mlx5_cmd_fs_create_fg(dev, fg_in, ft->vport, ft->type, ft->id, &fg->id); if (err) goto free_fg; mutex_lock(&ft->base.lock); if (ft->autogroup.active && group_size == ft->autogroup.group_size) ft->autogroup.num_types++; snprintf(name,
sizeof(name), "group_%u", fg->id); /*Add node to tree*/ fs_add_node(&fg->base, &ft->base, name, refcount); /*Add node to group list*/ list_add(&fg->base.list, prev); mutex_unlock(&ft->base.lock); return fg; free_fg: kfree(fg); return ERR_PTR(err); } struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in) { struct mlx5_flow_group *fg; struct mlx5_core_dev *dev = fs_get_dev(&ft->base); unsigned int start_index; start_index = MLX5_GET(create_flow_group_in, in, start_flow_index); if (!dev) return ERR_PTR(-ENODEV); if (ft->autogroup.active && start_index < ft->autogroup.max_fte) return ERR_PTR(-EPERM); fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1); return fg; } EXPORT_SYMBOL(mlx5_create_flow_group); /*Group is destroyed when all the rules in the group were removed*/ static void fs_del_fg(struct mlx5_flow_group *fg) { struct mlx5_flow_table *parent_ft; struct mlx5_core_dev *dev; fs_get_parent(parent_ft, fg); dev = fs_get_dev(&parent_ft->base); WARN_ON(!dev); if (parent_ft->autogroup.active && fg->max_ftes == parent_ft->autogroup.group_size && fg->start_index < parent_ft->autogroup.max_fte) parent_ft->autogroup.num_types--; if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport, parent_ft->type, parent_ft->id, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg\n"); } void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) { fs_remove_node(&fg->base); } EXPORT_SYMBOL(mlx5_destroy_flow_group); static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size) { unsigned int i; /* TODO: optimize by comparing 64bits when possible */ for (i = 0; i < size; i++, mask++, val1++, val2++) if ((*((u8 *)val1) & (*(u8 *)mask)) != ((*(u8 *)val2) & (*(u8 *)mask))) return false; return true; } bool fs_match_exact_val(struct mlx5_core_fs_mask *mask, void *val1, void *val2) { if (mask->match_criteria_enable & 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) { void *fte_match1 = MLX5_ADDR_OF(fte_match_param, val1, outer_headers); void *fte_match2 = MLX5_ADDR_OF(fte_match_param, val2, outer_headers); void *fte_mask = MLX5_ADDR_OF(fte_match_param, mask->match_criteria, outer_headers); if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2, MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) return false; } if (mask->match_criteria_enable & 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) { void *fte_match1 = MLX5_ADDR_OF(fte_match_param, val1, misc_parameters); void *fte_match2 = MLX5_ADDR_OF(fte_match_param, val2, misc_parameters); void *fte_mask = MLX5_ADDR_OF(fte_match_param, mask->match_criteria, misc_parameters); if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2, MLX5_ST_SZ_BYTES(fte_match_set_misc))) return false; } if (mask->match_criteria_enable & 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) { void *fte_match1 = MLX5_ADDR_OF(fte_match_param, val1, inner_headers); void *fte_match2 = MLX5_ADDR_OF(fte_match_param, val2, inner_headers); void *fte_mask = MLX5_ADDR_OF(fte_match_param, mask->match_criteria, inner_headers); if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2, MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) return false; } return true; } bool fs_match_exact_mask(u8 match_criteria_enable1, u8 match_criteria_enable2, void *mask1, void *mask2) { return match_criteria_enable1 == match_criteria_enable2 && !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param)); } static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns, struct list_head *start); static struct
mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio, struct list_head *start) { struct fs_base *it = container_of(start, struct fs_base, list); if (!prio) return NULL; fs_for_each_ns_or_ft_continue_reverse(it, prio) { struct mlx5_flow_namespace *ns; struct mlx5_flow_table *ft; if (it->type == FS_TYPE_FLOW_TABLE) { fs_get_obj(ft, it); fs_get(&ft->base); return ft; } fs_get_obj(ns, it); WARN_ON(ns->base.type != FS_TYPE_NAMESPACE); ft = find_first_ft_in_ns_reverse(ns, &ns->prios); if (ft) return ft; } return NULL; } static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio, struct list_head *start) { struct mlx5_flow_table *ft; if (!prio) return NULL; mutex_lock(&prio->base.lock); ft = _find_first_ft_in_prio_reverse(prio, start); mutex_unlock(&prio->base.lock); return ft; } static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns, struct list_head *start) { struct fs_prio *prio; if (!ns) return NULL; fs_get_obj(prio, container_of(start, struct fs_base, list)); mutex_lock(&ns->base.lock); fs_for_each_prio_continue_reverse(prio, ns) { struct mlx5_flow_table *ft; ft = find_first_ft_in_prio_reverse(prio, &prio->objs); if (ft) { mutex_unlock(&ns->base.lock); return ft; } } mutex_unlock(&ns->base.lock); return NULL; } /* Returned a held ft, assumed curr is protected, assumed curr's parent is * locked */ static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr, struct fs_prio *prio) { struct mlx5_flow_table *ft = NULL; struct fs_base *curr_base; if (!curr) return NULL; /* prio has either namespace or flow-tables, but not both */ if (!list_empty(&prio->objs) && list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) != curr) return NULL; while (!ft && prio) { struct mlx5_flow_namespace *ns; fs_get_parent(ns, prio); ft = find_first_ft_in_ns_reverse(ns, &prio->base.list); curr_base = &ns->base; fs_get_parent(prio, ns); if (prio && !ft) ft = find_first_ft_in_prio_reverse(prio, &curr_base->list); } return ft; } static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio, struct list_head *start) { struct fs_base *it = container_of(start, struct fs_base, list); if (!prio) return NULL; fs_for_each_ns_or_ft_continue(it, prio) { struct mlx5_flow_namespace *ns; struct mlx5_flow_table *ft; if (it->type == FS_TYPE_FLOW_TABLE) { fs_get_obj(ft, it); fs_get(&ft->base); return ft; } fs_get_obj(ns, it); WARN_ON(ns->base.type != FS_TYPE_NAMESPACE); ft = find_first_ft_in_ns(ns, &ns->prios); if (ft) return ft; } return NULL; } static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio, struct list_head *start) { struct mlx5_flow_table *ft; if (!prio) return NULL; mutex_lock(&prio->base.lock); ft = _find_first_ft_in_prio(prio, start); mutex_unlock(&prio->base.lock); return ft; } static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns, struct list_head *start) { struct fs_prio *prio; if (!ns) return NULL; fs_get_obj(prio, container_of(start, struct fs_base, list)); mutex_lock(&ns->base.lock); fs_for_each_prio_continue(prio, ns) { struct mlx5_flow_table *ft; ft = find_first_ft_in_prio(prio, &prio->objs); if (ft) { mutex_unlock(&ns->base.lock); return ft; } } mutex_unlock(&ns->base.lock); return NULL; } /* returned a held ft, assumed curr is protected, assumed curr's parent is * locked */ static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio) { struct mlx5_flow_table *ft = NULL; struct fs_base *curr_base; while (!ft && prio) { struct mlx5_flow_namespace *ns; 
fs_get_parent(ns, prio); ft = find_first_ft_in_ns(ns, &prio->base.list); curr_base = &ns->base; fs_get_parent(prio, ns); if (!ft && prio) ft = _find_first_ft_in_prio(prio, &curr_base->list); } return ft; } /* called under ft mutex lock */ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, u8 match_criteria_enable, u32 *match_criteria) { unsigned int group_size; unsigned int candidate_index = 0; struct mlx5_flow_group *g; struct mlx5_flow_group *ret; struct list_head *prev = &ft->fgs; struct mlx5_core_dev *dev; u32 *in; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; u32 max_fte = ft->autogroup.max_fte; if (!ft->autogroup.active) return ERR_PTR(-ENOENT); dev = fs_get_dev(&ft->base); if (!dev) return ERR_PTR(-ENODEV); in = mlx5_vzalloc(inlen); if (!in) { mlx5_core_warn(dev, "failed to allocate inbox\n"); return ERR_PTR(-ENOMEM); } if (ft->autogroup.num_types < ft->autogroup.max_types) group_size = ft->autogroup.group_size; else group_size = 1; if (group_size == 0) { mlx5_core_warn(dev, "flow steering can't create group size of 0\n"); ret = ERR_PTR(-EINVAL); goto out; } /* sorted by start_index */ fs_for_each_fg(g, ft) { if (candidate_index + group_size > g->start_index) candidate_index = g->start_index + g->max_ftes; else break; prev = &g->base.list; } if (candidate_index + group_size > max_fte) { ret = ERR_PTR(-ENOSPC); goto out; } MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index); MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index + group_size - 1); match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); memcpy(match_criteria_addr, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param)); ret = fs_create_fg(dev, ft, prev, in, 0); out: kvfree(in); return ret; } static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node) { struct mlx5_flow_namespace *ns = NULL; while (node && (node->type != FS_TYPE_NAMESPACE || list_empty(&container_of(node, struct mlx5_flow_namespace, base)->list_notifiers))) node = node->parent; if (node) fs_get_obj(ns, node); return ns; } /*Assumption- fte is locked*/ static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst, struct fs_fte *fte) { struct mlx5_flow_namespace *ns; struct mlx5_flow_handler *iter_handler; struct fs_client_priv_data *iter_client; void *data; bool is_new_rule = list_first_entry(&fte->dests, struct mlx5_flow_rule, base.list) == dst; int err; ns = get_ns_with_notifiers(&fte->base); if (!ns) return; down_read(&ns->notifiers_rw_sem); list_for_each_entry(iter_handler, &ns->list_notifiers, list) { if (iter_handler->add_dst_cb) { data = NULL; mutex_lock(&dst->clients_lock); list_for_each_entry( iter_client, &dst->clients_data, list) { if (iter_client->fs_handler == iter_handler) { data = iter_client->client_dst_data; break; } } mutex_unlock(&dst->clients_lock); err = iter_handler->add_dst_cb(dst, is_new_rule, data, iter_handler->client_context); if (err) break; } } up_read(&ns->notifiers_rw_sem); } static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst, struct fs_fte *fte) { struct mlx5_flow_namespace *ns; struct mlx5_flow_handler *iter_handler; struct fs_client_priv_data *iter_client; void *data; bool ctx_changed = (fte->dests_size == 0); ns = get_ns_with_notifiers(&fte->base); if (!ns) return; down_read(&ns->notifiers_rw_sem); list_for_each_entry(iter_handler, &ns->list_notifiers, list) { data = NULL; 
mutex_lock(&dst->clients_lock); list_for_each_entry(iter_client, &dst->clients_data, list) { if (iter_client->fs_handler == iter_handler) { data = iter_client->client_dst_data; break; } } mutex_unlock(&dst->clients_lock); if (iter_handler->del_dst_cb) { iter_handler->del_dst_cb(dst, ctx_changed, data, iter_handler->client_context); } } up_read(&ns->notifiers_rw_sem); } /* fte should not be deleted while calling this function */ static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte, struct mlx5_flow_group *fg, struct mlx5_flow_destination *dest) { struct mlx5_flow_table *ft; struct mlx5_flow_rule *dst; int err; dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) return ERR_PTR(-ENOMEM); memcpy(&dst->dest_attr, dest, sizeof(*dest)); dst->base.type = FS_TYPE_FLOW_DEST; INIT_LIST_HEAD(&dst->clients_data); mutex_init(&dst->clients_lock); fs_get_parent(ft, fg); /* Add dst to the fte's destination list; list_add_tail links it last, just before the head */ list_add_tail(&dst->base.list, &fte->dests); fte->dests_size++; err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base), ft->vport, &fte->status, fte->val, ft->type, ft->id, fte->index, fg->id, &fte->flow_act, fte->sw_action, fte->dests_size, &fte->dests); if (err) goto free_dst; list_del(&dst->base.list); return dst; free_dst: list_del(&dst->base.list); kfree(dst); fte->dests_size--; return ERR_PTR(err); } static char *get_dest_name(struct mlx5_flow_destination *dest) { char *name = kzalloc(sizeof(char) * 20, GFP_KERNEL); if (!name) return NULL; switch (dest->type) { case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE: snprintf(name, 20, "dest_%s_%u", "flow_table", dest->ft->id); return name; case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT: snprintf(name, 20, "dest_%s_%u", "vport", dest->vport_num); return name; case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR: snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num); return name; default: kfree(name); return NULL; } } /* assumed fg is locked */ static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg, struct list_head **prev) { struct fs_fte *fte; unsigned int start = fg->start_index; if (prev) *prev = &fg->ftes; /* assumed list is sorted by index */ fs_for_each_fte(fte, fg) { if (fte->index != start) return start; start++; if (prev) *prev = &fte->base.list; } return start; } static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg, u32 *match_value, u32 sw_action, struct mlx5_flow_act *flow_act, struct list_head **prev) { struct fs_fte *fte; int index; index = fs_get_free_fg_index(fg, prev); fte = fs_alloc_fte(sw_action, flow_act, match_value, index); if (IS_ERR(fte)) return fte; return fte; } static void add_rule_to_tree(struct mlx5_flow_rule *rule, struct fs_fte *fte) { char *dest_name; dest_name = get_dest_name(&rule->dest_attr); fs_add_node(&rule->base, &fte->base, dest_name, 1); /* re-add to list, since fs_add_node reset our list */ list_add_tail(&rule->base.list, &fte->dests); kfree(dest_name); call_to_add_rule_notifiers(rule, fte); } static void fs_del_dst(struct mlx5_flow_rule *dst) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; u32 *match_value; struct mlx5_core_dev *dev = fs_get_dev(&dst->base); int match_len = MLX5_ST_SZ_BYTES(fte_match_param); int err; WARN_ON(!dev); match_value = mlx5_vzalloc(match_len); if (!match_value) { mlx5_core_warn(dev, "failed to allocate inbox\n"); return; } fs_get_parent(fte, dst); fs_get_parent(fg, fte); mutex_lock(&fg->base.lock); memcpy(match_value, fte->val, sizeof(fte->val)); /* ft can't be changed as fg is locked */ fs_get_parent(ft, fg); list_del(&dst->base.list);
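/* The rule is now unlinked. If other destinations remain, the FTE is
 * re-programmed below with the shortened list; an FTE left empty is torn
 * down later, when its own tree node is removed. */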
fte->dests_size--; if (fte->dests_size) { err = mlx5_cmd_fs_set_fte(dev, ft->vport, &fte->status, match_value, ft->type, ft->id, fte->index, fg->id, &fte->flow_act, fte->sw_action, fte->dests_size, &fte->dests); if (err) { mlx5_core_warn(dev, "%s can't delete dst %s\n", __func__, dst->base.name); goto err; } } call_to_del_rule_notifiers(dst, fte); err: mutex_unlock(&fg->base.lock); kvfree(match_value); } static void fs_del_fte(struct fs_fte *fte) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; int err; struct mlx5_core_dev *dev; fs_get_parent(fg, fte); fs_get_parent(ft, fg); dev = fs_get_dev(&ft->base); WARN_ON(!dev); err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status, ft->type, ft->id, fte->index); if (err) mlx5_core_warn(dev, "flow steering can't delete fte %s\n", fte->base.name); fg->num_ftes--; } static bool check_conflicting_actions(const struct mlx5_flow_act *act1, const struct mlx5_flow_act *act2) { u32 action1 = act1->actions; u32 action2 = act2->actions; u32 xored_actions; xored_actions = action1 ^ action2; if (xored_actions & (MLX5_FLOW_ACT_ACTIONS_FLOW_TAG)) return true; if (action1 & MLX5_FLOW_ACT_ACTIONS_FLOW_TAG && act1->flow_tag != act2->flow_tag) return true; /* Rules carrying complex actions (modify header, packet reformat) are never merged */ if (action1 & MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR) return true; if (action1 & MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT) return true; return false; } /* assuming parent fg is locked */ /* Add dst algorithm */ static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg, u32 *match_value, u32 sw_action, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest) { struct fs_fte *fte; struct mlx5_flow_rule *dst; struct mlx5_flow_table *ft; struct list_head *prev; char fte_name[20]; mutex_lock(&fg->base.lock); if (flow_act->flags & MLX5_FLOW_ACT_NO_APPEND) goto insert_fte; fs_for_each_fte(fte, fg) { /* TODO: check the size against the PRM max size */ mutex_lock(&fte->base.lock); if (fs_match_exact_val(&fg->mask, match_value, &fte->val) && sw_action == fte->sw_action && !check_conflicting_actions(flow_act, &fte->flow_act)) { dst = _fs_add_dst_fte(fte, fg, dest); mutex_unlock(&fte->base.lock); if (IS_ERR(dst)) goto unlock_fg; goto add_rule; } mutex_unlock(&fte->base.lock); } insert_fte: fs_get_parent(ft, fg); if (fg->num_ftes == fg->max_ftes) { dst = ERR_PTR(-ENOSPC); goto unlock_fg; } fte = fs_create_fte(fg, match_value, sw_action, flow_act, &prev); if (IS_ERR(fte)) { dst = (void *)fte; goto unlock_fg; } dst = _fs_add_dst_fte(fte, fg, dest); if (IS_ERR(dst)) { kfree(fte); goto unlock_fg; } fg->num_ftes++; snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index); /* Add node to tree */ fs_add_node(&fte->base, &fg->base, fte_name, 0); list_add(&fte->base.list, prev); add_rule: add_rule_to_tree(dst, fte); unlock_fg: mutex_unlock(&fg->base.lock); return dst; } static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft, u8 match_criteria_enable, u32 *match_criteria, u32 *match_value, u32 sw_action, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest) { /*?
where dst_entry is allocated*/ struct mlx5_flow_group *g; struct mlx5_flow_rule *dst; fs_get(&ft->base); mutex_lock(&ft->base.lock); fs_for_each_fg(g, ft) if (fs_match_exact_mask(g->mask.match_criteria_enable, match_criteria_enable, g->mask.match_criteria, match_criteria)) { mutex_unlock(&ft->base.lock); dst = fs_add_dst_fg(g, match_value, sw_action, flow_act, dest); if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC) goto unlock; } mutex_unlock(&ft->base.lock); g = create_autogroup(ft, match_criteria_enable, match_criteria); if (IS_ERR(g)) { dst = (void *)g; goto unlock; } dst = fs_add_dst_fg(g, match_value, sw_action, flow_act, dest); if (IS_ERR(dst)) { /* Remove assumes refcount > 0 and autogroup creates a group * with a refcount = 0. */ fs_get(&g->base); fs_remove_node(&g->base); goto unlock; } unlock: fs_put(&ft->base); return dst; } struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, u8 match_criteria_enable, u32 *match_criteria, u32 *match_value, u32 sw_action, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest) { struct mlx5_flow_rule *dst; struct mlx5_flow_namespace *ns; ns = get_ns_with_notifiers(&ft->base); if (ns) down_read(&ns->dests_rw_sem); dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria, match_value, sw_action, flow_act, dest); if (ns) up_read(&ns->dests_rw_sem); return dst; } EXPORT_SYMBOL(mlx5_add_flow_rule); void mlx5_del_flow_rule(struct mlx5_flow_rule **pp) { struct mlx5_flow_namespace *ns; struct mlx5_flow_rule *dst; dst = *pp; *pp = NULL; if (IS_ERR_OR_NULL(dst)) return; ns = get_ns_with_notifiers(&dst->base); if (ns) down_read(&ns->dests_rw_sem); fs_remove_node(&dst->base); if (ns) up_read(&ns->dests_rw_sem); } EXPORT_SYMBOL(mlx5_del_flow_rule); #define MLX5_CORE_FS_ROOT_NS_NAME "root" #define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root" #define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root" #define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root" #define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root" #define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root" #define MLX5_CORE_FS_PRIO_MAX_FT 4 #define MLX5_CORE_FS_PRIO_MAX_NS 1 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, unsigned prio, int max_ft, const char *name, u8 flags) { struct fs_prio *fs_prio; fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL); if (!fs_prio) return ERR_PTR(-ENOMEM); fs_prio->base.type = FS_TYPE_PRIO; fs_add_node(&fs_prio->base, &ns->base, name, 1); fs_prio->max_ft = max_ft; fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS; fs_prio->prio = prio; fs_prio->flags = flags; list_add_tail(&fs_prio->base.list, &ns->prios); INIT_LIST_HEAD(&fs_prio->objs); mutex_init(&fs_prio->shared_lock); return fs_prio; } static void cleanup_root_ns(struct mlx5_core_dev *dev) { struct mlx5_flow_root_namespace *root_ns = dev->root_ns; struct fs_prio *iter_prio; if (!root_ns) return; /* stage 1 */ fs_for_each_prio(iter_prio, &root_ns->ns) { struct mlx5_flow_namespace *iter_ns; fs_for_each_ns(iter_ns, iter_prio) { while (!list_empty(&iter_ns->prios)) { struct fs_base *iter_prio2 = list_first_entry(&iter_ns->prios, struct fs_base, list); fs_remove_node(iter_prio2); } } } /* stage 2 */ fs_for_each_prio(iter_prio, &root_ns->ns) { while (!list_empty(&iter_prio->objs)) { struct fs_base *iter_ns = list_first_entry(&iter_prio->objs, struct fs_base, list); fs_remove_node(iter_ns); } } /* stage 3 */ while (!list_empty(&root_ns->ns.prios)) { struct fs_base *iter_prio = list_first_entry(&root_ns->ns.prios, struct fs_base, list); fs_remove_node(iter_prio); } 
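/* Stages 1-3 tore the tree down bottom-up: prios nested in sub-namespaces,
 * then the sub-namespaces themselves, then the root's own prios. Only the
 * root namespace node is left to release. */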
fs_remove_node(&root_ns->ns.base); dev->root_ns = NULL; } static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, struct mlx5_flow_root_namespace *root_ns) { struct fs_base *prio; if (!root_ns) return; if (!list_empty(&root_ns->ns.prios)) { prio = list_first_entry(&root_ns->ns.prios, struct fs_base, list); fs_remove_node(prio); } fs_remove_node(&root_ns->ns.base); root_ns = NULL; } void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { + mlx5_cleanup_fc_stats(dev); cleanup_root_ns(dev); cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns); cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns); cleanup_single_prio_root_ns(dev, dev->fdb_root_ns); cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns); cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns); } static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace *ns) { ns->base.type = FS_TYPE_NAMESPACE; init_rwsem(&ns->dests_rw_sem); init_rwsem(&ns->notifiers_rw_sem); INIT_LIST_HEAD(&ns->prios); INIT_LIST_HEAD(&ns->list_notifiers); return ns; } static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, enum fs_ft_type table_type, char *name) { struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; /* create the root namespace */ root_ns = mlx5_vzalloc(sizeof(*root_ns)); if (!root_ns) goto err; root_ns->dev = dev; root_ns->table_type = table_type; mutex_init(&root_ns->fs_chain_lock); ns = &root_ns->ns; fs_init_namespace(ns); fs_add_node(&ns->base, NULL, name, 1); return root_ns; err: return NULL; } static int init_fdb_root_ns(struct mlx5_core_dev *dev) { struct fs_prio *prio; dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB, MLX5_CORE_FS_FDB_ROOT_NS_NAME); if (!dev->fdb_root_ns) return -ENOMEM; /* create 1 prio*/ prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0); if (IS_ERR(prio)) return PTR_ERR(prio); else return 0; } #define MAX_VPORTS 128 static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) { struct fs_prio *prio; dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL, MLX5_CORE_FS_ESW_EGRESS_ACL); if (!dev->esw_egress_root_ns) return -ENOMEM; /* create 1 prio*/ prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS, "esw_egress_prio", 0); if (IS_ERR(prio)) return PTR_ERR(prio); else return 0; } static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) { struct fs_prio *prio; dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL, MLX5_CORE_FS_ESW_INGRESS_ACL); if (!dev->esw_ingress_root_ns) return -ENOMEM; /* create 1 prio*/ prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS, "esw_ingress_prio", 0); if (IS_ERR(prio)) return PTR_ERR(prio); else return 0; } static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev) { struct fs_prio *prio; dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX, MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME); if (!dev->sniffer_rx_root_ns) return -ENOMEM; /* create 1 prio*/ prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1, "sniffer_prio", 0); if (IS_ERR(prio)) return PTR_ERR(prio); else return 0; } static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev) { struct fs_prio *prio; dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX, MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME); if (!dev->sniffer_tx_root_ns) return -ENOMEM; /* create 1 prio*/ prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1, "sniffer_prio", 0); if (IS_ERR(prio)) return PTR_ERR(prio); else return 0; } static struct mlx5_flow_namespace 
*fs_create_namespace(struct fs_prio *prio, const char *name) { struct mlx5_flow_namespace *ns; ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (!ns) return ERR_PTR(-ENOMEM); fs_init_namespace(ns); fs_add_node(&ns->base, &prio->base, name, 1); list_add_tail(&ns->base.list, &prio->objs); return ns; } #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) { int i; for (i = 0; i < caps->arr_sz; i++) { if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i])) return false; } return true; } static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level, struct init_tree_node *node, struct fs_base *base_parent, struct init_tree_node *tree_parent) { struct mlx5_flow_namespace *fs_ns; struct fs_prio *fs_prio; int priority; struct fs_base *base; int i; int err = 0; if (node->type == FS_TYPE_PRIO) { if ((node->min_ft_level > max_ft_level) || !has_required_caps(dev, &node->caps)) goto out; fs_get_obj(fs_ns, base_parent); priority = node - tree_parent->children; fs_prio = fs_create_prio(fs_ns, priority, node->max_ft, node->name, node->flags); if (IS_ERR(fs_prio)) { err = PTR_ERR(fs_prio); goto out; } base = &fs_prio->base; } else if (node->type == FS_TYPE_NAMESPACE) { fs_get_obj(fs_prio, base_parent); fs_ns = fs_create_namespace(fs_prio, node->name); if (IS_ERR(fs_ns)) { err = PTR_ERR(fs_ns); goto out; } base = &fs_ns->base; } else { return -EINVAL; } for (i = 0; i < node->ar_size; i++) { err = _init_root_tree(dev, max_ft_level, &node->children[i], base, node); if (err) break; } out: return err; } static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level, struct init_tree_node *node, struct fs_base *parent) { int i; struct mlx5_flow_namespace *fs_ns; int err = 0; fs_get_obj(fs_ns, parent); for (i = 0; i < node->ar_size; i++) { err = _init_root_tree(dev, max_ft_level, &node->children[i], &fs_ns->base, node); if (err) break; } return err; } static int sum_max_ft_in_prio(struct fs_prio *prio); static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns) { struct fs_prio *prio; int sum = 0; fs_for_each_prio(prio, ns) { sum += sum_max_ft_in_prio(prio); } return sum; } static int sum_max_ft_in_prio(struct fs_prio *prio) { int sum = 0; struct fs_base *it; struct mlx5_flow_namespace *ns; if (prio->max_ft) return prio->max_ft; fs_for_each_ns_or_ft(it, prio) { if (it->type == FS_TYPE_FLOW_TABLE) continue; fs_get_obj(ns, it); sum += sum_max_ft_in_ns(ns); } prio->max_ft = sum; return sum; } static void set_max_ft(struct mlx5_flow_namespace *ns) { struct fs_prio *prio; if (!ns) return; fs_for_each_prio(prio, ns) sum_max_ft_in_prio(prio); } static int init_root_ns(struct mlx5_core_dev *dev) { int max_ft_level = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. 
max_ft_level); dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX, MLX5_CORE_FS_ROOT_NS_NAME); if (IS_ERR_OR_NULL(dev->root_ns)) goto err; if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base)) goto err; set_max_ft(&dev->root_ns->ns); return 0; err: return -ENOMEM; } u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule) { struct fs_base *pbase; struct mlx5_flow_group *fg; pbase = rule->base.parent; WARN_ON(!pbase); pbase = pbase->parent; WARN_ON(!pbase); fs_get_obj(fg, pbase); return fg->mask.match_criteria_enable; } void mlx5_get_match_value(u32 *match_value, struct mlx5_flow_rule *rule) { struct fs_base *pbase; struct fs_fte *fte; pbase = rule->base.parent; WARN_ON(!pbase); fs_get_obj(fte, pbase); memcpy(match_value, fte->val, sizeof(fte->val)); } void mlx5_get_match_criteria(u32 *match_criteria, struct mlx5_flow_rule *rule) { struct fs_base *pbase; struct mlx5_flow_group *fg; pbase = rule->base.parent; WARN_ON(!pbase); pbase = pbase->parent; WARN_ON(!pbase); fs_get_obj(fg, pbase); memcpy(match_criteria, &fg->mask.match_criteria, sizeof(fg->mask.match_criteria)); } int mlx5_init_fs(struct mlx5_core_dev *dev) { int err; if (MLX5_CAP_GEN(dev, nic_flow_table)) { err = init_root_ns(dev); if (err) goto err; } err = init_fdb_root_ns(dev); if (err) goto err; err = init_egress_acl_root_ns(dev); if (err) goto err; err = init_ingress_acl_root_ns(dev); if (err) goto err; err = init_sniffer_tx_root_ns(dev); if (err) goto err; err = init_sniffer_rx_root_ns(dev); if (err) goto err; + err = mlx5_init_fc_stats(dev); + if (err) + goto err; + return 0; err: mlx5_cleanup_fs(dev); return err; } struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) { struct mlx5_flow_root_namespace *root_ns = dev->root_ns; int prio; struct fs_prio *fs_prio; struct mlx5_flow_namespace *ns; switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: prio = 0; break; case MLX5_FLOW_NAMESPACE_OFFLOADS: prio = 1; break; case MLX5_FLOW_NAMESPACE_KERNEL: prio = 2; break; case MLX5_FLOW_NAMESPACE_LEFTOVERS: prio = 3; break; case MLX5_FLOW_NAMESPACE_FDB: if (dev->fdb_root_ns) return &dev->fdb_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_EGRESS: if (dev->esw_egress_root_ns) return &dev->esw_egress_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_INGRESS: if (dev->esw_ingress_root_ns) return &dev->esw_ingress_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (dev->sniffer_rx_root_ns) return &dev->sniffer_rx_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_TX: if (dev->sniffer_tx_root_ns) return &dev->sniffer_tx_root_ns->ns; else return NULL; default: return NULL; } if (!root_ns) return NULL; fs_prio = find_prio(&root_ns->ns, prio); if (!fs_prio) return NULL; ns = list_first_entry(&fs_prio->objs, typeof(*ns), base.list); return ns; } EXPORT_SYMBOL(mlx5_get_flow_namespace); int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule, struct mlx5_flow_handler *fs_handler, void *client_data) { struct fs_client_priv_data *priv_data; mutex_lock(&rule->clients_lock); /* Check whether the handler already exists in the list */ list_for_each_entry(priv_data, &rule->clients_data, list) { if (priv_data->fs_handler == fs_handler) { priv_data->client_dst_data = client_data; goto unlock; } } priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); if (!priv_data) { mutex_unlock(&rule->clients_lock); return -ENOMEM; } priv_data->client_dst_data = client_data; priv_data->fs_handler = fs_handler;
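/* No entry for this handler yet: attach the new private data to the
 * rule's client list. */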
list_add(&priv_data->list, &rule->clients_data); unlock: mutex_unlock(&rule->clients_lock); return 0; } static int remove_from_clients(struct mlx5_flow_rule *rule, bool ctx_changed, void *client_data, void *context) { struct fs_client_priv_data *iter_client; struct fs_client_priv_data *temp_client; struct mlx5_flow_handler *handler = (struct mlx5_flow_handler*)context; mutex_lock(&rule->clients_lock); list_for_each_entry_safe(iter_client, temp_client, &rule->clients_data, list) { if (iter_client->fs_handler == handler) { list_del(&iter_client->list); kfree(iter_client); break; } } mutex_unlock(&rule->clients_lock); return 0; } struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type, rule_event_fn add_cb, rule_event_fn del_cb, void *context) { struct mlx5_flow_namespace *ns; struct mlx5_flow_handler *handler; ns = mlx5_get_flow_namespace(dev, ns_type); if (!ns) return ERR_PTR(-EINVAL); handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler) return ERR_PTR(-ENOMEM); handler->add_dst_cb = add_cb; handler->del_dst_cb = del_cb; handler->client_context = context; handler->ns = ns; down_write(&ns->notifiers_rw_sem); list_add_tail(&handler->list, &ns->list_notifiers); up_write(&ns->notifiers_rw_sem); return handler; } static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns, rule_event_fn add_rule_cb, void *context); void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler) { struct mlx5_flow_namespace *ns = handler->ns; /*Remove from dst's clients*/ down_write(&ns->dests_rw_sem); down_write(&ns->notifiers_rw_sem); iterate_rules_in_ns(ns, remove_from_clients, handler); list_del(&handler->list); up_write(&ns->notifiers_rw_sem); up_write(&ns->dests_rw_sem); kfree(handler); } static void iterate_rules_in_ft(struct mlx5_flow_table *ft, rule_event_fn add_rule_cb, void *context) { struct mlx5_flow_group *iter_fg; struct fs_fte *iter_fte; struct mlx5_flow_rule *iter_rule; int err = 0; bool is_new_rule; mutex_lock(&ft->base.lock); fs_for_each_fg(iter_fg, ft) { mutex_lock(&iter_fg->base.lock); fs_for_each_fte(iter_fte, iter_fg) { mutex_lock(&iter_fte->base.lock); is_new_rule = true; fs_for_each_dst(iter_rule, iter_fte) { fs_get(&iter_rule->base); err = add_rule_cb(iter_rule, is_new_rule, NULL, context); fs_put_parent_locked(&iter_rule->base); if (err) break; is_new_rule = false; } mutex_unlock(&iter_fte->base.lock); if (err) break; } mutex_unlock(&iter_fg->base.lock); if (err) break; } mutex_unlock(&ft->base.lock); } static void iterate_rules_in_prio(struct fs_prio *prio, rule_event_fn add_rule_cb, void *context) { struct fs_base *it; mutex_lock(&prio->base.lock); fs_for_each_ns_or_ft(it, prio) { if (it->type == FS_TYPE_FLOW_TABLE) { struct mlx5_flow_table *ft; fs_get_obj(ft, it); iterate_rules_in_ft(ft, add_rule_cb, context); } else { struct mlx5_flow_namespace *ns; fs_get_obj(ns, it); iterate_rules_in_ns(ns, add_rule_cb, context); } } mutex_unlock(&prio->base.lock); } static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns, rule_event_fn add_rule_cb, void *context) { struct fs_prio *iter_prio; mutex_lock(&ns->base.lock); fs_for_each_prio(iter_prio, ns) { iterate_rules_in_prio(iter_prio, add_rule_cb, context); } mutex_unlock(&ns->base.lock); } void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns, rule_event_fn add_rule_cb, void *context) { down_write(&ns->dests_rw_sem); down_read(&ns->notifiers_rw_sem); iterate_rules_in_ns(ns, add_rule_cb, context); up_read(&ns->notifiers_rw_sem); 
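/* dests_rw_sem was held for write around the whole walk, so no rule
 * could be added or removed while the existing ones were replayed to
 * the callback. */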
up_write(&ns->dests_rw_sem); } void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list) { struct mlx5_flow_rule_node *iter_node; struct mlx5_flow_rule_node *temp_node; list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) { list_del(&iter_node->list); kfree(iter_node); } kfree(rules_list); } #define ROCEV1_ETHERTYPE 0x8915 static int set_rocev1_rules(struct list_head *rules_list) { struct mlx5_flow_rule_node *rocev1_rule; rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL); if (!rocev1_rule) return -ENOMEM; rocev1_rule->match_criteria_enable = 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS; MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype, 0xffff); MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype, ROCEV1_ETHERTYPE); list_add_tail(&rocev1_rule->list, rules_list); return 0; } #define ROCEV2_UDP_PORT 4791 static int set_rocev2_rules(struct list_head *rules_list) { struct mlx5_flow_rule_node *ipv4_rule; struct mlx5_flow_rule_node *ipv6_rule; ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL); if (!ipv4_rule) return -ENOMEM; ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL); if (!ipv6_rule) { kfree(ipv4_rule); return -ENOMEM; } ipv4_rule->match_criteria_enable = 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS; MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype, 0xffff); MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype, 0x0800); MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol, IPPROTO_UDP); MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport, 0xffff); MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport, ROCEV2_UDP_PORT); ipv6_rule->match_criteria_enable = 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS; MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype, 0xffff); MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype, 0x86dd); MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol, IPPROTO_UDP); MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport, 0xffff); MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport, ROCEV2_UDP_PORT); list_add_tail(&ipv4_rule->list, rules_list); list_add_tail(&ipv6_rule->list, rules_list); return 0; } struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode) { int err = 0; struct mlx5_flow_rules_list *rules_list = kzalloc(sizeof(*rules_list), GFP_KERNEL); if (!rules_list) return NULL; INIT_LIST_HEAD(&rules_list->head); if (roce_mode & MLX5_ROCE_VERSION_1_CAP) { err = set_rocev1_rules(&rules_list->head); if (err) goto free_list; } if (roce_mode & MLX5_ROCE_VERSION_2_CAP) err = set_rocev2_rules(&rules_list->head); if (err) goto free_list; return rules_list; free_list: mlx5_del_flow_rules_list(rules_list); return NULL; } struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type, u8 num_actions, void *modify_actions) { struct mlx5_modify_hdr *modify_hdr; int err; modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL); if (!modify_hdr) return ERR_PTR(-ENOMEM); modify_hdr->ns_type = ns_type; err = mlx5_cmd_modify_header_alloc(dev, ns_type, num_actions, modify_actions, modify_hdr); if (err) { kfree(modify_hdr); return ERR_PTR(err); } return modify_hdr; } 
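/*
 * A minimal caller-side sketch of mlx5_modify_header_alloc(). The
 * set_action_in layout and the MLX5_ACTION_* constants are assumed from
 * mlx5_ifc and shown for illustration only:
 *
 *	u8 actions[MLX5_ST_SZ_BYTES(set_action_in)] = {};
 *	struct mlx5_modify_hdr *mh;
 *
 *	MLX5_SET(set_action_in, actions, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, actions, field,
 *	    MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, actions, data, 64);
 *
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL, 1,
 *	    actions);
 *	if (IS_ERR(mh))
 *		return (PTR_ERR(mh));
 */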
EXPORT_SYMBOL(mlx5_modify_header_alloc); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr) { mlx5_cmd_modify_header_dealloc(dev, modify_hdr); kfree(modify_hdr); } EXPORT_SYMBOL(mlx5_modify_header_dealloc); struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat_params *params, enum mlx5_flow_namespace_type ns_type) { struct mlx5_pkt_reformat *pkt_reformat; int err; pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL); if (!pkt_reformat) return ERR_PTR(-ENOMEM); pkt_reformat->ns_type = ns_type; pkt_reformat->reformat_type = params->type; err = mlx5_cmd_packet_reformat_alloc(dev, params, ns_type, pkt_reformat); if (err) { kfree(pkt_reformat); return ERR_PTR(err); } return pkt_reformat; } EXPORT_SYMBOL(mlx5_packet_reformat_alloc); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *pkt_reformat) { mlx5_cmd_packet_reformat_dealloc(dev, pkt_reformat); kfree(pkt_reformat); } EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); diff --git a/sys/dev/mlx5/mlx5_ifc.h b/sys/dev/mlx5/mlx5_ifc.h index 382d6c195ac9..7e76112cbec9 100644 --- a/sys/dev/mlx5/mlx5_ifc.h +++ b/sys/dev/mlx5/mlx5_ifc.h @@ -1,11555 +1,11571 @@ /*- * Copyright (c) 2013-2020, Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef MLX5_IFC_H #define MLX5_IFC_H #include enum { MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0, MLX5_EVENT_TYPE_COMP = 0x0, MLX5_EVENT_TYPE_PATH_MIG = 0x1, MLX5_EVENT_TYPE_COMM_EST = 0x2, MLX5_EVENT_TYPE_SQ_DRAINED = 0x3, MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_CQ_ERROR = 0x4, MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x5, MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x7, MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x8, MLX5_EVENT_TYPE_PORT_CHANGE = 0x9, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT = 0x1e, MLX5_EVENT_TYPE_CODING_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT = 0x22, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, MLX5_EVENT_TYPE_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CMD = 0xa, MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, MLX5_EVENT_TYPE_CODING_GENERAL_OBJ_EVENT = 0x27, }; enum { MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3, MLX5_MODIFY_TIR_BITMASK_SELF_LB_EN = 0x4 }; enum { MLX5_MODIFY_RQT_BITMASK_RQN_LIST = 0x1, }; enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, }; enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, MLX5_OBJ_TYPE_RMP = 0xff04, MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, MLX5_OBJ_TYPE_RQ = 0xff06, MLX5_OBJ_TYPE_SQ = 0xff07, MLX5_OBJ_TYPE_TIR = 0xff08, MLX5_OBJ_TYPE_TIS = 0xff09, MLX5_OBJ_TYPE_DCT = 0xff0a, MLX5_OBJ_TYPE_XRQ = 0xff0b, MLX5_OBJ_TYPE_RQT = 0xff0e, MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, MLX5_OBJ_TYPE_CQ = 0xff10, }; enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, MLX5_CMD_OP_INIT_HCA = 0x102, MLX5_CMD_OP_TEARDOWN_HCA = 0x103, MLX5_CMD_OP_ENABLE_HCA = 0x104, MLX5_CMD_OP_DISABLE_HCA = 0x105, MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e, MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP = 0x10f, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, MLX5_CMD_OP_GEN_EQE = 0x304, MLX5_CMD_OP_CREATE_CQ = 0x400, MLX5_CMD_OP_DESTROY_CQ = 0x401, MLX5_CMD_OP_QUERY_CQ = 0x402, MLX5_CMD_OP_MODIFY_CQ = 0x403, MLX5_CMD_OP_CREATE_QP = 0x500, MLX5_CMD_OP_DESTROY_QP = 0x501, MLX5_CMD_OP_RST2INIT_QP = 0x502, MLX5_CMD_OP_INIT2RTR_QP = 0x503, MLX5_CMD_OP_RTR2RTS_QP = 0x504, MLX5_CMD_OP_RTS2RTS_QP = 0x505, MLX5_CMD_OP_SQERR2RTS_QP = 0x506, MLX5_CMD_OP_2ERR_QP = 0x507, MLX5_CMD_OP_2RST_QP = 0x50a, MLX5_CMD_OP_QUERY_QP = 0x50b, MLX5_CMD_OP_SQD_RTS_QP = 0x50c, MLX5_CMD_OP_INIT2INIT_QP = 0x50e, MLX5_CMD_OP_CREATE_PSV = 0x600, 
MLX5_CMD_OP_DESTROY_PSV = 0x601, MLX5_CMD_OP_CREATE_SRQ = 0x700, MLX5_CMD_OP_DESTROY_SRQ = 0x701, MLX5_CMD_OP_QUERY_SRQ = 0x702, MLX5_CMD_OP_ARM_RQ = 0x703, MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, MLX5_CMD_OP_CREATE_DCT = 0x710, MLX5_CMD_OP_DESTROY_DCT = 0x711, MLX5_CMD_OP_DRAIN_DCT = 0x712, MLX5_CMD_OP_QUERY_DCT = 0x713, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, MLX5_CMD_OP_SET_DC_CNAK_TRACE = 0x715, MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716, MLX5_CMD_OP_CREATE_XRQ = 0x717, MLX5_CMD_OP_DESTROY_XRQ = 0x718, MLX5_CMD_OP_QUERY_XRQ = 0x719, MLX5_CMD_OP_ARM_XRQ = 0x71a, MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, MLX5_CMD_OP_MODIFY_XRQ = 0x72a, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, MLX5_CMD_OP_DEALLOC_UAR = 0x803, MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, MLX5_CMD_OP_NOP = 0x80d, MLX5_CMD_OP_ALLOC_XRCD = 0x80e, MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, MLX5_CMD_OP_SET_BURST_SIZE = 0x812, MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813, MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819, MLX5_CMD_OP_SET_DIAGNOSTICS = 0x820, MLX5_CMD_OP_QUERY_DIAGNOSTICS = 0x821, MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_SET_WOL_ROL = 0x830, MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_LAG = 0x840, MLX5_CMD_OP_MODIFY_LAG = 0x841, MLX5_CMD_OP_QUERY_LAG = 0x842, MLX5_CMD_OP_DESTROY_LAG = 0x843, MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844, MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, 
MLX5_CMD_OP_DESTROY_TIR = 0x902, MLX5_CMD_OP_QUERY_TIR = 0x903, MLX5_CMD_OP_CREATE_SQ = 0x904, MLX5_CMD_OP_MODIFY_SQ = 0x905, MLX5_CMD_OP_DESTROY_SQ = 0x906, MLX5_CMD_OP_QUERY_SQ = 0x907, MLX5_CMD_OP_CREATE_RQ = 0x908, MLX5_CMD_OP_MODIFY_RQ = 0x909, MLX5_CMD_OP_DESTROY_RQ = 0x90a, MLX5_CMD_OP_QUERY_RQ = 0x90b, MLX5_CMD_OP_CREATE_RMP = 0x90c, MLX5_CMD_OP_MODIFY_RMP = 0x90d, MLX5_CMD_OP_DESTROY_RMP = 0x90e, MLX5_CMD_OP_QUERY_RMP = 0x90f, MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911, MLX5_CMD_OP_CREATE_TIS = 0x912, MLX5_CMD_OP_MODIFY_TIS = 0x913, MLX5_CMD_OP_DESTROY_TIS = 0x914, MLX5_CMD_OP_QUERY_TIS = 0x915, MLX5_CMD_OP_CREATE_RQT = 0x916, MLX5_CMD_OP_MODIFY_RQT = 0x917, MLX5_CMD_OP_DESTROY_RQT = 0x918, MLX5_CMD_OP_QUERY_RQT = 0x919, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e, MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, MLX5_CMD_OP_CREATE_GENERAL_OBJ = 0xa00, MLX5_CMD_OP_MODIFY_GENERAL_OBJ = 0xa01, MLX5_CMD_OP_QUERY_GENERAL_OBJ = 0xa02, MLX5_CMD_OP_DESTROY_GENERAL_OBJ = 0xa03, MLX5_CMD_OP_CREATE_UCTX = 0xa04, MLX5_CMD_OP_DESTROY_UCTX = 0xa06, MLX5_CMD_OP_CREATE_UMEM = 0xa08, MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, }; /* Valid range for general commands that don't work over an object */ enum { MLX5_CMD_OP_GENERAL_START = 0xb00, MLX5_CMD_OP_GENERAL_END = 0xd00, }; enum { MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_QUERY_FW_INFO = 0x8007, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_CAPABILITY = 0x8400, MLX5_ICMD_CMDS_OPCODE_ICMD_ACCESS_REGISTER = 0x9001, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_VIRTUAL_MAC = 0x9003, MLX5_ICMD_CMDS_OPCODE_ICMD_SET_VIRTUAL_MAC = 0x9004, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_WOL_ROL = 0x9005, MLX5_ICMD_CMDS_OPCODE_ICMD_SET_WOL_ROL = 0x9006, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_INIT = 0x9007, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_HEADER_STATUS = 0x9008, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_ETOC_STATUS = 0x9009, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_SET_EVENT = 0x900a, MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_INIT_OCSD = 0xf004 }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, }; enum { MLX5_HCA_CAP_GENERAL_OBJ_TYPES_ENCRYPTION_KEY = 1 << 0xc, }; enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, }; enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, }; struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; u8 outer_ether_type[0x1]; u8 outer_ip_version[0x1]; u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; u8 reserved_1[0x1]; u8 outer_second_prio[0x1]; u8 
outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; u8 outer_ipv6_flow_label[0x1]; u8 outer_sip[0x1]; u8 outer_dip[0x1]; u8 outer_frag[0x1]; u8 outer_ip_protocol[0x1]; u8 outer_ip_ecn[0x1]; u8 outer_ip_dscp[0x1]; u8 outer_udp_sport[0x1]; u8 outer_udp_dport[0x1]; u8 outer_tcp_sport[0x1]; u8 outer_tcp_dport[0x1]; u8 outer_tcp_flags[0x1]; u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; u8 outer_geneve_vni[0x1]; u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; u8 reserved_2[0x1]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; u8 inner_smac[0x1]; u8 inner_ether_type[0x1]; u8 inner_ip_version[0x1]; u8 inner_first_prio[0x1]; u8 inner_first_cfi[0x1]; u8 inner_first_vid[0x1]; u8 reserved_4[0x1]; u8 inner_second_prio[0x1]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0x1]; u8 inner_ipv6_flow_label[0x1]; u8 inner_sip[0x1]; u8 inner_dip[0x1]; u8 inner_frag[0x1]; u8 inner_ip_protocol[0x1]; u8 inner_ip_ecn[0x1]; u8 inner_ip_dscp[0x1]; u8 inner_udp_sport[0x1]; u8 inner_udp_dport[0x1]; u8 inner_tcp_sport[0x1]; u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; u8 reserved_5[0x9]; u8 reserved_6[0x1a]; u8 bth_dst_qp[0x1]; u8 reserved_7[0x4]; u8 source_sqn[0x1]; u8 reserved_8[0x20]; }; struct mlx5_ifc_eth_discard_cntrs_grp_bits { u8 ingress_general_high[0x20]; u8 ingress_general_low[0x20]; u8 ingress_policy_engine_high[0x20]; u8 ingress_policy_engine_low[0x20]; u8 ingress_vlan_membership_high[0x20]; u8 ingress_vlan_membership_low[0x20]; u8 ingress_tag_frame_type_high[0x20]; u8 ingress_tag_frame_type_low[0x20]; u8 egress_vlan_membership_high[0x20]; u8 egress_vlan_membership_low[0x20]; u8 loopback_filter_high[0x20]; u8 loopback_filter_low[0x20]; u8 egress_general_high[0x20]; u8 egress_general_low[0x20]; u8 reserved_at_1c0[0x40]; u8 egress_hoq_high[0x20]; u8 egress_hoq_low[0x20]; u8 port_isolation_high[0x20]; u8 port_isolation_low[0x20]; u8 egress_policy_engine_high[0x20]; u8 egress_policy_engine_low[0x20]; u8 ingress_tx_link_down_high[0x20]; u8 ingress_tx_link_down_low[0x20]; u8 egress_stp_filter_high[0x20]; u8 egress_stp_filter_low[0x20]; u8 egress_hoq_stall_high[0x20]; u8 egress_hoq_stall_low[0x20]; u8 reserved_at_340[0x440]; }; struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 flow_tag[0x1]; u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table[0x1]; u8 flow_table_modify[0x1]; u8 encap[0x1]; u8 decap[0x1]; u8 reset_root_to_default[0x1]; u8 reserved_at_a[0x16]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; u8 max_modify_header_actions[0x8]; u8 max_ft_level[0x8]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8]; u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; u8 reserved_at_c0[0x40]; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; }; struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 send[0x1]; u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; u8 atomic[0x1]; u8 srq_receive[0x1]; u8 reserved_0[0x1a]; }; struct mlx5_ifc_flow_counter_list_bits { u8 reserved_0[0x10]; u8 flow_counter_id[0x10]; u8 reserved_1[0x20]; }; enum { MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0, MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 0x2, MLX5_FLOW_CONTEXT_DEST_TYPE_QP = 0x3, }; struct mlx5_ifc_dest_format_struct_bits { u8 
destination_type[0x8]; u8 destination_id[0x18]; u8 reserved_0[0x20]; }; struct mlx5_ifc_ipv4_layout_bits { u8 reserved_at_0[0x60]; u8 ipv4[0x20]; }; struct mlx5_ifc_ipv6_layout_bits { u8 ipv6[16][0x8]; }; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { struct mlx5_ifc_ipv6_layout_bits ipv6_layout; struct mlx5_ifc_ipv4_layout_bits ipv4_layout; u8 reserved_at_0[0x80]; }; struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; u8 smac_15_0[0x10]; u8 ethertype[0x10]; u8 dmac_47_16[0x20]; u8 dmac_15_0[0x10]; u8 first_prio[0x3]; u8 first_cfi[0x1]; u8 first_vid[0xc]; u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; u8 cvlan_tag[0x1]; u8 svlan_tag[0x1]; u8 frag[0x1]; u8 ip_version[0x4]; u8 tcp_flags[0x9]; u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; u8 reserved_2[0x20]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_0[0x8]; u8 source_sqn[0x18]; u8 reserved_1[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0xc]; u8 inner_second_prio[0x3]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; u8 outer_second_vlan_tag[0x1]; u8 inner_second_vlan_tag[0x1]; u8 reserved_2[0xe]; u8 gre_protocol[0x10]; u8 gre_key_h[0x18]; u8 gre_key_l[0x8]; u8 vxlan_vni[0x18]; u8 reserved_3[0x8]; u8 geneve_vni[0x18]; u8 reserved4[0x7]; u8 geneve_oam[0x1]; u8 reserved_5[0xc]; u8 outer_ipv6_flow_label[0x14]; u8 reserved_6[0xc]; u8 inner_ipv6_flow_label[0x14]; u8 reserved_7[0xa]; u8 geneve_opt_len[0x6]; u8 geneve_protocol_type[0x10]; u8 reserved_8[0x8]; u8 bth_dst_qp[0x18]; u8 reserved_9[0xa0]; }; struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; u8 pa_l[0x14]; u8 reserved_0[0xc]; }; struct mlx5_ifc_uint64_bits { u8 hi[0x20]; u8 lo[0x20]; }; struct mlx5_ifc_application_prio_entry_bits { u8 reserved_0[0x8]; u8 priority[0x3]; u8 reserved_1[0x2]; u8 sel[0x3]; u8 protocol_id[0x10]; }; struct mlx5_ifc_nodnic_ring_doorbell_bits { u8 reserved_0[0x8]; u8 ring_pi[0x10]; u8 reserved_1[0x8]; }; enum { MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, MLX5_ADS_STAT_RATE_10GBPS = 0x8, MLX5_ADS_STAT_RATE_30GBPS = 0x9, MLX5_ADS_STAT_RATE_5GBPS = 0xa, MLX5_ADS_STAT_RATE_20GBPS = 0xb, MLX5_ADS_STAT_RATE_40GBPS = 0xc, MLX5_ADS_STAT_RATE_60GBPS = 0xd, MLX5_ADS_STAT_RATE_80GBPS = 0xe, MLX5_ADS_STAT_RATE_120GBPS = 0xf, }; struct mlx5_ifc_ads_bits { u8 fl[0x1]; u8 free_ar[0x1]; u8 reserved_0[0xe]; u8 pkey_index[0x10]; u8 reserved_1[0x8]; u8 grh[0x1]; u8 mlid[0x7]; u8 rlid[0x10]; u8 ack_timeout[0x5]; u8 reserved_2[0x3]; u8 src_addr_index[0x8]; u8 log_rtm[0x4]; u8 stat_rate[0x4]; u8 hop_limit[0x8]; u8 reserved_3[0x4]; u8 tclass[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; u8 reserved_4[0x4]; u8 f_dscp[0x1]; u8 f_ecn[0x1]; u8 reserved_5[0x1]; u8 f_eth_prio[0x1]; u8 ecn[0x2]; u8 dscp[0x6]; u8 udp_sport[0x10]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 sl[0x4]; u8 port[0x8]; u8 rmac_47_32[0x10]; u8 rmac_31_0[0x20]; }; struct mlx5_ifc_diagnostic_counter_cap_bits { u8 sync[0x1]; u8 reserved_0[0xf]; u8 counter_id[0x10]; }; struct mlx5_ifc_debug_cap_bits { u8 reserved_0[0x18]; u8 log_max_samples[0x8]; u8 single[0x1]; u8 repetitive[0x1]; u8 health_mon_rx_activity[0x1]; u8 reserved_1[0x15]; u8 log_min_sample_period[0x8]; u8 reserved_2[0x1c0]; struct mlx5_ifc_diagnostic_counter_cap_bits diagnostic_counter[0x1f0]; }; struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; u8 esw_scheduling[0x1]; u8 
esw_bw_share[0x1]; u8 esw_rate_limit[0x1]; u8 hll[0x1]; u8 packet_pacing_burst_bound[0x1]; u8 packet_pacing_typical_size[0x1]; u8 reserved_at_7[0x19]; u8 reserved_at_20[0xA]; u8 qos_remap_pp[0x1]; u8 reserved_at_2b[0x15]; u8 packet_pacing_max_rate[0x20]; u8 packet_pacing_min_rate[0x20]; u8 reserved_at_80[0x10]; u8 packet_pacing_rate_table_size[0x10]; u8 esw_element_type[0x10]; u8 esw_tsar_type[0x10]; u8 reserved_at_c0[0x10]; u8 max_qos_para_vport[0x10]; u8 max_tsar_bw_share[0x20]; u8 reserved_at_100[0x700]; }; struct mlx5_ifc_snapshot_cap_bits { u8 reserved_0[0x1d]; u8 suspend_qp_uc[0x1]; u8 suspend_qp_ud[0x1]; u8 suspend_qp_rc[0x1]; u8 reserved_1[0x1c]; u8 restore_pd[0x1]; u8 restore_uar[0x1]; u8 restore_mkey[0x1]; u8 restore_qp[0x1]; u8 reserved_2[0x1e]; u8 named_mkey[0x1]; u8 named_qp[0x1]; u8 reserved_3[0x7a0]; }; struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; u8 reserved_at_5[0x1]; u8 vport_cvlan_insert_always[0x1]; u8 esw_shared_ingress_acl[0x1]; u8 esw_uplink_ingress_acl[0x1]; u8 root_ft_on_other_esw[0x1]; u8 reserved_at_a[0xf]; u8 esw_functions_changed[0x1]; u8 reserved_at_1a[0x1]; u8 ecpf_vport_exists[0x1]; u8 counter_eswitch_affinity[0x1]; u8 merged_eswitch[0x1]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; u8 reserved_at_22[0x1]; u8 log_max_fdb_encap_uplink[0x5]; u8 reserved_at_21[0x3]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; u8 reserved_at_40[0xb]; u8 log_max_esw_sf[0x5]; u8 esw_sf_base_id[0x10]; u8 reserved_at_60[0x7a0]; }; struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_0[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; u8 reserved_1[0x7800]; }; struct mlx5_ifc_flow_table_nic_cap_bits { u8 nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; u8 reserved_at_3[0x4]; u8 sw_owner_reformat_supported[0x1]; u8 reserved_at_8[0x18]; u8 encap_general_header[0x1]; u8 reserved_at_21[0xa]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_at_30[0x6]; u8 max_encap_header_size[0xa]; u8 reserved_at_40[0x1c0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; u8 reserved_1[0x7200]; }; struct mlx5_ifc_pddr_module_info_bits { u8 cable_technology[0x8]; u8 cable_breakout[0x8]; u8 ext_ethernet_compliance_code[0x8]; u8 ethernet_compliance_code[0x8]; u8 cable_type[0x4]; u8 cable_vendor[0x4]; u8 cable_length[0x8]; u8 cable_identifier[0x8]; u8 cable_power_class[0x8]; u8 reserved_at_40[0x8]; u8 cable_rx_amp[0x8]; u8 cable_rx_emphasis[0x8]; u8 cable_tx_equalization[0x8]; u8 reserved_at_60[0x8]; u8 cable_attenuation_12g[0x8]; u8 cable_attenuation_7g[0x8]; u8 cable_attenuation_5g[0x8]; u8 reserved_at_80[0x8]; u8 
rx_cdr_cap[0x4]; u8 tx_cdr_cap[0x4]; u8 reserved_at_90[0x4]; u8 rx_cdr_state[0x4]; u8 reserved_at_98[0x4]; u8 tx_cdr_state[0x4]; u8 vendor_name[16][0x8]; u8 vendor_pn[16][0x8]; u8 vendor_rev[0x20]; u8 fw_version[0x20]; u8 vendor_sn[16][0x8]; u8 temperature[0x10]; u8 voltage[0x10]; u8 rx_power_lane0[0x10]; u8 rx_power_lane1[0x10]; u8 rx_power_lane2[0x10]; u8 rx_power_lane3[0x10]; u8 reserved_at_2c0[0x40]; u8 tx_power_lane0[0x10]; u8 tx_power_lane1[0x10]; u8 tx_power_lane2[0x10]; u8 tx_power_lane3[0x10]; u8 reserved_at_340[0x40]; u8 tx_bias_lane0[0x10]; u8 tx_bias_lane1[0x10]; u8 tx_bias_lane2[0x10]; u8 tx_bias_lane3[0x10]; u8 reserved_at_3c0[0x40]; u8 temperature_high_th[0x10]; u8 temperature_low_th[0x10]; u8 voltage_high_th[0x10]; u8 voltage_low_th[0x10]; u8 rx_power_high_th[0x10]; u8 rx_power_low_th[0x10]; u8 tx_power_high_th[0x10]; u8 tx_power_low_th[0x10]; u8 tx_bias_high_th[0x10]; u8 tx_bias_low_th[0x10]; u8 reserved_at_4a0[0x10]; u8 wavelength[0x10]; u8 reserved_at_4c0[0x300]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; u8 lro_max_msg_sz_mode[0x2]; u8 wqe_vlan_insert[0x1]; u8 self_lb_en_modifiable[0x1]; u8 self_lb_mc[0x1]; u8 self_lb_uc[0x1]; u8 max_lso_cap[0x5]; u8 multi_pkt_send_wqe[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; u8 scatter_fcs[0x1]; u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 tunnel_lro_gre[0x1]; u8 tunnel_lro_vxlan[0x1]; u8 tunnel_statless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 reserved_2[0x1b]; u8 max_geneve_opt_len[0x1]; u8 tunnel_stateless_geneve_rx[0x1]; u8 reserved_3[0x10]; u8 lro_min_mss_size[0x10]; u8 reserved_4[0x120]; u8 lro_timer_supported_periods[4][0x20]; u8 reserved_5[0x600]; }; enum { MLX5_ROCE_CAP_L3_TYPE_GRH = 0x1, MLX5_ROCE_CAP_L3_TYPE_IPV4 = 0x2, MLX5_ROCE_CAP_L3_TYPE_IPV6 = 0x4, }; enum { MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, }; struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; u8 rts2rts_primary_eth_prio[0x1]; u8 roce_rx_allow_untagged[0x1]; u8 rts2rts_src_addr_index_for_vlan_valid_vlan_id[0x1]; u8 reserved_at_4[0x1a]; u8 qp_ts_format[0x2]; u8 reserved_1[0x60]; u8 reserved_2[0xc]; u8 l3_type[0x4]; u8 reserved_3[0x8]; u8 roce_version[0x8]; u8 reserved_4[0x10]; u8 r_roce_dest_udp_port[0x10]; u8 r_roce_max_src_udp_port[0x10]; u8 r_roce_min_src_udp_port[0x10]; u8 reserved_5[0x10]; u8 roce_address_table_size[0x10]; u8 reserved_6[0x700]; }; struct mlx5_ifc_device_event_cap_bits { u8 user_affiliated_events[4][0x40]; u8 user_unaffiliated_events[4][0x40]; }; enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x1, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, }; enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, 
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, }; struct mlx5_ifc_atomic_caps_bits { u8 reserved_0[0x40]; u8 atomic_req_8B_endianess_mode[0x2]; u8 reserved_1[0x4]; u8 supported_atomic_req_8B_endianess_mode_1[0x1]; u8 reserved_2[0x19]; u8 reserved_3[0x20]; u8 reserved_4[0x10]; u8 atomic_operations[0x10]; u8 reserved_5[0x10]; u8 atomic_size_qp[0x10]; u8 reserved_6[0x10]; u8 atomic_size_dc[0x10]; u8 reserved_7[0x720]; }; struct mlx5_ifc_odp_cap_bits { u8 reserved_0[0x40]; u8 sig[0x1]; u8 reserved_1[0x1f]; u8 reserved_2[0x20]; struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; u8 reserved_3[0x6e0]; }; enum { MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, }; enum { MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, }; enum { MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, }; enum { MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, }; enum { MLX5_UCTX_CAP_RAW_TX = 1UL << 0, MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, }; enum { MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, }; enum { MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, }; struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_0[0x80]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; u8 event_cap[0x1]; u8 reserved_1[0xa]; u8 log_max_qp[0x5]; u8 reserved_2[0xb]; u8 log_max_srq[0x5]; u8 reserved_3[0x10]; u8 reserved_4[0x8]; u8 log_max_cq_sz[0x8]; u8 relaxed_ordering_write_umr[0x1]; u8 relaxed_ordering_read_umr[0x1]; u8 reserved_5[0x9]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; u8 relaxed_ordering_write[0x1]; u8 relaxed_ordering_read[0x1]; u8 log_max_mkey[0x6]; u8 reserved_7[0xb]; u8 fast_teardown[0x1]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; u8 reserved_8[0x1]; u8 log_max_mrw_sz[0x7]; u8 force_teardown[0x1]; u8 reserved_9[0x1]; u8 log_max_bsf_list_size[0x6]; u8 reserved_10[0x2]; u8 log_max_klm_list_size[0x6]; u8 reserved_11[0xa]; u8 log_max_ra_req_dc[0x6]; u8 reserved_12[0xa]; u8 log_max_ra_res_dc[0x6]; u8 reserved_13[0xa]; u8 log_max_ra_req_qp[0x6]; u8 reserved_14[0xa]; u8 log_max_ra_res_qp[0x6]; u8 pad_cap[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; u8 reserved_at_165[0xa]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; u8 debug[0x1]; u8 modify_rq_counters_set_id[0x1]; u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; u8 
pkey_table_size[0x10]; u8 vport_group_manager[0x1]; u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; u8 reserved_17[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; u8 reserved_18[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; u8 reserved_19[0x5]; u8 port_type[0x2]; u8 num_ports[0x8]; u8 snapshot[0x1]; u8 reserved_20[0x2]; u8 log_max_msg[0x5]; u8 reserved_21[0x4]; u8 max_tc[0x4]; u8 temp_warn_event[0x1]; u8 dcbx[0x1]; u8 general_notification_event[0x1]; u8 reserved_at_1d3[0x2]; u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_23[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; u8 wol_b[0x1]; u8 wol_m[0x1]; u8 wol_u[0x1]; u8 wol_p[0x1]; u8 stat_rate_support[0x10]; u8 reserved_24[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; u8 reserved_25[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_ipoib_offloads[0x1]; u8 reserved_26[0x8]; u8 dc_connect_qp[0x1]; u8 dc_cnak_trace[0x1]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; u8 reserved_27[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; u8 reserved_28[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; u8 qos[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; u8 reserved_30[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; u8 cq_period_mode_modify[0x1]; u8 cq_invalidate[0x1]; u8 reserved_at_225[0x1]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; u8 exponential_backoff[0x1]; u8 scqe_break_moderation[0x1]; u8 cq_period_start_from_cqe[0x1]; u8 cd[0x1]; u8 atm[0x1]; u8 apm[0x1]; u8 imaicl[0x1]; u8 reserved_32[0x6]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; u8 reserved_33[0x3]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; u8 reserved_35[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; u8 reserved_36[0x8]; u8 log_bf_reg_size[0x5]; u8 reserved_37[0x10]; u8 num_of_diagnostic_counters[0x10]; u8 max_wqe_sz_sq[0x10]; u8 reserved_38[0x10]; u8 max_wqe_sz_rq[0x10]; u8 reserved_39[0x10]; u8 max_wqe_sz_sq_dc[0x10]; u8 reserved_40[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_41[0x18]; + u8 reserved_41[0x10]; + u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8]; u8 reserved_42[0x3]; u8 log_max_transport_domain[0x5]; u8 reserved_43[0x3]; u8 log_max_pd[0x5]; u8 reserved_44[0xb]; u8 log_max_xrcd[0x5]; u8 nic_receive_steering_discard[0x1]; u8 reserved_45[0x7]; u8 log_max_flow_counter_bulk[0x8]; u8 max_flow_counter[0x10]; u8 reserved_46[0x3]; u8 log_max_rq[0x5]; u8 reserved_47[0x3]; u8 log_max_sq[0x5]; u8 reserved_48[0x3]; u8 log_max_tir[0x5]; u8 reserved_49[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_50[0x2]; u8 log_max_rmp[0x5]; u8 reserved_51[0x3]; u8 log_max_rqt[0x5]; u8 reserved_52[0x3]; u8 log_max_rqt_size[0x5]; u8 reserved_53[0x3]; u8 log_max_tis_per_sq[0x5]; u8 reserved_54[0x3]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_55[0x3]; u8 log_min_stride_sz_rq[0x5]; u8 reserved_56[0x3]; u8 log_max_stride_sz_sq[0x5]; u8 reserved_57[0x3]; u8 log_min_stride_sz_sq[0x5]; u8 reserved_58[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; u8 reserved_59[0x8]; u8 log_max_vlan_list[0x5]; u8 reserved_60[0x3]; u8 log_max_current_mc_list[0x5]; u8 reserved_61[0x3]; u8 log_max_current_uc_list[0x5]; u8 general_obj_types[0x40]; u8 sq_ts_format[0x2]; u8 rq_ts_format[0x2]; u8 reserved_at_444[0x4]; u8 
create_qp_start_hint[0x18]; u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x3]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x1]; u8 tls_tx[0x1]; u8 tls_rx[0x1]; u8 log_max_l2_table[0x5]; u8 reserved_64[0x8]; u8 log_uar_page_sz[0x10]; u8 reserved_65[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; u8 reserved_at_500[0x20]; u8 num_of_uars_per_page[0x20]; u8 reserved_at_540[0x40]; u8 log_max_atomic_size_qp[0x8]; u8 reserved_67[0x10]; u8 log_max_atomic_size_dc[0x8]; u8 reserved_at_5a0[0x13]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128b[0x1]; u8 cqe_compression[0x1]; u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; u8 reserved_5e0[0xc0]; u8 uctx_cap[0x20]; u8 reserved_6c0[0xc0]; u8 vhca_tunnel_commands[0x40]; u8 reserved_at_7c0[0x40]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, }; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { struct mlx5_ifc_dest_format_struct_bits dest_format_struct; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; u8 reserved_0[0x40]; }; struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; u8 reserved_0[0xa00]; }; enum { MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, }; struct mlx5_ifc_rx_hash_field_select_bits { u8 l3_prot_type[0x1]; u8 l4_prot_type[0x1]; u8 selected_fields[0x1e]; }; struct mlx5_ifc_tls_capabilities_bits { u8 tls_1_2_aes_gcm_128[0x1]; u8 tls_1_3_aes_gcm_128[0x1]; u8 tls_1_2_aes_gcm_256[0x1]; u8 tls_1_3_aes_gcm_256[0x1]; u8 reserved_at_4[0x1c]; u8 reserved_at_20[0x7e0]; }; enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, MLX5_WQ_TYPE_STRQ_LINKED_LIST = 0x2, MLX5_WQ_TYPE_STRQ_CYCLIC = 0x3, }; enum rq_type { RQ_TYPE_NONE, RQ_TYPE_STRIDE, }; enum { MLX5_WQ_END_PAD_MODE_NONE = 0x0, MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, }; struct mlx5_ifc_wq_bits { u8 wq_type[0x4]; u8 wq_signature[0x1]; u8 end_padding_mode[0x2]; u8 cd_slave[0x1]; u8 reserved_0[0x18]; u8 hds_skip_first_sge[0x1]; u8 log2_hds_buf_size[0x3]; u8 reserved_1[0x7]; u8 page_offset[0x5]; u8 lwm[0x10]; u8 reserved_2[0x8]; u8 pd[0x18]; u8 reserved_3[0x8]; u8 uar_page[0x18]; u8 dbr_addr[0x40]; u8 hw_counter[0x20]; u8 sw_counter[0x20]; u8 reserved_4[0xc]; u8 log_wq_stride[0x4]; u8 reserved_5[0x3]; u8 log_wq_pg_sz[0x5]; u8 reserved_6[0x3]; u8 log_wq_sz[0x5]; u8 dbr_umem_valid[0x1]; u8 wq_umem_valid[0x1]; u8 reserved_7[0x13]; u8 single_wqe_log_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_8[0x4]; u8 single_stride_log_num_of_bytes[0x3]; u8 reserved_9[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; struct mlx5_ifc_rq_num_bits { u8 reserved_0[0x8]; u8 rq_num[0x18]; }; struct mlx5_ifc_mac_address_layout_bits { u8 reserved_0[0x10]; u8 mac_addr_47_32[0x10]; u8 mac_addr_31_0[0x20]; }; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_0[0xa0]; u8 min_time_between_cnps[0x20]; u8 reserved_1[0x12]; u8 cnp_dscp[0x6]; u8 reserved_2[0x4]; u8 cnp_prio_mode[0x1]; u8 cnp_802p_prio[0x3]; u8 
reserved_3[0x720]; }; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { u8 reserved_0[0x60]; u8 reserved_1[0x4]; u8 clamp_tgt_rate[0x1]; u8 reserved_2[0x3]; u8 clamp_tgt_rate_after_time_inc[0x1]; u8 reserved_3[0x17]; u8 reserved_4[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_5[0xe0]; u8 rate_to_set_on_first_cnp[0x20]; u8 dce_tcp_g[0x20]; u8 dce_tcp_rtt[0x20]; u8 rate_reduce_monitor_period[0x20]; u8 reserved_6[0x20]; u8 initial_alpha_value[0x20]; u8 reserved_7[0x4a0]; }; struct mlx5_ifc_cong_control_802_1qau_rp_bits { u8 reserved_0[0x80]; u8 rppp_max_rps[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_1[0x640]; }; enum { MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, }; struct mlx5_ifc_resize_field_select_bits { u8 resize_field_select[0x20]; }; enum { MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD_MODE = 0x10, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_STATUS = 0x20, }; struct mlx5_ifc_modify_field_select_bits { u8 modify_field_select[0x20]; }; struct mlx5_ifc_field_select_r_roce_np_bits { u8 field_select_r_roce_np[0x20]; }; enum { MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE = 0x2, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE_AFTER_TIME_INC = 0x4, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_TIME_RESET = 0x8, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_BYTE_RESET = 0x10, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_THRESHOLD = 0x20, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MAX_RATE = 0x40, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_AI_RATE = 0x80, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_HAI_RATE = 0x100, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_DEC_FAC = 0x200, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_RATE = 0x400, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_TO_SET_ON_FIRST_CNP = 0x800, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_G = 0x1000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_RTT = 0x2000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_REDUCE_MONITOR_PERIOD = 0x4000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_INITIAL_ALPHA_VALUE = 0x8000, }; struct mlx5_ifc_field_select_r_roce_rp_bits { u8 field_select_r_roce_rp[0x20]; }; enum { MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 
0x100, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, }; struct mlx5_ifc_field_select_802_1qau_rp_bits { u8 field_select_8021qaurp[0x20]; }; struct mlx5_ifc_pptb_reg_bits { u8 reserved_at_0[0x2]; u8 mm[0x2]; u8 reserved_at_4[0x4]; u8 local_port[0x8]; u8 reserved_at_10[0x6]; u8 cm[0x1]; u8 um[0x1]; u8 pm[0x8]; u8 prio_x_buff[0x20]; u8 pm_msb[0x8]; u8 reserved_at_48[0x10]; u8 ctrl_buff[0x4]; u8 untagged_buff[0x4]; }; struct mlx5_ifc_dcbx_app_reg_bits { u8 reserved_0[0x8]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1a]; u8 num_app_prio[0x6]; u8 reserved_3[0x40]; struct mlx5_ifc_application_prio_entry_bits app_prio[0]; }; struct mlx5_ifc_dcbx_param_reg_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 dcbx_standby_cap[0x1]; u8 reserved_0[0x5]; u8 port_number[0x8]; u8 reserved_1[0xa]; u8 max_application_table_size[0x6]; u8 reserved_2[0x15]; u8 version_oper[0x3]; u8 reserved_3[0x5]; u8 version_admin[0x3]; u8 willing_admin[0x1]; u8 reserved_4[0x3]; u8 pfc_cap_oper[0x4]; u8 reserved_5[0x4]; u8 pfc_cap_admin[0x4]; u8 reserved_6[0x4]; u8 num_of_tc_oper[0x4]; u8 reserved_7[0x4]; u8 num_of_tc_admin[0x4]; u8 remote_willing[0x1]; u8 reserved_8[0x3]; u8 remote_pfc_cap[0x4]; u8 reserved_9[0x14]; u8 remote_num_of_tc[0x4]; u8 reserved_10[0x18]; u8 error[0x8]; u8 reserved_11[0x160]; }; struct mlx5_ifc_qhll_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x1b]; u8 hll_time[0x5]; u8 stall_en[0x1]; u8 reserved_at_41[0x1c]; u8 stall_cnt[0x3]; }; struct mlx5_ifc_qetcr_reg_bits { u8 operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x20]; u8 tc[8][0x40]; u8 global_configuration[0x40]; }; struct mlx5_ifc_nodnic_ring_config_reg_bits { u8 queue_address_63_32[0x20]; u8 queue_address_31_12[0x14]; u8 reserved_0[0x6]; u8 log_size[0x6]; struct mlx5_ifc_nodnic_ring_doorbell_bits doorbell; u8 reserved_1[0x8]; u8 queue_number[0x18]; u8 q_key[0x20]; u8 reserved_2[0x10]; u8 pkey_index[0x10]; u8 reserved_3[0x40]; }; struct mlx5_ifc_nodnic_cq_arming_word_bits { u8 reserved_0[0x8]; u8 cq_ci[0x10]; u8 reserved_1[0x8]; }; enum { MLX5_NODNIC_EVENT_WORD_LINK_TYPE_INFINIBAND = 0x0, MLX5_NODNIC_EVENT_WORD_LINK_TYPE_ETHERNET = 0x1, }; enum { MLX5_NODNIC_EVENT_WORD_PORT_STATE_DOWN = 0x0, MLX5_NODNIC_EVENT_WORD_PORT_STATE_INITIALIZE = 0x1, MLX5_NODNIC_EVENT_WORD_PORT_STATE_ARMED = 0x2, MLX5_NODNIC_EVENT_WORD_PORT_STATE_ACTIVE = 0x3, }; struct mlx5_ifc_nodnic_event_word_bits { u8 driver_reset_needed[0x1]; u8 port_management_change_event[0x1]; u8 reserved_0[0x19]; u8 link_type[0x1]; u8 port_state[0x4]; }; struct mlx5_ifc_nic_vport_change_event_bits { u8 reserved_0[0x10]; u8 vport_num[0x10]; u8 reserved_1[0xc0]; }; struct mlx5_ifc_pages_req_event_bits { u8 reserved_0[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; u8 reserved_1[0xa0]; }; struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; u8 reserved_0[0xc0]; }; struct mlx5_ifc_stall_vl_event_bits { u8 reserved_0[0x18]; u8 port_num[0x1]; u8 reserved_1[0x3]; u8 vl[0x4]; u8 reserved_2[0xa0]; }; struct mlx5_ifc_db_bf_congestion_event_bits { u8 event_subtype[0x8]; u8 reserved_0[0x8]; u8 congestion_level[0x8]; u8 reserved_1[0x8]; u8 reserved_2[0xa0]; }; struct mlx5_ifc_gpio_event_bits { u8 reserved_0[0x60]; u8 gpio_event_hi[0x20]; u8 
gpio_event_lo[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_port_state_change_event_bits { u8 reserved_0[0x40]; u8 port_num[0x4]; u8 reserved_1[0x1c]; u8 reserved_2[0x80]; }; struct mlx5_ifc_dropped_packet_logged_bits { u8 reserved_0[0xe0]; }; enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, }; struct mlx5_ifc_cq_error_bits { u8 reserved_0[0x8]; u8 cqn[0x18]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; u8 syndrome[0x8]; u8 reserved_3[0x80]; }; struct mlx5_ifc_rdma_page_fault_event_bits { u8 bytes_commited[0x20]; u8 r_key[0x20]; u8 reserved_0[0x10]; u8 packet_len[0x10]; u8 rdma_op_len[0x20]; u8 rdma_va[0x40]; u8 reserved_1[0x5]; u8 rdma[0x1]; u8 write[0x1]; u8 requestor[0x1]; u8 qp_number[0x18]; }; struct mlx5_ifc_wqe_associated_page_fault_event_bits { u8 bytes_committed[0x20]; u8 reserved_0[0x10]; u8 wqe_index[0x10]; u8 reserved_1[0x10]; u8 len[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x5]; u8 rdma[0x1]; u8 write_read[0x1]; u8 requestor[0x1]; u8 qpn[0x18]; }; enum { MLX5_QP_EVENTS_TYPE_QP = 0x0, MLX5_QP_EVENTS_TYPE_RQ = 0x1, MLX5_QP_EVENTS_TYPE_SQ = 0x2, }; struct mlx5_ifc_qp_events_bits { u8 reserved_0[0xa0]; u8 type[0x8]; u8 reserved_1[0x18]; u8 reserved_2[0x8]; u8 qpn_rqn_sqn[0x18]; }; struct mlx5_ifc_dct_events_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 dct_number[0x18]; }; struct mlx5_ifc_comp_event_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 cq_number[0x18]; }; struct mlx5_ifc_fw_version_bits { u8 major[0x10]; u8 reserved_0[0x10]; u8 minor[0x10]; u8 subminor[0x10]; u8 second[0x8]; u8 minute[0x8]; u8 hour[0x8]; u8 reserved_1[0x8]; u8 year[0x10]; u8 month[0x8]; u8 day[0x8]; }; enum { MLX5_QPC_STATE_RST = 0x0, MLX5_QPC_STATE_INIT = 0x1, MLX5_QPC_STATE_RTR = 0x2, MLX5_QPC_STATE_RTS = 0x3, MLX5_QPC_STATE_SQER = 0x4, MLX5_QPC_STATE_SQD = 0x5, MLX5_QPC_STATE_ERR = 0x6, MLX5_QPC_STATE_SUSPENDED = 0x9, }; enum { MLX5_QPC_ST_RC = 0x0, MLX5_QPC_ST_UC = 0x1, MLX5_QPC_ST_UD = 0x2, MLX5_QPC_ST_XRC = 0x3, MLX5_QPC_ST_DCI = 0x5, MLX5_QPC_ST_QP0 = 0x7, MLX5_QPC_ST_QP1 = 0x8, MLX5_QPC_ST_RAW_DATAGRAM = 0x9, MLX5_QPC_ST_REG_UMR = 0xc, }; enum { MLX5_QP_PM_ARMED = 0x0, MLX5_QP_PM_REARM = 0x1, MLX5_QPC_PM_STATE_RESERVED = 0x2, MLX5_QP_PM_MIGRATED = 0x3, }; enum { MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, }; enum { MLX5_QPC_MTU_256_BYTES = 0x1, MLX5_QPC_MTU_512_BYTES = 0x2, MLX5_QPC_MTU_1K_BYTES = 0x3, MLX5_QPC_MTU_2K_BYTES = 0x4, MLX5_QPC_MTU_4K_BYTES = 0x5, MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, }; enum { MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, }; enum { MLX5_QPC_CS_REQ_DISABLE = 0x0, MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, }; enum { MLX5_QPC_CS_RES_DISABLE = 0x0, MLX5_QPC_CS_RES_UP_TO_32B = 0x1, MLX5_QPC_CS_RES_UP_TO_64B = 0x2, }; enum { MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1, MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, }; struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; u8 reserved_1[0x3]; u8 pm_state[0x2]; u8 reserved_2[0x7]; u8 end_padding_mode[0x2]; u8 reserved_3[0x2]; u8 wq_signature[0x1]; u8 block_lb_mc[0x1]; u8 atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; u8 reserved_4[0x1]; u8 
drain_sigerr[0x1]; u8 reserved_5[0x2]; u8 pd[0x18]; u8 mtu[0x3]; u8 log_msg_max[0x5]; u8 reserved_6[0x1]; u8 log_rq_size[0x4]; u8 log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; u8 reserved_at_55[0x3]; u8 ts_format[0x2]; u8 reserved_at_5a[0x1]; u8 rlky[0x1]; u8 ulp_stateless_offload_mode[0x4]; u8 counter_set_id[0x8]; u8 uar_page[0x18]; u8 reserved_8[0x8]; u8 user_index[0x18]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 remote_qpn[0x18]; struct mlx5_ifc_ads_bits primary_address_path; struct mlx5_ifc_ads_bits secondary_address_path; u8 log_ack_req_freq[0x4]; u8 reserved_10[0x4]; u8 log_sra_max[0x3]; u8 reserved_11[0x2]; u8 retry_count[0x3]; u8 rnr_retry[0x3]; u8 reserved_12[0x1]; u8 fre[0x1]; u8 cur_rnr_retry[0x3]; u8 cur_retry_count[0x3]; u8 reserved_13[0x5]; u8 reserved_14[0x20]; u8 reserved_15[0x8]; u8 next_send_psn[0x18]; u8 reserved_16[0x8]; u8 cqn_snd[0x18]; u8 reserved_at_400[0x8]; u8 deth_sqpn[0x18]; u8 reserved_17[0x20]; u8 reserved_18[0x8]; u8 last_acked_psn[0x18]; u8 reserved_19[0x8]; u8 ssn[0x18]; u8 reserved_20[0x8]; u8 log_rra_max[0x3]; u8 reserved_21[0x1]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; u8 reserved_22[0x1]; u8 page_offset[0x6]; u8 reserved_23[0x3]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; u8 reserved_24[0x3]; u8 min_rnr_nak[0x5]; u8 next_rcv_psn[0x18]; u8 reserved_25[0x8]; u8 xrcd[0x18]; u8 reserved_26[0x8]; u8 cqn_rcv[0x18]; u8 dbr_addr[0x40]; u8 q_key[0x20]; u8 reserved_27[0x5]; u8 rq_type[0x3]; u8 srqn_rmpn[0x18]; u8 reserved_28[0x8]; u8 rmsn[0x18]; u8 hw_sq_wqebb_counter[0x10]; u8 sw_sq_wqebb_counter[0x10]; u8 hw_rq_counter[0x20]; u8 sw_rq_counter[0x20]; u8 reserved_29[0x20]; u8 reserved_30[0xf]; u8 cgs[0x1]; u8 cs_req[0x8]; u8 cs_res[0x8]; u8 dc_access_key[0x40]; u8 reserved_at_680[0x3]; u8 dbr_umem_valid[0x1]; u8 reserved_at_684[0xbc]; }; struct mlx5_ifc_roce_addr_layout_bits { u8 source_l3_address[16][0x8]; u8 reserved_0[0x3]; u8 vlan_valid[0x1]; u8 vlan_id[0xc]; u8 source_mac_47_32[0x10]; u8 source_mac_31_0[0x20]; u8 reserved_1[0x14]; u8 roce_l3_type[0x4]; u8 roce_version[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_rdbc_bits { u8 reserved_0[0x1c]; u8 type[0x4]; u8 reserved_1[0x20]; u8 reserved_2[0x8]; u8 psn[0x18]; u8 rkey[0x20]; u8 address[0x40]; u8 byte_count[0x20]; u8 reserved_3[0x20]; u8 atomic_resp[32][0x8]; }; enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, }; struct mlx5_ifc_flow_context_bits { u8 reserved_0[0x20]; u8 group_id[0x20]; u8 reserved_1[0x8]; u8 flow_tag[0x18]; u8 reserved_2[0x10]; u8 action[0x10]; u8 reserved_3[0x8]; u8 destination_list_size[0x18]; u8 reserved_4[0x8]; u8 flow_counter_list_size[0x18]; u8 packet_reformat_id[0x20]; u8 modify_header_id[0x20]; u8 reserved_6[0x100]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_7[0x600]; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { MLX5_XRC_SRQC_STATE_GOOD = 0x0, MLX5_XRC_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_xrc_srqc_bits { u8 state[0x4]; u8 log_xrc_srq_size[0x4]; u8 reserved_0[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_1[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_at_46[0x1]; u8 dbr_umem_valid[0x1]; u8 cqn[0x18]; u8 reserved_3[0x20]; u8 reserved_4[0x2]; u8 log_page_size[0x6]; u8 
user_index[0x18]; u8 reserved_5[0x20]; u8 reserved_6[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_7[0x40]; u8 db_record_addr_h[0x20]; u8 db_record_addr_l[0x1e]; u8 reserved_8[0x2]; u8 reserved_9[0x80]; }; struct mlx5_ifc_vnic_diagnostic_statistics_bits { u8 counter_error_queues[0x20]; u8 total_error_queues[0x20]; u8 send_queue_priority_update_flow[0x20]; u8 reserved_at_60[0x20]; u8 nic_receive_steering_discard[0x40]; u8 receive_discard_vport_down[0x40]; u8 transmit_discard_vport_down[0x40]; u8 reserved_at_140[0xec0]; }; struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; u8 octets[0x40]; }; struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 tls_en[0x1]; u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; u8 prio[0x4]; u8 reserved_1[0x10]; u8 reserved_2[0x100]; u8 reserved_3[0x8]; u8 transport_domain[0x18]; u8 reserved_4[0x8]; u8 underlay_qpn[0x18]; u8 reserved_5[0x8]; u8 pd[0x18]; u8 reserved_6[0x380]; }; enum { MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, }; enum { MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, }; enum { MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, }; enum { MLX5_TIRC_SELF_LB_EN_ENABLE_UNICAST = 0x1, MLX5_TIRC_SELF_LB_EN_ENABLE_MULTICAST = 0x2, }; struct mlx5_ifc_tirc_bits { u8 reserved_0[0x20]; u8 disp_type[0x4]; u8 tls_en[0x1]; u8 reserved_at_25[0x1b]; u8 reserved_2[0x40]; u8 reserved_3[0x4]; u8 lro_timeout_period_usecs[0x10]; u8 lro_enable_mask[0x4]; u8 lro_max_msg_sz[0x8]; u8 reserved_4[0x40]; u8 reserved_5[0x8]; u8 inline_rqn[0x18]; u8 rx_hash_symmetric[0x1]; u8 reserved_6[0x1]; u8 tunneled_offload_en[0x1]; u8 reserved_7[0x5]; u8 indirect_table[0x18]; u8 rx_hash_fn[0x4]; u8 reserved_8[0x2]; u8 self_lb_en[0x2]; u8 transport_domain[0x18]; u8 rx_hash_toeplitz_key[10][0x20]; struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; u8 reserved_9[0x4c0]; }; enum { MLX5_SRQC_STATE_GOOD = 0x0, MLX5_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_srqc_bits { u8 state[0x4]; u8 log_srq_size[0x4]; u8 reserved_0[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_1[0x1]; u8 rlky[0x1]; u8 reserved_2[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_3[0x2]; u8 cqn[0x18]; u8 reserved_4[0x20]; u8 reserved_5[0x2]; u8 log_page_size[0x6]; u8 reserved_6[0x18]; u8 reserved_7[0x20]; u8 reserved_8[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_9[0x40]; u8 dbr_addr[0x40]; u8 reserved_10[0x80]; }; enum { MLX5_SQC_STATE_RST = 0x0, MLX5_SQC_STATE_RDY = 0x1, MLX5_SQC_STATE_ERR = 0x3, }; enum { MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, }; struct mlx5_ifc_sqc_bits { u8 rlkey[0x1]; u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; u8 allow_multi_pkt_send_wqe[0x1]; u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; u8 reserved_at_e[0x4]; u8 qos_remap_en[0x1]; u8 reserved_at_d[0x7]; u8 ts_format[0x2]; u8 reserved_at_1c[0x4]; u8 reserved_1[0x8]; u8 user_index[0x18]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x80]; u8 qos_para_vport_number[0x10]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 qos_queue_group_id[0x10]; u8 reserved_4[0x8]; u8 queue_handle[0x18]; u8 reserved_5[0x20]; u8 reserved_6[0x8]; u8 tis_num_0[0x18]; 
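/*
 * Illustrative sketch only: this SQ context (including the wq member that
 * follows) is carried inside the CREATE_SQ command input; callers typically
 * size and fill the payload with the accessor macros from mlx5/device.h,
 * roughly as
 *
 *	u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
 *	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 *
 *	MLX5_SET(sqc, sqc, cqn, cqn);
 *	MLX5_SET(sqc, sqc, tis_num_0, tisn);
 *
 * Field and variable names in the sketch are for illustration only; the
 * authoritative layout is the structure itself.
 */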
struct mlx5_ifc_wq_bits wq; }; struct mlx5_ifc_query_pp_rate_limit_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved1[0x10]; u8 op_mod[0x10]; u8 reserved2[0x10]; u8 rate_limit_index[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_pp_context_bits { u8 rate_limit[0x20]; u8 burst_upper_bound[0x20]; u8 reserved_1[0xc]; u8 rate_mode[0x4]; u8 typical_packet_size[0x10]; u8 reserved_2[0x8]; u8 qos_handle[0x18]; u8 reserved_3[0x40]; }; struct mlx5_ifc_query_pp_rate_limit_out_bits { u8 status[0x8]; u8 reserved_1[0x18]; u8 syndrome[0x20]; u8 reserved_2[0x40]; struct mlx5_ifc_pp_context_bits pp_context; }; enum { MLX5_TSAR_TYPE_DWRR = 0, MLX5_TSAR_TYPE_ROUND_ROUBIN = 1, MLX5_TSAR_TYPE_ETS = 2 }; struct mlx5_ifc_tsar_element_attributes_bits { u8 reserved_0[0x8]; u8 tsar_type[0x8]; u8 reserved_1[0x10]; }; struct mlx5_ifc_vport_element_attributes_bits { u8 reserved_0[0x10]; u8 vport_number[0x10]; }; struct mlx5_ifc_vport_tc_element_attributes_bits { u8 traffic_class[0x10]; u8 vport_number[0x10]; }; struct mlx5_ifc_para_vport_tc_element_attributes_bits { u8 reserved_0[0x0C]; u8 traffic_class[0x04]; u8 qos_para_vport_number[0x10]; }; enum { MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; u8 element_attributes[0x20]; u8 parent_element_id[0x20]; u8 reserved_at_60[0x40]; u8 bw_share[0x20]; u8 max_average_bw[0x20]; u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_rqtc_bits { u8 reserved_0[0xa0]; u8 reserved_1[0x10]; u8 rqt_max_size[0x10]; u8 reserved_2[0x10]; u8 rqt_actual_size[0x10]; u8 reserved_3[0x6a0]; struct mlx5_ifc_rq_num_bits rq_num[0]; }; enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_RQ_TYPE_MEMORY_RQ_RMP = 0x1, }; enum { MLX5_RQC_STATE_RST = 0x0, MLX5_RQC_STATE_RDY = 0x1, MLX5_RQC_STATE_ERR = 0x3, }; enum { MLX5_RQC_DROPLESS_MODE_DISABLE = 0x0, MLX5_RQC_DROPLESS_MODE_ENABLE = 0x1, }; enum { MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, }; struct mlx5_ifc_rqc_bits { u8 rlkey[0x1]; u8 delay_drop_en[0x1]; u8 scatter_fcs[0x1]; u8 vlan_strip_disable[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; u8 reserved_1[0x1]; u8 flush_in_error_en[0x1]; u8 reserved_at_e[0xc]; u8 ts_format[0x2]; u8 reserved_at_1c[0x4]; u8 reserved_3[0x8]; u8 user_index[0x18]; u8 reserved_4[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; u8 reserved_5[0x18]; u8 reserved_6[0x8]; u8 rmpn[0x18]; u8 reserved_7[0xe0]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_RMPC_STATE_RDY = 0x1, MLX5_RMPC_STATE_ERR = 0x3, }; struct mlx5_ifc_rmpc_bits { u8 reserved_0[0x8]; u8 state[0x4]; u8 reserved_1[0x14]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x140]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_MC_MAC_ADDRESS = 0x1, MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST = 0x2, }; struct mlx5_ifc_nic_vport_context_bits { u8 reserved_0[0x5]; u8 min_wqe_inline_mode[0x3]; u8 reserved_1[0x15]; u8 disable_mc_local_lb[0x1]; u8 disable_uc_local_lb[0x1]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; u8 reserved_2[0x1a]; u8 event_on_mtu[0x1]; u8 event_on_promisc_change[0x1]; u8 event_on_vlan_change[0x1]; u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; u8 reserved_3[0xe0]; u8 
reserved_4[0x10]; u8 mtu[0x10]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 reserved_5[0x140]; u8 qkey_violation_counter[0x10]; u8 reserved_6[0x10]; u8 reserved_7[0x420]; u8 promisc_uc[0x1]; u8 promisc_mc[0x1]; u8 promisc_all[0x1]; u8 reserved_8[0x2]; u8 allowed_list_type[0x3]; u8 reserved_9[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; u8 reserved_10[0x20]; u8 current_uc_mac_address[0][0x40]; }; enum { MLX5_ACCESS_MODE_PA = 0x0, MLX5_ACCESS_MODE_MTT = 0x1, MLX5_ACCESS_MODE_KLM = 0x2, MLX5_ACCESS_MODE_KSM = 0x3, MLX5_ACCESS_MODE_SW_ICM = 0x4, MLX5_ACCESS_MODE_MEMIC = 0x5, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; u8 reserved_at_2[0x1]; u8 access_mode_4_2[0x3]; u8 reserved_at_6[0x7]; u8 relaxed_ordering_write[0x1]; u8 reserved_at_e[0x1]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; u8 rw[0x1]; u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; u8 access_mode[0x2]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 mkey_7_0[0x8]; u8 reserved_3[0x20]; u8 length64[0x1]; u8 bsf_en[0x1]; u8 sync_umr[0x1]; u8 reserved_4[0x2]; u8 expected_sigerr_count[0x1]; u8 reserved_5[0x1]; u8 en_rinval[0x1]; u8 pd[0x18]; u8 start_addr[0x40]; u8 len[0x40]; u8 bsf_octword_size[0x20]; u8 reserved_6[0x80]; u8 translations_octword_size[0x20]; u8 reserved_at_1c0[0x19]; u8 relaxed_ordering_read[0x1]; u8 reserved_at_1d9[0x1]; u8 log_page_size[0x5]; u8 reserved_8[0x20]; }; struct mlx5_ifc_pkey_bits { u8 reserved_0[0x10]; u8 pkey[0x10]; }; struct mlx5_ifc_array128_auto_bits { u8 array128_auto[16][0x8]; }; enum { MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_PORT_GUID = 0x0, MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_NODE_GUID = 0x1, MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_VPORT_STATE_POLICY = 0x2, }; enum { MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_SLEEP = 0x1, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_POLLING = 0x2, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_DISABLED = 0x3, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PORTCONFIGURATIONTRAINING = 0x4, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKUP = 0x5, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKERRORRECOVERY = 0x6, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PHYTEST = 0x7, }; enum { MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_DOWN = 0x0, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_UP = 0x1, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_FOLLOW = 0x2, }; enum { MLX5_HCA_VPORT_CONTEXT_PORT_STATE_DOWN = 0x1, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_INIT = 0x2, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ARM = 0x3, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ACTIVE = 0x4, }; enum { MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_DOWN = 0x1, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_INIT = 0x2, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ARM = 0x3, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ACTIVE = 0x4, }; struct mlx5_ifc_hca_vport_context_bits { u8 field_select[0x20]; u8 reserved_0[0xe0]; u8 sm_virt_aware[0x1]; u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; u8 reserved_1[0x1]; u8 min_wqe_inline_mode[0x3]; u8 reserved_2[0x8]; u8 port_physical_state[0x4]; u8 vport_state_policy[0x4]; u8 port_state[0x4]; u8 vport_state[0x4]; u8 reserved_3[0x20]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 cap_mask1[0x20]; u8 cap_mask1_field_select[0x20]; u8 cap_mask2[0x20]; u8 cap_mask2_field_select[0x20]; u8 reserved_4[0x80]; u8 lid[0x10]; u8 reserved_5[0x4]; u8 init_type_reply[0x4]; u8 lmc[0x3]; u8 subnet_timeout[0x5]; u8 sm_lid[0x10]; u8 sm_sl[0x4]; u8 reserved_6[0xc]; u8 qkey_violation_counter[0x10]; u8 pkey_violation_counter[0x10]; u8 
reserved_7[0xca0]; }; union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct mlx5_ifc_roce_cap_bits roce_cap; struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_snapshot_cap_bits snapshot_cap; struct mlx5_ifc_debug_cap_bits diagnostic_counters_cap; struct mlx5_ifc_qos_cap_bits qos_cap; struct mlx5_ifc_tls_capabilities_bits tls_capabilities; u8 reserved_0[0x8000]; }; enum { MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_DEFAULT = 0x0, MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_IDENTIFIED = 0x1, }; struct mlx5_ifc_flow_table_context_bits { u8 reformat_en[0x1]; u8 decap_en[0x1]; u8 reserved_at_2[0x2]; u8 table_miss_action[0x4]; u8 level[0x8]; u8 reserved_at_10[0x8]; u8 log_size[0x8]; u8 reserved_at_20[0x8]; u8 table_miss_id[0x18]; u8 reserved_at_40[0x8]; u8 lag_master_next_table_id[0x18]; u8 reserved_at_60[0xe0]; }; struct mlx5_ifc_esw_vport_context_bits { u8 reserved_0[0x3]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; u8 reserved_1[0x18]; u8 reserved_2[0x20]; u8 svlan_cfi[0x1]; u8 svlan_pcp[0x3]; u8 svlan_id[0xc]; u8 cvlan_cfi[0x1]; u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; u8 reserved_3[0x7a0]; }; enum { MLX5_EQC_STATUS_OK = 0x0, MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, }; enum { MLX5_EQ_STATE_ARMED = 0x9, MLX5_EQ_STATE_FIRED = 0xa, }; struct mlx5_ifc_eqc_bits { u8 status[0x4]; u8 reserved_0[0x9]; u8 ec[0x1]; u8 oi[0x1]; u8 reserved_1[0x5]; u8 st[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x20]; u8 reserved_4[0x14]; u8 page_offset[0x6]; u8 reserved_5[0x6]; u8 reserved_6[0x3]; u8 log_eq_size[0x5]; u8 uar_page[0x18]; u8 reserved_7[0x20]; u8 reserved_8[0x18]; u8 intr[0x8]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 reserved_10[0x18]; u8 reserved_11[0x60]; u8 reserved_12[0x8]; u8 consumer_counter[0x18]; u8 reserved_13[0x8]; u8 producer_counter[0x18]; u8 reserved_14[0x80]; }; enum { MLX5_DCTC_STATE_ACTIVE = 0x0, MLX5_DCTC_STATE_DRAINING = 0x1, MLX5_DCTC_STATE_DRAINED = 0x2, }; enum { MLX5_DCTC_CS_RES_DISABLE = 0x0, MLX5_DCTC_CS_RES_NA = 0x1, MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, }; enum { MLX5_DCTC_MTU_256_BYTES = 0x1, MLX5_DCTC_MTU_512_BYTES = 0x2, MLX5_DCTC_MTU_1K_BYTES = 0x3, MLX5_DCTC_MTU_2K_BYTES = 0x4, MLX5_DCTC_MTU_4K_BYTES = 0x5, }; struct mlx5_ifc_dctc_bits { u8 reserved_0[0x4]; u8 state[0x4]; u8 reserved_1[0x18]; u8 reserved_2[0x8]; u8 user_index[0x18]; u8 reserved_3[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; u8 atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; u8 rlky[0x1]; u8 reserved_4[0xe]; u8 reserved_5[0x8]; u8 cs_res[0x8]; u8 reserved_6[0x3]; u8 min_rnr_nak[0x5]; u8 reserved_7[0x8]; u8 reserved_8[0x8]; u8 srqn[0x18]; u8 reserved_9[0x8]; u8 pd[0x18]; u8 tclass[0x8]; u8 reserved_10[0x4]; u8 flow_label[0x14]; u8 dc_access_key[0x40]; u8 reserved_11[0x5]; u8 mtu[0x3]; u8 port[0x8]; u8 pkey_index[0x10]; u8 reserved_12[0x8]; u8 my_addr_index[0x8]; u8 reserved_13[0x8]; u8 hop_limit[0x8]; u8 dc_access_key_violation_count[0x20]; u8 reserved_14[0x14]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 ecn[0x2]; u8 dscp[0x6]; u8 reserved_15[0x40]; }; enum { MLX5_CQC_STATUS_OK = 0x0, MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, 
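/*
 * Illustrative sketch only: these values are reported in the cqc status
 * field; after a QUERY_CQ command the status could be read roughly as
 *
 *	void *cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
 *	u8 status = MLX5_GET(cqc, cqc, status);
 *
 * using the MLX5_ADDR_OF()/MLX5_GET() accessors from mlx5/device.h; variable
 * and member names are for illustration only.
 */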
MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, }; enum { CQE_SIZE_64 = 0x0, CQE_SIZE_128 = 0x1, }; enum { MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, }; enum { MLX5_CQ_STATE_SOLICITED_ARMED = 0x6, MLX5_CQ_STATE_ARMED = 0x9, MLX5_CQ_STATE_FIRED = 0xa, }; struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x2]; u8 dbr_umem_valid[0x1]; u8 reserved_at_7[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_1[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; u8 cq_period_mode[0x2]; u8 cqe_compression_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x20]; u8 reserved_4[0x14]; u8 page_offset[0x6]; u8 reserved_5[0x6]; u8 reserved_6[0x3]; u8 log_cq_size[0x5]; u8 uar_page[0x18]; u8 reserved_7[0x4]; u8 cq_period[0xc]; u8 cq_max_count[0x10]; u8 reserved_8[0x18]; u8 c_eqn[0x8]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 reserved_10[0x18]; u8 reserved_11[0x20]; u8 reserved_12[0x8]; u8 last_notified_index[0x18]; u8 reserved_13[0x8]; u8 last_solicit_index[0x18]; u8 reserved_14[0x8]; u8 consumer_counter[0x18]; u8 reserved_15[0x8]; u8 producer_counter[0x18]; u8 reserved_16[0x40]; u8 dbr_addr[0x40]; }; union mlx5_ifc_cong_control_roce_ecn_auto_bits { struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; u8 reserved_0[0x800]; }; struct mlx5_ifc_query_adapter_param_block_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 ieee_vendor_id[0x18]; u8 reserved_2[0x10]; u8 vsd_vendor_id[0x10]; u8 vsd[208][0x8]; u8 vsd_contd_psid[16][0x8]; }; union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { struct mlx5_ifc_modify_field_select_bits modify_field_select; struct mlx5_ifc_resize_field_select_bits resize_field_select; u8 reserved_0[0x20]; }; union mlx5_ifc_field_select_802_1_r_roce_auto_bits { struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; u8 reserved_0[0x20]; }; struct mlx5_ifc_bufferx_reg_bits { u8 reserved_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; u8 reserved_1[0xc]; u8 size[0xc]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; }; struct mlx5_ifc_config_item_bits { u8 valid[0x2]; u8 reserved_0[0x2]; u8 header_type[0x2]; u8 reserved_1[0x2]; u8 default_location[0x1]; u8 reserved_2[0x7]; u8 version[0x4]; u8 reserved_3[0x3]; u8 length[0x9]; u8 type[0x20]; u8 reserved_4[0x10]; u8 crc16[0x10]; }; enum { MLX5_XRQC_STATE_GOOD = 0x0, MLX5_XRQC_STATE_ERROR = 0x1, }; enum { MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0, MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1, }; enum { MLX5_XRQC_OFFLOAD_RNDV = 0x1, }; struct mlx5_ifc_tag_matching_topology_context_bits { u8 log_matching_list_sz[0x4]; u8 reserved_at_4[0xc]; u8 append_next_index[0x10]; u8 sw_phase_cnt[0x10]; u8 hw_phase_cnt[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_xrqc_bits { u8 state[0x4]; u8 rlkey[0x1]; u8 reserved_at_5[0xf]; u8 topology[0x4]; u8 reserved_at_18[0x4]; u8 offload[0x4]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 reserved_at_60[0xa0]; struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; u8 reserved_at_180[0x280]; struct mlx5_ifc_wq_bits wq; }; struct mlx5_ifc_nodnic_port_config_reg_bits { struct mlx5_ifc_nodnic_event_word_bits event; u8 network_en[0x1]; u8 dma_en[0x1]; u8 
promisc_en[0x1]; u8 promisc_multicast_en[0x1]; u8 reserved_0[0x17]; u8 receive_filter_en[0x5]; u8 reserved_1[0x10]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; u8 receive_filters_mgid_mac[64][0x8]; u8 gid[16][0x8]; u8 reserved_2[0x10]; u8 lid[0x10]; u8 reserved_3[0xc]; u8 sm_sl[0x4]; u8 sm_lid[0x10]; u8 completion_address_63_32[0x20]; u8 completion_address_31_12[0x14]; u8 reserved_4[0x6]; u8 log_cq_size[0x6]; u8 working_buffer_address_63_32[0x20]; u8 working_buffer_address_31_12[0x14]; u8 reserved_5[0xc]; struct mlx5_ifc_nodnic_cq_arming_word_bits arm_cq; u8 pkey_index[0x10]; u8 pkey[0x10]; struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring0; struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring1; struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring0; struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring1; u8 reserved_6[0x400]; }; union mlx5_ifc_event_auto_bits { struct mlx5_ifc_comp_event_bits comp_event; struct mlx5_ifc_dct_events_bits dct_events; struct mlx5_ifc_qp_events_bits qp_events; struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; struct mlx5_ifc_cq_error_bits cq_error; struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; struct mlx5_ifc_port_state_change_event_bits port_state_change_event; struct mlx5_ifc_gpio_event_bits gpio_event; struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; struct mlx5_ifc_stall_vl_event_bits stall_vl_event; struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; struct mlx5_ifc_pages_req_event_bits pages_req_event; struct mlx5_ifc_nic_vport_change_event_bits nic_vport_change_event; u8 reserved_0[0xe0]; }; struct mlx5_ifc_health_buffer_bits { u8 reserved_0[0x100]; u8 assert_existptr[0x20]; u8 assert_callra[0x20]; u8 reserved_1[0x40]; u8 fw_version[0x20]; u8 hw_id[0x20]; u8 reserved_2[0x20]; u8 irisc_index[0x8]; u8 synd[0x8]; u8 ext_synd[0x10]; }; struct mlx5_ifc_register_loopback_control_bits { u8 no_lb[0x1]; u8 reserved_0[0x7]; u8 port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x60]; }; struct mlx5_ifc_lrh_bits { u8 vl[4]; u8 lver[4]; u8 sl[4]; u8 reserved2[2]; u8 lnh[2]; u8 dlid[16]; u8 reserved5[5]; u8 pkt_len[11]; u8 slid[16]; }; struct mlx5_ifc_icmd_set_wol_rol_out_bits { u8 reserved_0[0x40]; u8 reserved_1[0x10]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; }; struct mlx5_ifc_icmd_set_wol_rol_in_bits { u8 reserved_0[0x40]; u8 rol_mode_valid[0x1]; u8 wol_mode_valid[0x1]; u8 reserved_1[0xe]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_2[0x7a0]; }; struct mlx5_ifc_icmd_set_virtual_mac_in_bits { u8 virtual_mac_en[0x1]; u8 mac_aux_v[0x1]; u8 reserved_0[0x1e]; u8 reserved_1[0x40]; struct mlx5_ifc_mac_address_layout_bits virtual_mac; u8 reserved_2[0x760]; }; struct mlx5_ifc_icmd_query_virtual_mac_out_bits { u8 virtual_mac_en[0x1]; u8 mac_aux_v[0x1]; u8 reserved_0[0x1e]; struct mlx5_ifc_mac_address_layout_bits permanent_mac; struct mlx5_ifc_mac_address_layout_bits virtual_mac; u8 reserved_1[0x760]; }; struct mlx5_ifc_icmd_query_fw_info_out_bits { struct mlx5_ifc_fw_version_bits fw_version; u8 reserved_0[0x10]; u8 hash_signature[0x10]; u8 psid[16][0x8]; u8 reserved_1[0x6e0]; }; struct mlx5_ifc_icmd_query_cap_in_bits { u8 reserved_0[0x10]; u8 capability_group[0x10]; }; struct mlx5_ifc_icmd_query_cap_general_bits { u8 nv_access[0x1]; u8 fw_info_psid[0x1]; u8 reserved_0[0x1e]; u8 reserved_1[0x16]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_2[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; u8 wol_b[0x1]; u8 wol_m[0x1]; u8 
wol_u[0x1]; u8 wol_p[0x1]; }; struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 reserved_1[0x7e0]; }; struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 reserved_1[0x7e0]; }; struct mlx5_ifc_icmd_ocbb_init_in_bits { u8 address_hi[0x20]; u8 address_lo[0x20]; u8 reserved_0[0x7c0]; }; struct mlx5_ifc_icmd_init_ocsd_in_bits { u8 reserved_0[0x20]; u8 address_hi[0x20]; u8 address_lo[0x20]; u8 reserved_1[0x7a0]; }; struct mlx5_ifc_icmd_access_reg_out_bits { u8 reserved_0[0x11]; u8 status[0x7]; u8 reserved_1[0x8]; u8 register_id[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x40]; u8 reserved_4[0x5]; u8 len[0xb]; u8 reserved_5[0x10]; u8 register_data[0][0x20]; }; enum { MLX5_ICMD_ACCESS_REG_IN_METHOD_QUERY = 0x1, MLX5_ICMD_ACCESS_REG_IN_METHOD_WRITE = 0x2, }; struct mlx5_ifc_icmd_access_reg_in_bits { u8 constant_1[0x5]; u8 constant_2[0xb]; u8 reserved_0[0x10]; u8 register_id[0x10]; u8 reserved_1[0x1]; u8 method[0x7]; u8 constant_3[0x8]; u8 reserved_2[0x40]; u8 constant_4[0x5]; u8 len[0xb]; u8 reserved_3[0x10]; u8 register_data[0][0x20]; }; enum { MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, }; struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x3f]; u8 state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2, }; struct mlx5_ifc_teardown_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 profile[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_delay_drop_params_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_delay_drop_params_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 delay_drop_timeout[0x10]; }; struct mlx5_ifc_query_delay_drop_params_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 delay_drop_timeout[0x10]; }; struct mlx5_ifc_query_delay_drop_params_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_suspend_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_suspend_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_sqerr2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_sqerr2rts_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_sqd2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_sqd2rts_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_set_wol_rol_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 
syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_wol_rol_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rol_mode_valid[0x1]; u8 wol_mode_valid[0x1]; u8 reserved_2[0xe]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_roce_address_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_roce_address_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x20]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_set_rdb_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_rdb_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x18]; u8 rdb_list_size[0x8]; struct mlx5_ifc_rdbc_bits rdb_context[0]; }; struct mlx5_ifc_set_mad_demux_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, }; struct mlx5_ifc_set_mad_demux_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x6]; u8 demux_mode[0x2]; u8 reserved_4[0x18]; }; struct mlx5_ifc_set_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x20]; u8 reserved_5[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; u8 reserved_6[0xc0]; }; struct mlx5_ifc_set_issi_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_issi_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 current_issi[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_hca_cap_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_hca_cap_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 }; struct mlx5_ifc_set_flow_table_root_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_flow_table_root_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x8]; u8 underlay_qpn[0x18]; u8 reserved_7[0x120]; }; struct mlx5_ifc_set_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 
reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x18]; u8 modify_enable_mask[0x8]; u8 reserved_7[0x20]; u8 flow_index[0x20]; u8 reserved_8[0xe0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_set_driver_version_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_driver_version_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; u8 driver_version[64][0x8]; }; struct mlx5_ifc_set_dc_cnak_trace_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_dc_cnak_trace_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 enable[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x160]; struct mlx5_ifc_cmd_pas_bits pas; }; struct mlx5_ifc_set_burst_size_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_burst_size_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x9]; u8 device_burst_size[0x17]; }; struct mlx5_ifc_rts2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rts2rts_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_rtr2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_rst2init_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rst2init_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_query_xrq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_xrqc_bits xrq_context; }; struct mlx5_ifc_query_xrq_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 xrqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_resume_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_resume_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_wol_rol_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_query_wol_rol_in_bits { u8 opcode[0x10]; u8 
reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; enum { MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, }; struct mlx5_ifc_query_vport_state_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; u8 admin_state[0x4]; u8 state[0x4]; }; enum { MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_query_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_vnic_env_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env; }; enum { MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0, }; struct mlx5_ifc_query_vnic_env_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_traffic_counter_bits received_errors; struct mlx5_ifc_traffic_counter_bits transmit_errors; struct mlx5_ifc_traffic_counter_bits received_ib_unicast; struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; struct mlx5_ifc_traffic_counter_bits received_ib_multicast; struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; struct mlx5_ifc_traffic_counter_bits received_eth_unicast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; struct mlx5_ifc_traffic_counter_bits received_eth_multicast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; u8 reserved_2[0xa00]; }; enum { MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, }; struct mlx5_ifc_query_vport_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x60]; u8 clear[0x1]; u8 reserved_4[0x1f]; u8 reserved_5[0x20]; }; struct mlx5_ifc_query_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_tisc_bits tis_context; }; struct mlx5_ifc_query_tis_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_query_tir_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 
reserved_1[0xc0]; struct mlx5_ifc_sqc_bits sq_context; }; struct mlx5_ifc_query_sq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 sqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_special_contexts_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; }; struct mlx5_ifc_query_special_contexts_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; enum { MLX5_SCHEDULING_ELEMENT_IN_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_query_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_query_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rqc_bits rq_context; }; struct mlx5_ifc_query_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_roce_address_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_query_roce_address_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rmpc_bits rmp_context; }; struct mlx5_ifc_query_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rmpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rdb_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; u8 rdb_list_size[0x8]; struct mlx5_ifc_rdbc_bits rdb_context[0]; }; struct mlx5_ifc_query_rdb_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 opt_param_mask[0x20]; u8 reserved_2[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_3[0x80]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 rx_write_requests[0x20]; u8 reserved_2[0x20]; u8 rx_read_requests[0x20]; u8 reserved_3[0x20]; u8 rx_atomic_requests[0x20]; u8 reserved_4[0x20]; u8 rx_dct_connect[0x20]; u8 reserved_5[0x20]; u8 out_of_buffer[0x20]; u8 reserved_7[0x20]; u8 
out_of_sequence[0x20]; u8 reserved_8[0x20]; u8 duplicate_request[0x20]; u8 reserved_9[0x20]; u8 rnr_nak_retry_err[0x20]; u8 reserved_10[0x20]; u8 packet_seq_err[0x20]; u8 reserved_11[0x20]; u8 implied_nak_seq_err[0x20]; u8 reserved_12[0x20]; u8 local_ack_timeout_err[0x20]; u8 reserved_13[0x20]; u8 resp_rnr_nak[0x20]; u8 reserved_14[0x20]; u8 req_rnr_retries_exceeded[0x20]; u8 reserved_15[0x460]; }; struct mlx5_ifc_query_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x80]; u8 clear[0x1]; u8 reserved_3[0x1f]; u8 reserved_4[0x18]; u8 counter_set_id[0x8]; }; struct mlx5_ifc_query_pages_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; }; enum { MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, }; struct mlx5_ifc_query_pages_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_nic_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_query_nic_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x5]; u8 allowed_list_type[0x3]; u8 reserved_4[0x18]; }; struct mlx5_ifc_query_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; u8 reserved_2[0x600]; u8 bsf0_klm0_pas_mtt0_1[16][0x8]; u8 bsf1_klm1_pas_mtt2_3[16][0x8]; }; struct mlx5_ifc_query_mkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 mkey_index[0x18]; u8 pg_access[0x1]; u8 reserved_3[0x1f]; }; struct mlx5_ifc_query_mad_demux_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 mad_dumux_parameters_block[0x20]; }; struct mlx5_ifc_query_mad_demux_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xa0]; u8 reserved_2[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; u8 reserved_3[0xc0]; }; struct mlx5_ifc_query_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x140]; }; struct mlx5_ifc_query_issi_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 current_issi[0x10]; u8 reserved_2[0xa0]; u8 supported_issi_reserved[76][0x8]; u8 supported_issi_dw0[0x20]; }; struct mlx5_ifc_query_issi_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_pkey_bits pkey[0]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; u8 
pkey_index[0x10]; }; struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 gids_num[0x10]; u8 reserved_2[0x10]; struct mlx5_ifc_array128_auto_bits gid[0]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; u8 gid_index[0x10]; }; struct mlx5_ifc_query_hca_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_query_hca_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_hca_cap_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; struct mlx5_ifc_query_hca_cap_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_flow_table_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x80]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_query_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x140]; }; struct mlx5_ifc_query_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x1c0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_query_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x40]; u8 flow_index[0x20]; u8 reserved_7[0xe0]; }; enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, }; struct mlx5_ifc_query_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xa0]; u8 start_flow_index[0x20]; u8 reserved_2[0x20]; u8 end_flow_index[0x20]; u8 reserved_3[0xa0]; u8 reserved_4[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; u8 reserved_5[0xe00]; }; struct mlx5_ifc_query_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 group_id[0x20]; u8 reserved_6[0x120]; }; struct mlx5_ifc_query_flow_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; }; struct mlx5_ifc_query_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x80]; u8 clear[0x1]; u8 reserved_at_c1[0xf]; u8 num_of_counters[0x10]; u8 reserved_at_e0[0x10]; u8 
flow_counter_id[0x10]; }; struct mlx5_ifc_query_esw_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_esw_vport_context_bits esw_vport_context; }; struct mlx5_ifc_query_esw_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; u8 reserved_2[0x40]; u8 event_bitmask[0x40]; u8 reserved_3[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_action_in_bits { u8 action_type[0x4]; u8 field[0xc]; u8 reserved_at_10[0x3]; u8 offset[0x5]; u8 reserved_at_18[0x3]; u8 length[0x5]; u8 data[0x20]; }; struct mlx5_ifc_add_action_in_bits { u8 action_type[0x4]; u8 field[0xc]; u8 reserved_at_10[0x10]; u8 data[0x20]; }; struct mlx5_ifc_copy_action_in_bits { u8 action_type[0x4]; u8 src_field[0xc]; u8 reserved_at_10[0x3]; u8 src_offset[0x5]; u8 reserved_at_18[0x3]; u8 length[0x5]; u8 reserved_at_20[0x4]; u8 dst_field[0xc]; u8 reserved_at_30[0x3]; u8 dst_offset[0x5]; u8 reserved_at_38[0x8]; }; union mlx5_ifc_set_add_copy_action_in_auto_bits { struct mlx5_ifc_set_action_in_bits set_action_in; struct mlx5_ifc_add_action_in_bits add_action_in; struct mlx5_ifc_copy_action_in_bits copy_action_in; u8 reserved_at_0[0x40]; }; enum { MLX5_ACTION_TYPE_SET = 0x1, MLX5_ACTION_TYPE_ADD = 0x2, MLX5_ACTION_TYPE_COPY = 0x3, }; enum { MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1, MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2, MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3, MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4, MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5, MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6, MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7, MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8, MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9, MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa, MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb, MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc, MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd, MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe, MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf, MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10, MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11, MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12, MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13, MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14, MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15, MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49, MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53, MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57, MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F, MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 
syndrome[0x20]; u8 modify_header_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_modify_header_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 table_type[0x8]; u8 reserved_at_68[0x10]; u8 num_of_actions[0x8]; union mlx5_ifc_set_add_copy_action_in_auto_bits actions[]; }; struct mlx5_ifc_dealloc_modify_header_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_modify_header_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 modify_header_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_modify_header_context_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 modify_header_id[0x20]; u8 reserved_at_60[0xa0]; }; struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; u8 reserved_2[0x180]; }; struct mlx5_ifc_query_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_dc_cnak_trace_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 enable[0x1]; u8 reserved_1[0x1f]; u8 reserved_2[0x160]; struct mlx5_ifc_cmd_pas_bits pas; }; struct mlx5_ifc_query_dc_cnak_trace_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_packet_reformat_context_in_bits { u8 reformat_type[0x8]; u8 reserved_at_8[0x4]; u8 reformat_param_0[0x4]; u8 reserved_at_10[0x6]; u8 reformat_data_size[0xa]; u8 reformat_param_1[0x8]; u8 reserved_at_28[0x8]; u8 reformat_data[2][0x8]; u8 more_reformat_data[][0x8]; }; struct mlx5_ifc_query_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0xa0]; struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0]; }; struct mlx5_ifc_query_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 packet_reformat_id[0x20]; u8 reserved_at_60[0xa0]; }; struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 packet_reformat_id[0x20]; u8 reserved_at_60[0x20]; }; enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3, MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, }; struct mlx5_ifc_alloc_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xa0]; struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context; }; struct mlx5_ifc_dealloc_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_20[0x10]; u8 op_mod[0x10]; u8 packet_reformat_id[0x20]; u8 reserved_60[0x20]; }; struct mlx5_ifc_diagnostic_cntr_struct_bits { u8 counter_id[0x10]; u8 sample_id[0x10]; u8 time_stamp_31_0[0x20]; u8 counter_value_h[0x20]; u8 counter_value_l[0x20]; }; enum { MLX5_DIAGNOSTIC_PARAMS_CONTEXT_ENABLE_ENABLE = 0x1, MLX5_DIAGNOSTIC_PARAMS_CONTEXT_ENABLE_DISABLE = 0x0, 
}; struct mlx5_ifc_query_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_cq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_status_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 enable[0x1]; u8 tag_enable[0x1]; u8 reserved_2[0x1e]; }; struct mlx5_ifc_query_cong_status_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_statistics_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 rp_cur_flows[0x20]; u8 sum_flows[0x20]; u8 rp_cnp_ignored_high[0x20]; u8 rp_cnp_ignored_low[0x20]; u8 rp_cnp_handled_high[0x20]; u8 rp_cnp_handled_low[0x20]; u8 reserved_2[0x100]; u8 time_stamp_high[0x20]; u8 time_stamp_low[0x20]; u8 accumulators_period[0x20]; u8 np_ecn_marked_roce_packets_high[0x20]; u8 np_ecn_marked_roce_packets_low[0x20]; u8 np_cnp_sent_high[0x20]; u8 np_cnp_sent_low[0x20]; u8 reserved_3[0x560]; }; struct mlx5_ifc_query_cong_statistics_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 clear[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_query_cong_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x1c]; u8 cong_protocol[0x4]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_burst_size_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x9]; u8 device_burst_size[0x17]; }; struct mlx5_ifc_query_burst_size_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_adapter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; }; struct mlx5_ifc_query_adapter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_qp_2rst_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_qp_2rst_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_qp_2err_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_qp_2err_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_para_vport_element_bits { u8 reserved_at_0[0xc]; u8 traffic_class[0x4]; u8 qos_para_vport_number[0x10]; }; struct mlx5_ifc_page_fault_resume_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_page_fault_resume_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 error[0x1]; u8 reserved_2[0x4]; u8 rdma[0x1]; u8 read_write[0x1]; u8 req_res[0x1]; u8 qpn[0x18]; u8 
reserved_3[0x20]; }; struct mlx5_ifc_nop_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_nop_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_modify_vport_state_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_NIC_VPORT = 0x0, MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2, }; enum { MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_DOWN = 0x0, MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_UP = 0x1, MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_FOLLOW = 0x2, }; struct mlx5_ifc_modify_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x18]; u8 admin_state[0x4]; u8 reserved_4[0x4]; }; struct mlx5_ifc_modify_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_tis_bitmask_bits { u8 reserved_at_0[0x20]; u8 reserved_at_20[0x1d]; u8 lag_tx_port_affinity[0x1]; u8 strict_lag_tx_port_affinity[0x1]; u8 prio[0x1]; }; struct mlx5_ifc_modify_tis_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; struct mlx5_ifc_modify_tis_bitmask_bits bitmask; u8 reserved_4[0x40]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_modify_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_SQ_BITMASK_PACKET_PACING_RATE_LIMIT_INDEX = 0x1 << 0, MLX5_MODIFY_SQ_BITMASK_QOS_PARA_VPORT_NUMBER = 0x1 << 1 }; struct mlx5_ifc_modify_tir_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_modify_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_sq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 sq_state[0x4]; u8 reserved_2[0x4]; u8 sqn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_modify_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; enum { MLX5_MODIFY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; enum { MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_BW_SHARE = 0x1, MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_MAX_AVERAGE_BW = 0x2, }; struct mlx5_ifc_modify_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x20]; u8 modify_bitmask[0x20]; u8 reserved_at_c0[0x40]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rqt_bitmask_bits { u8 reserved_at_0[0x20]; u8 reserved_at_20[0x1f]; u8 rqn_list[0x1]; }; struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; struct mlx5_ifc_rqt_bitmask_bits 
bitmask; u8 reserved_4[0x40]; struct mlx5_ifc_rqtc_bits ctx; }; struct mlx5_ifc_modify_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, }; struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rq_state[0x4]; u8 reserved_2[0x4]; u8 rqn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_modify_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rmp_bitmask_bits { u8 reserved[0x20]; u8 reserved1[0x1f]; u8 lwm[0x1]; }; struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rmp_state[0x4]; u8 reserved_2[0x4]; u8 rmpn[0x18]; u8 reserved_3[0x20]; struct mlx5_ifc_rmp_bitmask_bits bitmask; u8 reserved_4[0x40]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_modify_nic_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_nic_vport_field_select_bits { u8 reserved_0[0x14]; u8 disable_uc_local_lb[0x1]; u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; u8 port_guid[0x1]; u8 min_wqe_inline_mode[0x1]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; u8 permanent_address[0x1]; u8 addresses_list[0x1]; u8 roce_en[0x1]; u8 reserved_1[0x1]; }; struct mlx5_ifc_modify_nic_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; u8 reserved_3[0x780]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_modify_hca_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_grh_bits { u8 ip_version[4]; u8 traffic_class[8]; u8 flow_label[20]; u8 payload_length[16]; u8 next_header[8]; u8 hop_limit[8]; u8 sgid[128]; u8 dgid[128]; }; struct mlx5_ifc_bth_bits { u8 opcode[8]; u8 se[1]; u8 migreq[1]; u8 pad_count[2]; u8 tver[4]; u8 p_key[16]; u8 reserved8[8]; u8 dest_qp[24]; u8 ack_req[1]; u8 reserved7[7]; u8 psn[24]; }; struct mlx5_ifc_aeth_bits { u8 syndrome[8]; u8 msn[24]; }; struct mlx5_ifc_dceth_bits { u8 reserved0[8]; u8 session_id[24]; u8 reserved1[8]; u8 dci_dct[24]; }; struct mlx5_ifc_modify_hca_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_modify_flow_table_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; enum { MLX5_MODIFY_FLOW_TABLE_SELECT_MISS_ACTION_AND_ID = 0x1, MLX5_MODIFY_FLOW_TABLE_SELECT_LAG_MASTER_NEXT_TABLE_ID = 0x8000, }; struct mlx5_ifc_modify_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_modify_esw_vport_context_out_bits { u8 status[0x8]; u8 
reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_esw_vport_context_fields_select_bits { u8 reserved[0x1c]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_strip[0x1]; }; struct mlx5_ifc_modify_esw_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; struct mlx5_ifc_esw_vport_context_bits esw_vport_context; }; struct mlx5_ifc_modify_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, }; struct mlx5_ifc_modify_cq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_at_280[0x60]; u8 cq_umem_valid[0x1]; u8 reserved_at_2e1[0x1f]; u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_cong_status_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; u8 enable[0x1]; u8 tag_enable[0x1]; u8 reserved_3[0x1e]; }; struct mlx5_ifc_modify_cong_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_cong_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x1c]; u8 cong_protocol[0x4]; union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; u8 reserved_3[0x80]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_manage_pages_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 output_num_entries[0x20]; u8 reserved_1[0x20]; u8 pas[0][0x40]; }; enum { MLX5_PAGES_CANT_GIVE = 0x0, MLX5_PAGES_GIVE = 0x1, MLX5_PAGES_TAKE = 0x2, }; struct mlx5_ifc_manage_pages_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 input_num_entries[0x20]; u8 pas[0][0x40]; }; struct mlx5_ifc_mad_ifc_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 response_mad_packet[256][0x8]; }; struct mlx5_ifc_mad_ifc_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 remote_lid[0x10]; u8 reserved_2[0x8]; u8 port[0x8]; u8 reserved_3[0x20]; u8 mad[256][0x8]; }; struct mlx5_ifc_init_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_INIT_HCA_IN_OP_MOD_INIT = 0x0, MLX5_INIT_HCA_IN_OP_MOD_PRE_INIT = 0x1, }; struct mlx5_ifc_init_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_init2rtr_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_init2rtr_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct 
mlx5_ifc_init2init_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_init2init_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_get_dropped_packet_log_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 packet_headers_log[128][0x8]; u8 packet_syndrome[64][0x8]; }; struct mlx5_ifc_get_dropped_packet_log_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_encryption_key_obj_bits { u8 modify_field_select[0x40]; u8 reserved_at_40[0x14]; u8 key_size[0x4]; u8 reserved_at_58[0x4]; u8 key_type[0x4]; u8 reserved_at_60[0x8]; u8 pd[0x18]; u8 reserved_at_80[0x180]; u8 key[8][0x20]; u8 reserved_at_300[0x500]; }; struct mlx5_ifc_gen_eqe_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; u8 eqe[64][0x8]; }; struct mlx5_ifc_gen_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_enable_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; }; struct mlx5_ifc_enable_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_drain_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_drain_dct_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_disable_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; }; struct mlx5_ifc_disable_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_detach_from_mcg_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_detach_from_mcg_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_destroy_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_tis_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_tir_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 
reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_sq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 sqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; enum { MLX5_DESTROY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_destroy_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rqt_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rmpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_qos_para_vport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; struct mlx5_ifc_destroy_qos_para_vport_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 qos_para_vport_number[0x10]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_destroy_psv_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_psv_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 psvn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_mkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 mkey_index[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_flow_table_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x140]; }; struct mlx5_ifc_destroy_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct 
mlx5_ifc_destroy_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 group_id[0x20]; u8 reserved_6[0x120]; }; struct mlx5_ifc_destroy_encryption_key_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_encryption_key_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_dct_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_cq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_delete_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x140]; }; struct mlx5_ifc_delete_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x40]; u8 flow_index[0x20]; u8 reserved_7[0xe0]; }; struct mlx5_ifc_dealloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrcd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_uar_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 uar[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 transport_domain[0x18]; u8 reserved_3[0x20]; }; struct 
mlx5_ifc_dealloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_counter_id_bits { u8 reserved[0x10]; u8 counter_id[0x10]; }; struct mlx5_ifc_diagnostic_params_context_bits { u8 num_of_counters[0x10]; u8 reserved_2[0x8]; u8 log_num_of_samples[0x8]; u8 single[0x1]; u8 repetitive[0x1]; u8 sync[0x1]; u8 clear[0x1]; u8 on_demand[0x1]; u8 enable[0x1]; u8 reserved_3[0x12]; u8 log_sample_period[0x8]; u8 reserved_4[0x80]; struct mlx5_ifc_counter_id_bits counter_id[0]; }; struct mlx5_ifc_query_diagnostic_params_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_diagnostic_params_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; struct mlx5_ifc_diagnostic_params_context_bits diagnostic_params_ctx; }; struct mlx5_ifc_set_diagnostic_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; struct mlx5_ifc_diagnostic_params_context_bits diagnostic_params_ctx; }; struct mlx5_ifc_set_diagnostic_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_query_diagnostic_counters_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 num_of_samples[0x10]; u8 sample_index[0x10]; u8 reserved_2[0x20]; }; struct mlx5_ifc_diagnostic_counter_bits { u8 counter_id[0x10]; u8 sample_id[0x10]; u8 time_stamp_31_0[0x20]; u8 counter_value_h[0x20]; u8 counter_value_l[0x20]; }; struct mlx5_ifc_query_diagnostic_counters_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_diagnostic_counter_bits diag_counter[0]; }; struct mlx5_ifc_dealloc_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 counter_set_id[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_pd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 pd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; - u8 flow_counter_id[0x10]; + u8 flow_counter_id[0x20]; u8 reserved_3[0x20]; }; struct mlx5_ifc_create_xrq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 xrqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_xrqc_bits xrq_context; }; struct mlx5_ifc_deactivate_tracer_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_deactivate_tracer_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 mkey[0x20]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 xrc_srqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_xrc_srqc_bits 
xrc_srq_context_entry; u8 reserved_at_280[0x60]; u8 xrc_srq_umem_valid[0x1]; u8 reserved_at_2e1[0x1f]; u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 tisn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_create_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 tirn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_create_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 srqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; u8 reserved_3[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 sqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_create_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 scheduling_element_id[0x20]; u8 reserved_at_a0[0x160]; }; enum { MLX5_CREATE_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_create_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 reserved_at_60[0xa0]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rqtn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_create_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_create_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rmpn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_create_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 qpn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 input_qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_at_800[0x60]; u8 wq_umem_valid[0x1]; u8 reserved_at_861[0x1f]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_qos_para_vport_out_bits { u8 status[0x8]; u8 
reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 qos_para_vport_number[0x10]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_create_qos_para_vport_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x1c0]; }; struct mlx5_ifc_create_psv_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 reserved_2[0x8]; u8 psv0_index[0x18]; u8 reserved_3[0x8]; u8 psv1_index[0x18]; u8 reserved_4[0x8]; u8 psv2_index[0x18]; u8 reserved_5[0x8]; u8 psv3_index[0x18]; }; struct mlx5_ifc_create_psv_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 num_psv[0x4]; u8 reserved_2[0x4]; u8 pd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_create_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 mkey_index[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 pg_access[0x1]; u8 mkey_umem_valid[0x1]; u8 reserved_at_62[0x1e]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; u8 reserved_4[0x80]; u8 translations_octword_actual_size[0x20]; u8 reserved_5[0x560]; u8 klm_pas_mtt[0][0x20]; }; struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 table_id[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x20]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_create_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 group_id[0x18]; u8 reserved_2[0x20]; }; enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, }; struct mlx5_ifc_create_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x20]; u8 start_flow_index[0x20]; u8 reserved_7[0x20]; u8 end_flow_index[0x20]; u8 reserved_8[0xa0]; u8 reserved_9[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; u8 reserved_10[0xe00]; }; struct mlx5_ifc_create_encryption_key_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_encryption_key_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 obj_type[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; }; struct mlx5_ifc_create_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x18]; u8 eq_number[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; u8 reserved_3[0x40]; u8 event_bitmask[0x40]; u8 reserved_4[0x580]; u8 pas[0][0x40]; }; struct 
mlx5_ifc_create_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 dctn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; u8 reserved_3[0x180]; }; struct mlx5_ifc_create_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 cqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_at_280[0x60]; u8 cq_umem_valid[0x1]; u8 reserved_at_2e1[0x59f]; u8 pas[0][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_2[0x20]; }; enum { MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_config_int_moderation_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_attach_to_mcg_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_arm_xrq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_arm_xrq_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 xrqn[0x18]; u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, }; struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, }; struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srq_number[0x18]; u8 reserved_3[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_arm_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_alloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 xrcd[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_uar_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 uar[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 
reserved_1[0x8]; u8 transport_domain[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x18]; u8 counter_set_id[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_pd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 pd[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 flow_counter_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x38]; u8 flow_counter_bulk[0x8]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_activate_tracer_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_activate_tracer_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 mkey[0x20]; u8 reserved_2[0x20]; }; struct mlx5_ifc_set_rate_limit_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_rate_limit_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 rate_limit_index[0x10]; u8 reserved_at_60[0x20]; u8 rate_limit[0x20]; u8 burst_upper_bound[0x20]; u8 reserved_at_c0[0x10]; u8 typical_packet_size[0x10]; u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 register_data[0][0x20]; }; enum { MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_access_register_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 register_id[0x10]; u8 argument[0x20]; u8 register_data[0][0x20]; }; struct mlx5_ifc_sltp_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 reserved_2[0x20]; u8 reserved_3[0x7]; u8 polarity[0x1]; u8 ob_tap0[0x8]; u8 ob_tap1[0x8]; u8 ob_tap2[0x8]; u8 reserved_4[0xc]; u8 ob_preemp_mode[0x4]; u8 ob_reg[0x8]; u8 ob_bias[0x8]; u8 reserved_5[0x20]; }; struct mlx5_ifc_slrp_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 ib_sel[0x2]; u8 reserved_2[0x11]; u8 dp_sel[0x1]; u8 dp90sel[0x4]; u8 mix90phase[0x8]; u8 ffe_tap0[0x8]; u8 ffe_tap1[0x8]; u8 ffe_tap2[0x8]; u8 ffe_tap3[0x8]; u8 ffe_tap4[0x8]; u8 ffe_tap5[0x8]; u8 ffe_tap6[0x8]; u8 ffe_tap7[0x8]; u8 ffe_tap8[0x8]; u8 mixerbias_tap_amp[0x8]; u8 reserved_3[0x7]; u8 ffe_tap_en[0x9]; u8 ffe_tap_offset0[0x8]; u8 ffe_tap_offset1[0x8]; u8 slicer_offset0[0x10]; 
u8 mixer_offset0[0x10]; u8 mixer_offset1[0x10]; u8 mixerbgn_inp[0x8]; u8 mixerbgn_inn[0x8]; u8 mixerbgn_refp[0x8]; u8 mixerbgn_refn[0x8]; u8 sel_slicer_lctrl_h[0x1]; u8 sel_slicer_lctrl_l[0x1]; u8 reserved_4[0x1]; u8 ref_mixer_vreg[0x5]; u8 slicer_gctrl[0x8]; u8 lctrl_input[0x8]; u8 mixer_offset_cm1[0x8]; u8 common_mode[0x6]; u8 reserved_5[0x1]; u8 mixer_offset_cm0[0x9]; u8 reserved_6[0x7]; u8 slicer_offset_cm[0x9]; }; struct mlx5_ifc_slrg_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 time_to_link_up[0x10]; u8 reserved_2[0xc]; u8 grade_lane_speed[0x4]; u8 grade_version[0x8]; u8 grade[0x18]; u8 reserved_3[0x4]; u8 height_grade_type[0x4]; u8 height_grade[0x18]; u8 height_dz[0x10]; u8 height_dv[0x10]; u8 reserved_4[0x10]; u8 height_sigma[0x10]; u8 reserved_5[0x20]; u8 reserved_6[0x4]; u8 phase_grade_type[0x4]; u8 phase_grade[0x18]; u8 reserved_7[0x8]; u8 phase_eo_pos[0x8]; u8 reserved_8[0x8]; u8 phase_eo_neg[0x8]; u8 ffe_set_tested[0x10]; u8 test_errors_per_lane[0x10]; }; struct mlx5_ifc_pvlc_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1c]; u8 vl_hw_cap[0x4]; u8 reserved_3[0x1c]; u8 vl_admin[0x4]; u8 reserved_4[0x1c]; u8 vl_operational[0x4]; }; struct mlx5_ifc_pude_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_0[0x4]; u8 admin_status[0x4]; u8 reserved_1[0x4]; u8 oper_status[0x4]; u8 reserved_2[0x60]; }; enum { MLX5_PTYS_REG_PROTO_MASK_INFINIBAND = 0x1, MLX5_PTYS_REG_PROTO_MASK_ETHERNET = 0x4, }; struct mlx5_ifc_ptys_reg_bits { u8 reserved_0[0x1]; u8 an_disable_admin[0x1]; u8 an_disable_cap[0x1]; u8 reserved_1[0x4]; u8 force_tx_aba_param[0x1]; u8 local_port[0x8]; u8 reserved_2[0xd]; u8 proto_mask[0x3]; u8 an_status[0x4]; u8 reserved_3[0xc]; u8 data_rate_oper[0x10]; u8 ext_eth_proto_capability[0x20]; u8 eth_proto_capability[0x20]; u8 ib_link_width_capability[0x10]; u8 ib_proto_capability[0x10]; u8 ext_eth_proto_admin[0x20]; u8 eth_proto_admin[0x20]; u8 ib_link_width_admin[0x10]; u8 ib_proto_admin[0x10]; u8 ext_eth_proto_oper[0x20]; u8 eth_proto_oper[0x20]; u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; u8 reserved_4[0x1c]; u8 connector_type[0x4]; u8 eth_proto_lp_advertise[0x20]; u8 reserved_5[0x60]; }; struct mlx5_ifc_ptas_reg_bits { u8 reserved_0[0x20]; u8 algorithm_options[0x10]; u8 reserved_1[0x4]; u8 repetitions_mode[0x4]; u8 num_of_repetitions[0x8]; u8 grade_version[0x8]; u8 height_grade_type[0x4]; u8 phase_grade_type[0x4]; u8 height_grade_weight[0x8]; u8 phase_grade_weight[0x8]; u8 gisim_measure_bits[0x10]; u8 adaptive_tap_measure_bits[0x10]; u8 ber_bath_high_error_threshold[0x10]; u8 ber_bath_mid_error_threshold[0x10]; u8 ber_bath_low_error_threshold[0x10]; u8 one_ratio_high_threshold[0x10]; u8 one_ratio_high_mid_threshold[0x10]; u8 one_ratio_low_mid_threshold[0x10]; u8 one_ratio_low_threshold[0x10]; u8 ndeo_error_threshold[0x10]; u8 mixer_offset_step_size[0x10]; u8 reserved_2[0x8]; u8 mix90_phase_for_voltage_bath[0x8]; u8 mixer_offset_start[0x10]; u8 mixer_offset_end[0x10]; u8 reserved_3[0x15]; u8 ber_test_time[0xb]; }; struct mlx5_ifc_pspa_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 sub_port[0x8]; u8 reserved_0[0x8]; u8 reserved_1[0x20]; }; struct mlx5_ifc_ppsc_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x1c]; u8 wrps_admin[0x4]; u8 reserved_4[0x1c]; u8 wrps_status[0x4]; u8 up_th_vld[0x1]; u8 down_th_vld[0x1]; u8 reserved_5[0x6]; u8 up_threshold[0x8]; u8 reserved_6[0x8]; 
u8 down_threshold[0x8]; u8 reserved_7[0x20]; u8 reserved_8[0x1c]; u8 srps_admin[0x4]; u8 reserved_9[0x60]; }; struct mlx5_ifc_pplr_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x8]; u8 lb_cap[0x8]; u8 reserved_3[0x8]; u8 lb_en[0x8]; }; struct mlx5_ifc_pplm_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x20]; u8 port_profile_mode[0x8]; u8 static_port_profile[0x8]; u8 active_port_profile[0x8]; u8 reserved_at_58[0x8]; u8 retransmission_active[0x8]; u8 fec_mode_active[0x18]; u8 rs_fec_correction_bypass_cap[0x4]; u8 reserved_at_84[0x8]; u8 fec_override_cap_56g[0x4]; u8 fec_override_cap_100g[0x4]; u8 fec_override_cap_50g[0x4]; u8 fec_override_cap_25g[0x4]; u8 fec_override_cap_10g_40g[0x4]; u8 rs_fec_correction_bypass_admin[0x4]; u8 reserved_at_a4[0x8]; u8 fec_override_admin_56g[0x4]; u8 fec_override_admin_100g[0x4]; u8 fec_override_admin_50g[0x4]; u8 fec_override_admin_25g[0x4]; u8 fec_override_admin_10g_40g[0x4]; u8 fec_override_cap_400g_8x[0x10]; u8 fec_override_cap_200g_4x[0x10]; u8 fec_override_cap_100g_2x[0x10]; u8 fec_override_cap_50g_1x[0x10]; u8 fec_override_admin_400g_8x[0x10]; u8 fec_override_admin_200g_4x[0x10]; u8 fec_override_admin_100g_2x[0x10]; u8 fec_override_admin_50g_1x[0x10]; u8 reserved_at_140[0x140]; }; struct mlx5_ifc_ppll_reg_bits { u8 num_pll_groups[0x8]; u8 pll_group[0x8]; u8 reserved_0[0x4]; u8 num_plls[0x4]; u8 reserved_1[0x8]; u8 reserved_2[0x1f]; u8 ae[0x1]; u8 pll_status[4][0x40]; }; struct mlx5_ifc_ppad_reg_bits { u8 reserved_0[0x3]; u8 single_mac[0x1]; u8 reserved_1[0x4]; u8 local_port[0x8]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; u8 reserved_2[0x40]; }; struct mlx5_ifc_pmtu_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 max_mtu[0x10]; u8 reserved_2[0x10]; u8 admin_mtu[0x10]; u8 reserved_3[0x10]; u8 oper_mtu[0x10]; u8 reserved_4[0x10]; }; struct mlx5_ifc_pmpr_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x18]; u8 attenuation_5g[0x8]; u8 reserved_3[0x18]; u8 attenuation_7g[0x8]; u8 reserved_4[0x18]; u8 attenuation_12g[0x8]; }; struct mlx5_ifc_pmpe_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0xc]; u8 module_status[0x4]; u8 reserved_2[0x14]; u8 error_type[0x4]; u8 reserved_3[0x8]; u8 reserved_4[0x40]; }; struct mlx5_ifc_pmpc_reg_bits { u8 module_state_updated[32][0x8]; }; struct mlx5_ifc_pmlpn_reg_bits { u8 reserved_0[0x4]; u8 mlpn_status[0x4]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 e[0x1]; u8 reserved_2[0x1f]; }; struct mlx5_ifc_pmlp_reg_bits { u8 rxtx[0x1]; u8 reserved_0[0x7]; u8 local_port[0x8]; u8 reserved_1[0x8]; u8 width[0x8]; u8 lane0_module_mapping[0x20]; u8 lane1_module_mapping[0x20]; u8 lane2_module_mapping[0x20]; u8 lane3_module_mapping[0x20]; u8 reserved_2[0x160]; }; struct mlx5_ifc_pmaos_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0x4]; u8 admin_status[0x4]; u8 reserved_2[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_3[0x12]; u8 error_type[0x4]; u8 reserved_4[0x6]; u8 e[0x2]; u8 reserved_5[0x40]; }; struct mlx5_ifc_plpc_reg_bits { u8 reserved_0[0x4]; u8 profile_id[0xc]; u8 reserved_1[0x4]; u8 proto_mask[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x10]; u8 lane_speed[0x10]; u8 reserved_4[0x17]; u8 lpbf[0x1]; u8 fec_mode_policy[0x8]; u8 retransmission_capability[0x8]; u8 fec_mode_capability[0x18]; u8 retransmission_support_admin[0x8]; u8 fec_mode_support_admin[0x18]; u8 retransmission_request_admin[0x8]; u8 fec_mode_request_admin[0x18]; u8 
reserved_5[0x80]; }; struct mlx5_ifc_pll_status_data_bits { u8 reserved_0[0x1]; u8 lock_cal[0x1]; u8 lock_status[0x2]; u8 reserved_1[0x2]; u8 algo_f_ctrl[0xa]; u8 analog_algo_num_var[0x6]; u8 f_ctrl_measure[0xa]; u8 reserved_2[0x2]; u8 analog_var[0x6]; u8 reserved_3[0x2]; u8 high_var[0x6]; u8 reserved_4[0x2]; u8 low_var[0x6]; u8 reserved_5[0x2]; u8 mid_val[0x6]; }; struct mlx5_ifc_plib_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x8]; u8 ib_port[0x8]; u8 reserved_2[0x60]; }; struct mlx5_ifc_plbf_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0xd]; u8 lbf_mode[0x3]; u8 reserved_2[0x20]; }; struct mlx5_ifc_pipg_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 dic[0x1]; u8 reserved_2[0x19]; u8 ipg[0x4]; u8 reserved_3[0x2]; }; struct mlx5_ifc_pifr_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0xe0]; u8 port_filter[8][0x20]; u8 port_filter_update_en[8][0x20]; }; struct mlx5_ifc_phys_layer_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 symbol_errors_high[0x20]; u8 symbol_errors_low[0x20]; u8 sync_headers_errors_high[0x20]; u8 sync_headers_errors_low[0x20]; u8 edpl_bip_errors_lane0_high[0x20]; u8 edpl_bip_errors_lane0_low[0x20]; u8 edpl_bip_errors_lane1_high[0x20]; u8 edpl_bip_errors_lane1_low[0x20]; u8 edpl_bip_errors_lane2_high[0x20]; u8 edpl_bip_errors_lane2_low[0x20]; u8 edpl_bip_errors_lane3_high[0x20]; u8 edpl_bip_errors_lane3_low[0x20]; u8 fc_fec_corrected_blocks_lane0_high[0x20]; u8 fc_fec_corrected_blocks_lane0_low[0x20]; u8 fc_fec_corrected_blocks_lane1_high[0x20]; u8 fc_fec_corrected_blocks_lane1_low[0x20]; u8 fc_fec_corrected_blocks_lane2_high[0x20]; u8 fc_fec_corrected_blocks_lane2_low[0x20]; u8 fc_fec_corrected_blocks_lane3_high[0x20]; u8 fc_fec_corrected_blocks_lane3_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; u8 rs_fec_corrected_blocks_high[0x20]; u8 rs_fec_corrected_blocks_low[0x20]; u8 rs_fec_uncorrectable_blocks_high[0x20]; u8 rs_fec_uncorrectable_blocks_low[0x20]; u8 rs_fec_no_errors_blocks_high[0x20]; u8 rs_fec_no_errors_blocks_low[0x20]; u8 rs_fec_single_error_blocks_high[0x20]; u8 rs_fec_single_error_blocks_low[0x20]; u8 rs_fec_corrected_symbols_total_high[0x20]; u8 rs_fec_corrected_symbols_total_low[0x20]; u8 rs_fec_corrected_symbols_lane0_high[0x20]; u8 rs_fec_corrected_symbols_lane0_low[0x20]; u8 rs_fec_corrected_symbols_lane1_high[0x20]; u8 rs_fec_corrected_symbols_lane1_low[0x20]; u8 rs_fec_corrected_symbols_lane2_high[0x20]; u8 rs_fec_corrected_symbols_lane2_low[0x20]; u8 rs_fec_corrected_symbols_lane3_high[0x20]; u8 rs_fec_corrected_symbols_lane3_low[0x20]; u8 link_down_events[0x20]; u8 successful_recovery_events[0x20]; u8 reserved_0[0x180]; }; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_at_70[0x8]; u8 link_overrun_errors[0x8]; u8 reserved_at_80[0x10]; 
u8 vl_15_dropped[0x10]; u8 reserved_at_a0[0xa0]; }; struct mlx5_ifc_phys_layer_statistical_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 phy_received_bits_high[0x20]; u8 phy_received_bits_low[0x20]; u8 phy_symbol_errors_high[0x20]; u8 phy_symbol_errors_low[0x20]; u8 phy_corrected_bits_high[0x20]; u8 phy_corrected_bits_low[0x20]; u8 phy_corrected_bits_lane0_high[0x20]; u8 phy_corrected_bits_lane0_low[0x20]; u8 phy_corrected_bits_lane1_high[0x20]; u8 phy_corrected_bits_lane1_low[0x20]; u8 phy_corrected_bits_lane2_high[0x20]; u8 phy_corrected_bits_lane2_low[0x20]; u8 phy_corrected_bits_lane3_high[0x20]; u8 phy_corrected_bits_lane3_low[0x20]; u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_infiniband_port_cntrs_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_0[0x8]; u8 local_link_integrity_errors[0x4]; u8 excessive_buffer_overrun_errors[0x4]; u8 reserved_1[0x10]; u8 vl_15_dropped[0x10]; u8 port_xmit_data[0x20]; u8 port_rcv_data[0x20]; u8 port_xmit_pkts[0x20]; u8 port_rcv_pkts[0x20]; u8 port_xmit_wait[0x20]; u8 reserved_2[0x680]; }; struct mlx5_ifc_phrr_reg_bits { u8 clr[0x1]; u8 reserved_0[0x7]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 hist_group[0x8]; u8 reserved_2[0x10]; u8 hist_id[0x8]; u8 reserved_3[0x40]; u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 bin[10][0x20]; }; struct mlx5_ifc_phbr_for_prio_reg_bits { u8 reserved_0[0x18]; u8 prio[0x8]; }; struct mlx5_ifc_phbr_for_port_tclass_reg_bits { u8 reserved_0[0x18]; u8 tclass[0x8]; }; struct mlx5_ifc_phbr_binding_reg_bits { u8 opcode[0x4]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_1[0xe]; u8 hist_group[0x8]; u8 reserved_2[0x10]; u8 hist_id[0x8]; u8 reserved_3[0x10]; u8 hist_type[0x10]; u8 hist_parameters[0x20]; u8 hist_min_value[0x20]; u8 hist_max_value[0x20]; u8 sample_time[0x20]; }; enum { MLX5_PFCC_REG_PPAN_DISABLED = 0x0, MLX5_PFCC_REG_PPAN_ENABLED = 0x1, }; struct mlx5_ifc_pfcc_reg_bits { u8 dcbx_operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_1[0xc]; u8 shl_cap[0x1]; u8 shl_opr[0x1]; u8 ppan[0x4]; u8 reserved_2[0x4]; u8 prio_mask_tx[0x8]; u8 reserved_3[0x8]; u8 prio_mask_rx[0x8]; u8 pptx[0x1]; u8 aptx[0x1]; u8 reserved_4[0x6]; u8 pfctx[0x8]; u8 reserved_5[0x8]; u8 cbftx[0x8]; u8 pprx[0x1]; u8 aprx[0x1]; u8 reserved_6[0x6]; u8 pfcrx[0x8]; u8 reserved_7[0x8]; u8 cbfrx[0x8]; u8 device_stall_minor_watermark[0x10]; u8 device_stall_critical_watermark[0x10]; u8 reserved_8[0x60]; }; struct mlx5_ifc_pelc_reg_bits { u8 op[0x4]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 op_admin[0x8]; u8 op_capability[0x8]; u8 op_request[0x8]; u8 op_active[0x8]; u8 admin[0x40]; u8 capability[0x40]; u8 request[0x40]; u8 active[0x40]; u8 reserved_2[0x80]; }; struct mlx5_ifc_peir_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0xc]; u8 error_count[0x4]; u8 reserved_3[0x10]; u8 reserved_4[0xc]; u8 lane[0x4]; u8 reserved_5[0x8]; u8 error_type[0x8]; }; struct mlx5_ifc_qcam_access_reg_cap_mask { u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; u8 qpdpm[0x1]; u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; u8 qdpm[0x1]; u8 qpts[0x1]; u8 qcap[0x1]; u8 
qcam_access_reg_cap_mask_0[0x1]; };
struct mlx5_ifc_qcam_qos_feature_cap_mask { u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; u8 qpts_trust_both[0x1]; };
struct mlx5_ifc_qcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; u8 reserved_at_0[0x80]; } qos_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; u8 reserved_at_0[0x80]; } qos_feature_cap_mask; u8 reserved_at_1c0[0x80]; };
struct mlx5_ifc_pcam_enhanced_features_bits { u8 reserved_at_0[0x6d]; u8 rx_icrc_encapsulated_counter[0x1]; u8 reserved_at_6e[0x4]; u8 ptys_extended_ethernet[0x1]; u8 reserved_at_73[0x3]; u8 pfcc_mask[0x1]; u8 reserved_at_77[0x3]; u8 per_lane_error_counters[0x1]; u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; u8 ppcnt_discard_group[0x1]; u8 ppcnt_statistical_group[0x1]; };
struct mlx5_ifc_pcam_regs_5000_to_507f_bits { u8 port_access_reg_cap_mask_127_to_96[0x20]; u8 port_access_reg_cap_mask_95_to_64[0x20]; u8 reserved_at_40[0xe]; u8 pddr[0x1]; u8 reserved_at_4f[0xd]; u8 pplm[0x1]; u8 port_access_reg_cap_mask_34_to_32[0x3]; u8 port_access_reg_cap_mask_31_to_13[0x13]; u8 pbmc[0x1]; u8 pptb[0x1]; u8 port_access_reg_cap_mask_10_to_09[0x2]; u8 ppcnt[0x1]; u8 port_access_reg_cap_mask_07_to_00[0x8]; };
struct mlx5_ifc_pcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f; u8 reserved_at_0[0x80]; } port_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; u8 reserved_at_0[0x80]; } feature_cap_mask; u8 reserved_at_1c0[0xc0]; };
struct mlx5_ifc_mcam_enhanced_features_bits { u8 reserved_at_0[0x6e]; u8 pcie_status_and_power[0x1]; u8 reserved_at_111[0x10]; u8 pcie_performance_group[0x1]; };
struct mlx5_ifc_mcam_access_reg_bits { u8 reserved_at_0[0x1c]; u8 mcda[0x1]; u8 mcc[0x1]; u8 mcqi[0x1]; u8 reserved_at_1f[0x1]; u8 regs_95_to_64[0x20]; u8 regs_63_to_32[0x20]; u8 regs_31_to_0[0x20]; };
struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_mcam_access_reg_bits access_regs; u8 reserved_at_0[0x80]; } mng_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; u8 reserved_at_0[0x80]; } mng_feature_cap_mask; u8 reserved_at_1c0[0x80]; };
struct mlx5_ifc_pcap_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 port_capability_mask[4][0x20]; };
struct mlx5_ifc_pbmc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 xoff_timer_value[0x10]; u8 xoff_refresh[0x10]; u8 reserved_at_40[0x9]; u8 fullness_threshold[0x7]; u8 port_buffer_size[0x10]; struct mlx5_ifc_bufferx_reg_bits buffer[10]; u8 reserved_at_2e0[0x80]; };
struct mlx5_ifc_paos_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_0[0x4]; u8 admin_status[0x4]; u8 reserved_1[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_2[0x1c]; u8 e[0x2]; u8 reserved_3[0x40]; };
struct mlx5_ifc_pamp_reg_bits { u8 reserved_0[0x8]; u8 opamp_group[0x8]; u8 reserved_1[0xc]; u8 opamp_group_type[0x4]; u8 start_index[0x10]; u8 reserved_2[0x4]; u8 num_of_indices[0xc]; u8 index_data[18][0x10]; };
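/*
 * Editorial note -- illustrative sketch, not part of the upstream patch.
 * The *_reg_bits layouts in this header are never dereferenced directly;
 * drivers read and write the big-endian register images through the
 * MLX5_SET()/MLX5_GET() accessor macros and move them to and from firmware
 * with mlx5_core_access_reg().  Assuming that helper's usual signature and
 * the MLX5_REG_PMTU register id from mlx5/driver.h, a minimal query of the
 * operational MTU might look like the following (the function name is
 * hypothetical; error handling trimmed):
 */
#if 0	/* sketch only -- not compiled */
static int
example_query_oper_mtu(struct mlx5_core_dev *dev, u8 local_port, u16 *mtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	int err;

	MLX5_SET(pmtu_reg, in, local_port, local_port);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_REG_PMTU, 0, 0);	/* arg = 0, write = 0 -> query */
	if (err == 0)
		*mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
	return (err);
}
#endif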
struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits { u8 llr_rx_cells_high[0x20]; u8 llr_rx_cells_low[0x20]; u8 llr_rx_error_high[0x20]; u8 llr_rx_error_low[0x20]; u8 llr_rx_crc_error_high[0x20]; u8 llr_rx_crc_error_low[0x20]; u8 llr_tx_cells_high[0x20]; u8 llr_tx_cells_low[0x20]; u8 llr_tx_ret_cells_high[0x20]; u8 llr_tx_ret_cells_low[0x20]; u8 llr_tx_ret_events_high[0x20]; u8 llr_tx_ret_events_low[0x20]; u8 reserved_0[0x640]; }; struct mlx5_ifc_mtmp_reg_bits { u8 i[0x1]; u8 reserved_at_1[0x18]; u8 sensor_index[0x7]; u8 reserved_at_20[0x10]; u8 temperature[0x10]; u8 mte[0x1]; u8 mtr[0x1]; u8 reserved_at_42[0x0e]; u8 max_temperature[0x10]; u8 tee[0x2]; u8 reserved_at_62[0x0e]; u8 temperature_threshold_hi[0x10]; u8 reserved_at_80[0x10]; u8 temperature_threshold_lo[0x10]; u8 reserved_at_100[0x20]; u8 sensor_name[0x40]; }; struct mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_0[0x6]; u8 rx_lane[0x2]; u8 reserved_1[0x6]; u8 tx_lane[0x2]; u8 reserved_2[0x8]; u8 module[0x8]; }; struct mlx5_ifc_eth_per_traffic_class_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; u8 reserved_0[0x780]; }; struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits { u8 no_buffer_discard_uc_high[0x20]; u8 no_buffer_discard_uc_low[0x20]; u8 wred_discard_high[0x20]; u8 wred_discard_low[0x20]; u8 reserved_0[0x740]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_octets_high[0x20]; u8 rx_octets_low[0x20]; u8 reserved_0[0xc0]; u8 rx_frames_high[0x20]; u8 rx_frames_low[0x20]; u8 tx_octets_high[0x20]; u8 tx_octets_low[0x20]; u8 reserved_1[0xc0]; u8 tx_frames_high[0x20]; u8 tx_frames_low[0x20]; u8 rx_pause_high[0x20]; u8 rx_pause_low[0x20]; u8 rx_pause_duration_high[0x20]; u8 rx_pause_duration_low[0x20]; u8 tx_pause_high[0x20]; u8 tx_pause_low[0x20]; u8 tx_pause_duration_high[0x20]; u8 tx_pause_duration_low[0x20]; u8 rx_pause_transition_high[0x20]; u8 rx_pause_transition_low[0x20]; u8 rx_discards_high[0x20]; u8 rx_discards_low[0x20]; u8 device_stall_minor_watermark_cnt_high[0x20]; u8 device_stall_minor_watermark_cnt_low[0x20]; u8 device_stall_critical_watermark_cnt_high[0x20]; u8 device_stall_critical_watermark_cnt_low[0x20]; u8 reserved_2[0x340]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_high[0x20]; u8 port_transmit_wait_low[0x20]; u8 ecn_marked_high[0x20]; u8 ecn_marked_low[0x20]; u8 no_buffer_discard_mc_high[0x20]; u8 no_buffer_discard_mc_low[0x20]; u8 rx_ebp_high[0x20]; u8 rx_ebp_low[0x20]; u8 tx_ebp_high[0x20]; u8 tx_ebp_low[0x20]; u8 rx_buffer_almost_full_high[0x20]; u8 rx_buffer_almost_full_low[0x20]; u8 rx_buffer_full_high[0x20]; u8 rx_buffer_full_low[0x20]; u8 rx_icrc_encapsulated_high[0x20]; u8 rx_icrc_encapsulated_low[0x20]; u8 reserved_0[0x80]; u8 tx_stats_pkts64octets_high[0x20]; u8 tx_stats_pkts64octets_low[0x20]; u8 tx_stats_pkts65to127octets_high[0x20]; u8 tx_stats_pkts65to127octets_low[0x20]; u8 tx_stats_pkts128to255octets_high[0x20]; u8 tx_stats_pkts128to255octets_low[0x20]; u8 tx_stats_pkts256to511octets_high[0x20]; u8 tx_stats_pkts256to511octets_low[0x20]; u8 tx_stats_pkts512to1023octets_high[0x20]; u8 tx_stats_pkts512to1023octets_low[0x20]; u8 tx_stats_pkts1024to1518octets_high[0x20]; u8 tx_stats_pkts1024to1518octets_low[0x20]; u8 tx_stats_pkts1519to2047octets_high[0x20]; u8 tx_stats_pkts1519to2047octets_low[0x20]; u8 tx_stats_pkts2048to4095octets_high[0x20]; u8 tx_stats_pkts2048to4095octets_low[0x20]; u8 tx_stats_pkts4096to8191octets_high[0x20]; u8 tx_stats_pkts4096to8191octets_low[0x20]; u8 
tx_stats_pkts8192to10239octets_high[0x20]; u8 tx_stats_pkts8192to10239octets_low[0x20]; u8 reserved_1[0x2C0]; }; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 a_frames_transmitted_ok_high[0x20]; u8 a_frames_transmitted_ok_low[0x20]; u8 a_frames_received_ok_high[0x20]; u8 a_frames_received_ok_low[0x20]; u8 a_frame_check_sequence_errors_high[0x20]; u8 a_frame_check_sequence_errors_low[0x20]; u8 a_alignment_errors_high[0x20]; u8 a_alignment_errors_low[0x20]; u8 a_octets_transmitted_ok_high[0x20]; u8 a_octets_transmitted_ok_low[0x20]; u8 a_octets_received_ok_high[0x20]; u8 a_octets_received_ok_low[0x20]; u8 a_multicast_frames_xmitted_ok_high[0x20]; u8 a_multicast_frames_xmitted_ok_low[0x20]; u8 a_broadcast_frames_xmitted_ok_high[0x20]; u8 a_broadcast_frames_xmitted_ok_low[0x20]; u8 a_multicast_frames_received_ok_high[0x20]; u8 a_multicast_frames_received_ok_low[0x20]; u8 a_broadcast_frames_recieved_ok_high[0x20]; u8 a_broadcast_frames_recieved_ok_low[0x20]; u8 a_in_range_length_errors_high[0x20]; u8 a_in_range_length_errors_low[0x20]; u8 a_out_of_range_length_field_high[0x20]; u8 a_out_of_range_length_field_low[0x20]; u8 a_frame_too_long_errors_high[0x20]; u8 a_frame_too_long_errors_low[0x20]; u8 a_symbol_error_during_carrier_high[0x20]; u8 a_symbol_error_during_carrier_low[0x20]; u8 a_mac_control_frames_transmitted_high[0x20]; u8 a_mac_control_frames_transmitted_low[0x20]; u8 a_mac_control_frames_received_high[0x20]; u8 a_mac_control_frames_received_low[0x20]; u8 a_unsupported_opcodes_received_high[0x20]; u8 a_unsupported_opcodes_received_low[0x20]; u8 a_pause_mac_ctrl_frames_received_high[0x20]; u8 a_pause_mac_ctrl_frames_received_low[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; u8 reserved_0[0x300]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { u8 dot3stats_alignment_errors_high[0x20]; u8 dot3stats_alignment_errors_low[0x20]; u8 dot3stats_fcs_errors_high[0x20]; u8 dot3stats_fcs_errors_low[0x20]; u8 dot3stats_single_collision_frames_high[0x20]; u8 dot3stats_single_collision_frames_low[0x20]; u8 dot3stats_multiple_collision_frames_high[0x20]; u8 dot3stats_multiple_collision_frames_low[0x20]; u8 dot3stats_sqe_test_errors_high[0x20]; u8 dot3stats_sqe_test_errors_low[0x20]; u8 dot3stats_deferred_transmissions_high[0x20]; u8 dot3stats_deferred_transmissions_low[0x20]; u8 dot3stats_late_collisions_high[0x20]; u8 dot3stats_late_collisions_low[0x20]; u8 dot3stats_excessive_collisions_high[0x20]; u8 dot3stats_excessive_collisions_low[0x20]; u8 dot3stats_internal_mac_transmit_errors_high[0x20]; u8 dot3stats_internal_mac_transmit_errors_low[0x20]; u8 dot3stats_carrier_sense_errors_high[0x20]; u8 dot3stats_carrier_sense_errors_low[0x20]; u8 dot3stats_frame_too_longs_high[0x20]; u8 dot3stats_frame_too_longs_low[0x20]; u8 dot3stats_internal_mac_receive_errors_high[0x20]; u8 dot3stats_internal_mac_receive_errors_low[0x20]; u8 dot3stats_symbol_errors_high[0x20]; u8 dot3stats_symbol_errors_low[0x20]; u8 dot3control_in_unknown_opcodes_high[0x20]; u8 dot3control_in_unknown_opcodes_low[0x20]; u8 dot3in_pause_frames_high[0x20]; u8 dot3in_pause_frames_low[0x20]; u8 dot3out_pause_frames_high[0x20]; u8 dot3out_pause_frames_low[0x20]; u8 reserved_0[0x3c0]; }; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { u8 if_in_octets_high[0x20]; u8 if_in_octets_low[0x20]; u8 if_in_ucast_pkts_high[0x20]; u8 if_in_ucast_pkts_low[0x20]; u8 if_in_discards_high[0x20]; u8 if_in_discards_low[0x20]; u8 if_in_errors_high[0x20]; u8 
if_in_errors_low[0x20]; u8 if_in_unknown_protos_high[0x20]; u8 if_in_unknown_protos_low[0x20]; u8 if_out_octets_high[0x20]; u8 if_out_octets_low[0x20]; u8 if_out_ucast_pkts_high[0x20]; u8 if_out_ucast_pkts_low[0x20]; u8 if_out_discards_high[0x20]; u8 if_out_discards_low[0x20]; u8 if_out_errors_high[0x20]; u8 if_out_errors_low[0x20]; u8 if_in_multicast_pkts_high[0x20]; u8 if_in_multicast_pkts_low[0x20]; u8 if_in_broadcast_pkts_high[0x20]; u8 if_in_broadcast_pkts_low[0x20]; u8 if_out_multicast_pkts_high[0x20]; u8 if_out_multicast_pkts_low[0x20]; u8 if_out_broadcast_pkts_high[0x20]; u8 if_out_broadcast_pkts_low[0x20]; u8 reserved_0[0x480]; }; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { u8 ether_stats_drop_events_high[0x20]; u8 ether_stats_drop_events_low[0x20]; u8 ether_stats_octets_high[0x20]; u8 ether_stats_octets_low[0x20]; u8 ether_stats_pkts_high[0x20]; u8 ether_stats_pkts_low[0x20]; u8 ether_stats_broadcast_pkts_high[0x20]; u8 ether_stats_broadcast_pkts_low[0x20]; u8 ether_stats_multicast_pkts_high[0x20]; u8 ether_stats_multicast_pkts_low[0x20]; u8 ether_stats_crc_align_errors_high[0x20]; u8 ether_stats_crc_align_errors_low[0x20]; u8 ether_stats_undersize_pkts_high[0x20]; u8 ether_stats_undersize_pkts_low[0x20]; u8 ether_stats_oversize_pkts_high[0x20]; u8 ether_stats_oversize_pkts_low[0x20]; u8 ether_stats_fragments_high[0x20]; u8 ether_stats_fragments_low[0x20]; u8 ether_stats_jabbers_high[0x20]; u8 ether_stats_jabbers_low[0x20]; u8 ether_stats_collisions_high[0x20]; u8 ether_stats_collisions_low[0x20]; u8 ether_stats_pkts64octets_high[0x20]; u8 ether_stats_pkts64octets_low[0x20]; u8 ether_stats_pkts65to127octets_high[0x20]; u8 ether_stats_pkts65to127octets_low[0x20]; u8 ether_stats_pkts128to255octets_high[0x20]; u8 ether_stats_pkts128to255octets_low[0x20]; u8 ether_stats_pkts256to511octets_high[0x20]; u8 ether_stats_pkts256to511octets_low[0x20]; u8 ether_stats_pkts512to1023octets_high[0x20]; u8 ether_stats_pkts512to1023octets_low[0x20]; u8 ether_stats_pkts1024to1518octets_high[0x20]; u8 ether_stats_pkts1024to1518octets_low[0x20]; u8 ether_stats_pkts1519to2047octets_high[0x20]; u8 ether_stats_pkts1519to2047octets_low[0x20]; u8 ether_stats_pkts2048to4095octets_high[0x20]; u8 ether_stats_pkts2048to4095octets_low[0x20]; u8 ether_stats_pkts4096to8191octets_high[0x20]; u8 ether_stats_pkts4096to8191octets_low[0x20]; u8 ether_stats_pkts8192to10239octets_high[0x20]; u8 ether_stats_pkts8192to10239octets_low[0x20]; u8 reserved_0[0x280]; }; struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_0[0x8]; u8 local_link_integrity_errors[0x4]; u8 excessive_buffer_overrun_errors[0x4]; u8 reserved_1[0x10]; u8 vl_15_dropped[0x10]; u8 port_xmit_data[0x20]; u8 port_rcv_data[0x20]; u8 port_xmit_pkts[0x20]; u8 port_rcv_pkts[0x20]; u8 port_xmit_wait[0x20]; u8 reserved_2[0x680]; }; struct mlx5_ifc_trc_tlb_reg_bits { u8 reserved_0[0x80]; u8 tlb_addr[0][0x40]; }; struct mlx5_ifc_trc_read_fifo_reg_bits { u8 reserved_0[0x10]; u8 requested_event_num[0x10]; u8 reserved_1[0x20]; u8 reserved_2[0x10]; u8 acual_event_num[0x10]; u8 reserved_3[0x20]; u8 event[0][0x40]; }; struct mlx5_ifc_trc_lock_reg_bits { u8 reserved_0[0x1f]; u8 lock[0x1]; u8 reserved_1[0x60]; }; struct mlx5_ifc_trc_filter_reg_bits 
{ u8 status[0x1]; u8 reserved_0[0xf]; u8 filter_index[0x10]; u8 reserved_1[0x20]; u8 filter_val[0x20]; u8 reserved_2[0x1a0]; }; struct mlx5_ifc_trc_event_reg_bits { u8 status[0x1]; u8 reserved_0[0xf]; u8 event_index[0x10]; u8 reserved_1[0x20]; u8 event_id[0x20]; u8 event_selector_val[0x10]; u8 event_selector_size[0x10]; u8 reserved_2[0x180]; }; struct mlx5_ifc_trc_conf_reg_bits { u8 limit_en[0x1]; u8 reserved_0[0x3]; u8 dump_mode[0x4]; u8 reserved_1[0x15]; u8 state[0x3]; u8 reserved_2[0x20]; u8 limit_event_index[0x20]; u8 mkey[0x20]; u8 fifo_ready_ev_num[0x20]; u8 reserved_3[0x160]; }; struct mlx5_ifc_trc_cap_reg_bits { u8 reserved_0[0x18]; u8 dump_mode[0x8]; u8 reserved_1[0x20]; u8 num_of_events[0x10]; u8 num_of_filters[0x10]; u8 fifo_size[0x20]; u8 tlb_size[0x10]; u8 event_size[0x10]; u8 reserved_2[0x160]; }; struct mlx5_ifc_set_node_in_bits { u8 node_description[64][0x8]; }; struct mlx5_ifc_register_power_settings_bits { u8 reserved_0[0x18]; u8 power_settings_level[0x8]; u8 reserved_1[0x60]; }; struct mlx5_ifc_register_host_endianess_bits { u8 he[0x1]; u8 reserved_0[0x1f]; u8 reserved_1[0x60]; }; struct mlx5_ifc_register_diag_buffer_ctrl_bits { u8 physical_address[0x40]; }; struct mlx5_ifc_qtct_reg_bits { u8 operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 port_number[0x8]; u8 reserved_1[0xd]; u8 prio[0x3]; u8 reserved_2[0x1d]; u8 tclass[0x3]; }; struct mlx5_ifc_qpdp_reg_bits { u8 reserved_0[0x8]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1d]; u8 pprio[0x3]; }; struct mlx5_ifc_port_info_ro_fields_param_bits { u8 reserved_0[0x8]; u8 port[0x8]; u8 max_gid[0x10]; u8 reserved_1[0x20]; u8 port_guid[0x40]; }; struct mlx5_ifc_nvqc_reg_bits { u8 type[0x20]; u8 reserved_0[0x18]; u8 version[0x4]; u8 reserved_1[0x2]; u8 support_wr[0x1]; u8 support_rd[0x1]; }; struct mlx5_ifc_nvia_reg_bits { u8 reserved_0[0x1d]; u8 target[0x3]; u8 reserved_1[0x20]; }; struct mlx5_ifc_nvdi_reg_bits { struct mlx5_ifc_config_item_bits configuration_item_header; }; struct mlx5_ifc_nvda_reg_bits { struct mlx5_ifc_config_item_bits configuration_item_header; u8 configuration_item_data[0x20]; }; struct mlx5_ifc_node_info_ro_fields_param_bits { u8 system_image_guid[0x40]; u8 reserved_0[0x40]; u8 node_guid[0x40]; u8 reserved_1[0x10]; u8 max_pkey[0x10]; u8 reserved_2[0x20]; }; struct mlx5_ifc_ets_tcn_config_reg_bits { u8 g[0x1]; u8 b[0x1]; u8 r[0x1]; u8 reserved_0[0x9]; u8 group[0x4]; u8 reserved_1[0x9]; u8 bw_allocation[0x7]; u8 reserved_2[0xc]; u8 max_bw_units[0x4]; u8 reserved_3[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_ets_global_config_reg_bits { u8 reserved_0[0x2]; u8 r[0x1]; u8 reserved_1[0x1d]; u8 reserved_2[0xc]; u8 max_bw_units[0x4]; u8 reserved_3[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_qetc_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; u8 reserved_at_10[0x30]; struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; struct mlx5_ifc_ets_global_config_reg_bits global_configuration; }; struct mlx5_ifc_nodnic_mac_filters_bits { struct mlx5_ifc_mac_address_layout_bits mac_filter0; struct mlx5_ifc_mac_address_layout_bits mac_filter1; struct mlx5_ifc_mac_address_layout_bits mac_filter2; struct mlx5_ifc_mac_address_layout_bits mac_filter3; struct mlx5_ifc_mac_address_layout_bits mac_filter4; u8 reserved_0[0xc0]; }; struct mlx5_ifc_nodnic_gid_filters_bits { u8 mgid_filter0[16][0x8]; u8 mgid_filter1[16][0x8]; u8 mgid_filter2[16][0x8]; u8 mgid_filter3[16][0x8]; }; enum { MLX5_NODNIC_CONFIG_REG_NUM_PORTS_SINGLE_PORT = 0x0, 
MLX5_NODNIC_CONFIG_REG_NUM_PORTS_DUAL_PORT = 0x1, }; enum { MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_LEGACY_CQE = 0x0, MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_NEW_CQE = 0x1, }; struct mlx5_ifc_nodnic_config_reg_bits { u8 no_dram_nic_revision[0x8]; u8 hardware_format[0x8]; u8 support_receive_filter[0x1]; u8 support_promisc_filter[0x1]; u8 support_promisc_multicast_filter[0x1]; u8 reserved_0[0x2]; u8 log_working_buffer_size[0x3]; u8 log_pkey_table_size[0x4]; u8 reserved_1[0x3]; u8 num_ports[0x1]; u8 reserved_2[0x2]; u8 log_max_ring_size[0x6]; u8 reserved_3[0x18]; u8 lkey[0x20]; u8 cqe_format[0x4]; u8 reserved_4[0x1c]; u8 node_guid[0x40]; u8 reserved_5[0x740]; struct mlx5_ifc_nodnic_port_config_reg_bits port1_settings; struct mlx5_ifc_nodnic_port_config_reg_bits port2_settings; }; struct mlx5_ifc_vlan_layout_bits { u8 reserved_0[0x14]; u8 vlan[0xc]; u8 reserved_1[0x20]; }; struct mlx5_ifc_umr_pointer_desc_argument_bits { u8 reserved_0[0x20]; u8 mkey[0x20]; u8 addressh_63_32[0x20]; u8 addressl_31_0[0x20]; }; struct mlx5_ifc_ud_adrs_vector_bits { u8 dc_key[0x40]; u8 ext[0x1]; u8 reserved_0[0x7]; u8 destination_qp_dct[0x18]; u8 static_rate[0x4]; u8 sl_eth_prio[0x4]; u8 fl[0x1]; u8 mlid[0x7]; u8 rlid_udp_sport[0x10]; u8 reserved_1[0x20]; u8 rmac_47_16[0x20]; u8 rmac_15_0[0x10]; u8 tclass[0x8]; u8 hop_limit[0x8]; u8 reserved_2[0x1]; u8 grh[0x1]; u8 reserved_3[0x2]; u8 src_addr_index[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; }; struct mlx5_ifc_port_module_event_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0xc]; u8 module_status[0x4]; u8 reserved_2[0x14]; u8 error_type[0x4]; u8 reserved_3[0x8]; u8 reserved_4[0xa0]; }; struct mlx5_ifc_icmd_control_bits { u8 opcode[0x10]; u8 status[0x8]; u8 reserved_0[0x7]; u8 busy[0x1]; }; struct mlx5_ifc_eqe_bits { u8 reserved_0[0x8]; u8 event_type[0x8]; u8 reserved_1[0x8]; u8 event_sub_type[0x8]; u8 reserved_2[0xe0]; union mlx5_ifc_event_auto_bits event_data; u8 reserved_3[0x10]; u8 signature[0x8]; u8 reserved_4[0x7]; u8 owner[0x1]; }; enum { MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, }; struct mlx5_ifc_cmd_queue_entry_bits { u8 type[0x8]; u8 reserved_0[0x18]; u8 input_length[0x20]; u8 input_mailbox_pointer_63_32[0x20]; u8 input_mailbox_pointer_31_9[0x17]; u8 reserved_1[0x9]; u8 command_input_inline_data[16][0x8]; u8 command_output_inline_data[16][0x8]; u8 output_mailbox_pointer_63_32[0x20]; u8 output_mailbox_pointer_31_9[0x17]; u8 reserved_2[0x9]; u8 output_length[0x20]; u8 token[0x8]; u8 signature[0x8]; u8 reserved_3[0x8]; u8 status[0x7]; u8 ownership[0x1]; }; struct mlx5_ifc_cmd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 command_output[0x20]; }; struct mlx5_ifc_cmd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 command[0][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { u8 mailbox_data[512][0x8]; u8 reserved_0[0x180]; u8 next_pointer_63_32[0x20]; u8 next_pointer_31_10[0x16]; u8 reserved_1[0xa]; u8 block_number[0x20]; u8 reserved_2[0x8]; u8 token[0x8]; u8 ctrl_signature[0x8]; u8 signature[0x8]; }; struct mlx5_ifc_mtt_bits { u8 ptag_63_32[0x20]; u8 ptag_31_8[0x18]; u8 reserved_0[0x6]; u8 wr_en[0x1]; u8 rd_en[0x1]; }; struct mlx5_ifc_tls_progress_params_bits { u8 valid[0x1]; u8 reserved_at_1[0x7]; u8 pd[0x18]; u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; u8 reserved_at_64[0x4]; u8 hw_offset_record_number[0x18]; }; struct mlx5_ifc_tls_static_params_bits { u8 const_2[0x2]; u8 tls_version[0x4]; u8 const_1[0x2]; u8 
reserved_at_8[0x14]; u8 encryption_standard[0x4]; u8 reserved_at_20[0x20]; u8 initial_record_number[0x40]; u8 resync_tcp_sn[0x20]; u8 gcm_iv[0x20]; u8 implicit_iv[0x40]; u8 reserved_at_100[0x8]; u8 dek_index[0x18]; u8 reserved_at_120[0xe0]; }; /* Vendor Specific Capabilities, VSC */ enum { MLX5_VSC_DOMAIN_ICMD = 0x1, MLX5_VSC_DOMAIN_PROTECTED_CRSPACE = 0x6, MLX5_VSC_DOMAIN_SCAN_CRSPACE = 0x7, MLX5_VSC_DOMAIN_SEMAPHORES = 0xA, }; struct mlx5_ifc_vendor_specific_cap_bits { u8 type[0x8]; u8 length[0x8]; u8 next_pointer[0x8]; u8 capability_id[0x8]; u8 status[0x3]; u8 reserved_0[0xd]; u8 space[0x10]; u8 counter[0x20]; u8 semaphore[0x20]; u8 flag[0x1]; u8 reserved_1[0x1]; u8 address[0x1e]; u8 data[0x20]; }; struct mlx5_ifc_vsc_space_bits { u8 status[0x3]; u8 reserved0[0xd]; u8 space[0x10]; }; struct mlx5_ifc_vsc_addr_bits { u8 flag[0x1]; u8 reserved0[0x1]; u8 address[0x1e]; }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, }; enum { MLX5_HEALTH_SYNDR_FW_ERR = 0x1, MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, MLX5_HEALTH_SYNDR_EQ_INV = 0xe, MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10, }; struct mlx5_ifc_initial_seg_bits { u8 fw_rev_minor[0x10]; u8 fw_rev_major[0x10]; u8 cmd_interface_rev[0x10]; u8 fw_rev_subminor[0x10]; u8 reserved_0[0x40]; u8 cmdq_phy_addr_63_32[0x20]; u8 cmdq_phy_addr_31_12[0x14]; u8 reserved_1[0x2]; u8 nic_interface[0x2]; u8 log_cmdq_size[0x4]; u8 log_cmdq_stride[0x4]; u8 command_doorbell_vector[0x20]; u8 reserved_2[0xf00]; u8 initializing[0x1]; u8 reserved_3[0x4]; u8 nic_interface_supported[0x3]; u8 reserved_4[0x18]; struct mlx5_ifc_health_buffer_bits health_buffer; u8 no_dram_nic_offset[0x20]; u8 reserved_5[0x6de0]; u8 internal_timer_h[0x20]; u8 internal_timer_l[0x20]; u8 reserved_6[0x20]; u8 reserved_7[0x1f]; u8 clear_int[0x1]; u8 health_syndrome[0x8]; u8 health_counter[0x18]; u8 reserved_8[0x17fc0]; }; union mlx5_ifc_icmd_interface_document_bits { struct mlx5_ifc_fw_version_bits fw_version; struct mlx5_ifc_icmd_access_reg_in_bits icmd_access_reg_in; struct mlx5_ifc_icmd_access_reg_out_bits icmd_access_reg_out; struct mlx5_ifc_icmd_init_ocsd_in_bits icmd_init_ocsd_in; struct mlx5_ifc_icmd_ocbb_init_in_bits icmd_ocbb_init_in; struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits icmd_ocbb_query_etoc_stats_out; struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits icmd_ocbb_query_header_stats_out; struct mlx5_ifc_icmd_query_cap_general_bits icmd_query_cap_general; struct mlx5_ifc_icmd_query_cap_in_bits icmd_query_cap_in; struct mlx5_ifc_icmd_query_fw_info_out_bits icmd_query_fw_info_out; struct mlx5_ifc_icmd_query_virtual_mac_out_bits icmd_query_virtual_mac_out; struct mlx5_ifc_icmd_set_virtual_mac_in_bits icmd_set_virtual_mac_in; struct mlx5_ifc_icmd_set_wol_rol_in_bits icmd_set_wol_rol_in; struct mlx5_ifc_icmd_set_wol_rol_out_bits icmd_set_wol_rol_out; u8 reserved_0[0x42c0]; }; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct 
mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; struct mlx5_ifc_infiniband_port_cntrs_bits infiniband_port_cntrs; u8 reserved_0[0x7c0]; }; struct mlx5_ifc_ppcnt_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x8]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_1[0x1c]; u8 prio_tc[0x3]; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; struct mlx5_ifc_pcie_lanes_counters_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 error_counter_lane0[0x20]; u8 error_counter_lane1[0x20]; u8 error_counter_lane2[0x20]; u8 error_counter_lane3[0x20]; u8 error_counter_lane4[0x20]; u8 error_counter_lane5[0x20]; u8 error_counter_lane6[0x20]; u8 error_counter_lane7[0x20]; u8 error_counter_lane8[0x20]; u8 error_counter_lane9[0x20]; u8 error_counter_lane10[0x20]; u8 error_counter_lane11[0x20]; u8 error_counter_lane12[0x20]; u8 error_counter_lane13[0x20]; u8 error_counter_lane14[0x20]; u8 error_counter_lane15[0x20]; u8 reserved_at_240[0x580]; }; struct mlx5_ifc_pcie_lanes_counters_ext_bits { u8 reserved_at_0[0x40]; u8 error_counter_lane0[0x20]; u8 error_counter_lane1[0x20]; u8 error_counter_lane2[0x20]; u8 error_counter_lane3[0x20]; u8 error_counter_lane4[0x20]; u8 error_counter_lane5[0x20]; u8 error_counter_lane6[0x20]; u8 error_counter_lane7[0x20]; u8 error_counter_lane8[0x20]; u8 error_counter_lane9[0x20]; u8 error_counter_lane10[0x20]; u8 error_counter_lane11[0x20]; u8 error_counter_lane12[0x20]; u8 error_counter_lane13[0x20]; u8 error_counter_lane14[0x20]; u8 error_counter_lane15[0x20]; u8 reserved_at_240[0x580]; }; struct mlx5_ifc_pcie_perf_counters_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 rx_errors[0x20]; u8 tx_errors[0x20]; u8 l0_to_recovery_eieos[0x20]; u8 l0_to_recovery_ts[0x20]; u8 l0_to_recovery_framing[0x20]; u8 l0_to_recovery_retrain[0x20]; u8 crc_error_dllp[0x20]; u8 crc_error_tlp[0x20]; u8 tx_overflow_buffer_pkt[0x40]; u8 outbound_stalled_reads[0x20]; u8 outbound_stalled_writes[0x20]; u8 outbound_stalled_reads_events[0x20]; u8 outbound_stalled_writes_events[0x20]; u8 tx_overflow_buffer_marked_pkt[0x40]; u8 reserved_at_240[0x580]; }; struct mlx5_ifc_pcie_perf_counters_ext_bits { u8 reserved_at_0[0x40]; u8 rx_errors[0x20]; u8 tx_errors[0x20]; u8 reserved_at_80[0xc0]; u8 tx_overflow_buffer_pkt[0x40]; u8 outbound_stalled_reads[0x20]; u8 outbound_stalled_writes[0x20]; u8 outbound_stalled_reads_events[0x20]; u8 outbound_stalled_writes_events[0x20]; u8 tx_overflow_buffer_marked_pkt[0x40]; u8 reserved_at_240[0x580]; }; struct mlx5_ifc_pcie_timers_states_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 time_to_boot_image_start[0x20]; u8 time_to_link_image[0x20]; u8 calibration_time[0x20]; u8 time_to_first_perst[0x20]; u8 time_to_detect_state[0x20]; u8 time_to_l0[0x20]; u8 time_to_crs_en[0x20]; u8 time_to_plastic_image_start[0x20]; u8 time_to_iron_image_start[0x20]; u8 perst_handler[0x20]; u8 times_in_l1[0x20]; u8 
times_in_l23[0x20]; u8 dl_down[0x20]; u8 config_cycle1usec[0x20]; u8 config_cycle2to7usec[0x20]; u8 config_cycle8to15usec[0x20]; u8 config_cycle16to63usec[0x20]; u8 config_cycle64usec[0x20]; u8 correctable_err_msg_sent[0x20]; u8 non_fatal_err_msg_sent[0x20]; u8 fatal_err_msg_sent[0x20]; u8 reserved_at_2e0[0x4e0]; }; struct mlx5_ifc_pcie_timers_states_ext_bits { u8 reserved_at_0[0x40]; u8 time_to_boot_image_start[0x20]; u8 time_to_link_image[0x20]; u8 calibration_time[0x20]; u8 time_to_first_perst[0x20]; u8 time_to_detect_state[0x20]; u8 time_to_l0[0x20]; u8 time_to_crs_en[0x20]; u8 time_to_plastic_image_start[0x20]; u8 time_to_iron_image_start[0x20]; u8 perst_handler[0x20]; u8 times_in_l1[0x20]; u8 times_in_l23[0x20]; u8 dl_down[0x20]; u8 config_cycle1usec[0x20]; u8 config_cycle2to7usec[0x20]; u8 config_cycle8to15usec[0x20]; u8 config_cycle16to63usec[0x20]; u8 config_cycle64usec[0x20]; u8 correctable_err_msg_sent[0x20]; u8 non_fatal_err_msg_sent[0x20]; u8 fatal_err_msg_sent[0x20]; u8 reserved_at_2e0[0x4e0]; }; union mlx5_ifc_mpcnt_reg_counter_set_auto_bits { struct mlx5_ifc_pcie_perf_counters_bits pcie_perf_counters; struct mlx5_ifc_pcie_lanes_counters_bits pcie_lanes_counters; struct mlx5_ifc_pcie_timers_states_bits pcie_timers_states; u8 reserved_at_0[0x7c0]; }; union mlx5_ifc_mpcnt_reg_counter_set_auto_ext_bits { struct mlx5_ifc_pcie_perf_counters_ext_bits pcie_perf_counters_ext; struct mlx5_ifc_pcie_lanes_counters_ext_bits pcie_lanes_counters_ext; struct mlx5_ifc_pcie_timers_states_ext_bits pcie_timers_states_ext; u8 reserved_at_0[0x7c0]; }; struct mlx5_ifc_mpcnt_reg_bits { u8 reserved_at_0[0x2]; u8 depth[0x6]; u8 pcie_index[0x8]; u8 node[0x8]; u8 reserved_at_18[0x2]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_at_21[0x1f]; union mlx5_ifc_mpcnt_reg_counter_set_auto_bits counter_set; }; struct mlx5_ifc_mpcnt_reg_ext_bits { u8 reserved_at_0[0x2]; u8 depth[0x6]; u8 pcie_index[0x8]; u8 node[0x8]; u8 reserved_at_18[0x2]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_at_21[0x1f]; union mlx5_ifc_mpcnt_reg_counter_set_auto_ext_bits counter_set; }; struct mlx5_ifc_monitor_opcodes_layout_bits { u8 reserved_at_0[0x10]; u8 monitor_opcode[0x10]; }; union mlx5_ifc_pddr_status_opcode_bits { struct mlx5_ifc_monitor_opcodes_layout_bits monitor_opcodes; u8 reserved_at_0[0x20]; }; struct mlx5_ifc_troubleshooting_info_page_layout_bits { u8 reserved_at_0[0x10]; u8 group_opcode[0x10]; union mlx5_ifc_pddr_status_opcode_bits status_opcode; u8 user_feedback_data[0x10]; u8 user_feedback_index[0x10]; u8 status_message[0x760]; }; union mlx5_ifc_pddr_page_data_bits { struct mlx5_ifc_troubleshooting_info_page_layout_bits troubleshooting_info_page; struct mlx5_ifc_pddr_module_info_bits pddr_module_info; u8 reserved_at_0[0x7c0]; }; struct mlx5_ifc_pddr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_at_12[0xe]; u8 reserved_at_20[0x18]; u8 page_select[0x8]; union mlx5_ifc_pddr_page_data_bits page_data; }; enum { MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN = 0x9050, MLX5_MPEIN_PWR_STATUS_INVALID = 0, MLX5_MPEIN_PWR_STATUS_SUFFICIENT = 1, MLX5_MPEIN_PWR_STATUS_INSUFFICIENT = 2, }; struct mlx5_ifc_mpein_reg_bits { u8 reserved_at_0[0x2]; u8 depth[0x6]; u8 pcie_index[0x8]; u8 node[0x8]; u8 reserved_at_18[0x8]; u8 capability_mask[0x20]; u8 reserved_at_40[0x8]; u8 link_width_enabled[0x8]; u8 link_speed_enabled[0x10]; u8 lane0_physical_position[0x8]; u8 link_width_active[0x8]; u8 link_speed_active[0x10]; u8 num_of_pfs[0x10]; u8 num_of_vfs[0x10]; u8 bdf0[0x10]; u8 reserved_at_b0[0x10]; u8 
max_read_request_size[0x4]; u8 max_payload_size[0x4]; u8 reserved_at_c8[0x5]; u8 pwr_status[0x3]; u8 port_type[0x4]; u8 reserved_at_d4[0xb]; u8 lane_reversal[0x1]; u8 reserved_at_e0[0x14]; u8 pci_power[0xc]; u8 reserved_at_100[0x20]; u8 device_status[0x10]; u8 port_state[0x8]; u8 reserved_at_138[0x8]; u8 reserved_at_140[0x10]; u8 receiver_detect_result[0x10]; u8 reserved_at_160[0x20]; }; struct mlx5_ifc_mpein_reg_ext_bits { u8 reserved_at_0[0x2]; u8 depth[0x6]; u8 pcie_index[0x8]; u8 node[0x8]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x8]; u8 link_width_enabled[0x8]; u8 link_speed_enabled[0x10]; u8 lane0_physical_position[0x8]; u8 link_width_active[0x8]; u8 link_speed_active[0x10]; u8 num_of_pfs[0x10]; u8 num_of_vfs[0x10]; u8 bdf0[0x10]; u8 reserved_at_b0[0x10]; u8 max_read_request_size[0x4]; u8 max_payload_size[0x4]; u8 reserved_at_c8[0x5]; u8 pwr_status[0x3]; u8 port_type[0x4]; u8 reserved_at_d4[0xb]; u8 lane_reversal[0x1]; }; struct mlx5_ifc_mcqi_cap_bits { u8 supported_info_bitmask[0x20]; u8 component_size[0x20]; u8 max_component_size[0x20]; u8 log_mcda_word_size[0x4]; u8 reserved_at_64[0xc]; u8 mcda_max_write_size[0x10]; u8 rd_en[0x1]; u8 reserved_at_81[0x1]; u8 match_chip_id[0x1]; u8 match_psid[0x1]; u8 check_user_timestamp[0x1]; u8 match_base_guid_mac[0x1]; u8 reserved_at_86[0x1a]; }; struct mlx5_ifc_mcqi_reg_bits { u8 read_pending_component[0x1]; u8 reserved_at_1[0xf]; u8 component_index[0x10]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x1b]; u8 info_type[0x5]; u8 info_size[0x20]; u8 offset[0x20]; u8 reserved_at_a0[0x10]; u8 data_size[0x10]; u8 data[0][0x20]; }; struct mlx5_ifc_mcc_reg_bits { u8 reserved_at_0[0x4]; u8 time_elapsed_since_last_cmd[0xc]; u8 reserved_at_10[0x8]; u8 instruction[0x8]; u8 reserved_at_20[0x10]; u8 component_index[0x10]; u8 reserved_at_40[0x8]; u8 update_handle[0x18]; u8 handle_owner_type[0x4]; u8 handle_owner_host_id[0x4]; u8 reserved_at_68[0x1]; u8 control_progress[0x7]; u8 error_code[0x8]; u8 reserved_at_78[0x4]; u8 control_state[0x4]; u8 component_size[0x20]; u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_mcda_reg_bits { u8 reserved_at_0[0x8]; u8 update_handle[0x18]; u8 offset[0x20]; u8 reserved_at_40[0x10]; u8 size[0x10]; u8 reserved_at_60[0x20]; u8 data[0][0x20]; }; union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits ib_portcntrs_attribute_grp_data; struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits eth_per_traffic_class_cong_layout; struct mlx5_ifc_eth_per_traffic_class_layout_bits eth_per_traffic_class_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits link_level_retrans_cntr_grp_date; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; struct mlx5_ifc_pbmc_reg_bits pbmc_reg; struct mlx5_ifc_pcap_reg_bits pcap_reg; struct mlx5_ifc_peir_reg_bits 
peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; struct mlx5_ifc_phbr_binding_reg_bits phbr_binding_reg; struct mlx5_ifc_phbr_for_port_tclass_reg_bits phbr_for_port_tclass_reg; struct mlx5_ifc_phbr_for_prio_reg_bits phbr_for_prio_reg; struct mlx5_ifc_phrr_reg_bits phrr_reg; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_pifr_reg_bits pifr_reg; struct mlx5_ifc_pipg_reg_bits pipg_reg; struct mlx5_ifc_plbf_reg_bits plbf_reg; struct mlx5_ifc_plib_reg_bits plib_reg; struct mlx5_ifc_pll_status_data_bits pll_status_data; struct mlx5_ifc_plpc_reg_bits plpc_reg; struct mlx5_ifc_pmaos_reg_bits pmaos_reg; struct mlx5_ifc_pmlp_reg_bits pmlp_reg; struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; struct mlx5_ifc_pmpc_reg_bits pmpc_reg; struct mlx5_ifc_pmpe_reg_bits pmpe_reg; struct mlx5_ifc_pmpr_reg_bits pmpr_reg; struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; struct mlx5_ifc_ppll_reg_bits ppll_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits ptys_reg; struct mlx5_ifc_pude_reg_bits pude_reg; struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_slrp_reg_bits slrp_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; u8 reserved_0[0x7880]; }; union mlx5_ifc_debug_enhancements_document_bits { struct mlx5_ifc_health_buffer_bits health_buffer; u8 reserved_0[0x200]; }; union mlx5_ifc_no_dram_nic_document_bits { struct mlx5_ifc_nodnic_config_reg_bits nodnic_config_reg; struct mlx5_ifc_nodnic_cq_arming_word_bits nodnic_cq_arming_word; struct mlx5_ifc_nodnic_event_word_bits nodnic_event_word; struct mlx5_ifc_nodnic_gid_filters_bits nodnic_gid_filters; struct mlx5_ifc_nodnic_mac_filters_bits nodnic_mac_filters; struct mlx5_ifc_nodnic_port_config_reg_bits nodnic_port_config_reg; struct mlx5_ifc_nodnic_ring_config_reg_bits nodnic_ring_config_reg; struct mlx5_ifc_nodnic_ring_doorbell_bits nodnic_ring_doorbell; u8 reserved_0[0x3160]; }; union mlx5_ifc_uplink_pci_interface_document_bits { struct mlx5_ifc_initial_seg_bits initial_seg; struct mlx5_ifc_vendor_specific_cap_bits vendor_specific_cap; u8 reserved_0[0x20120]; }; struct mlx5_ifc_qpdpm_dscp_reg_bits { u8 e[0x1]; u8 reserved_at_01[0x0b]; u8 prio[0x04]; }; struct mlx5_ifc_qpdpm_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; }; struct mlx5_ifc_qpts_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x2d]; u8 trust_state[0x3]; }; struct mlx5_ifc_mfrl_reg_bits { u8 reserved_at_0[0x38]; u8 reset_level[0x8]; }; enum { MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTCAP = 0x9009, MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTECR = 0x9109, MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTMP = 0x900a, MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTWE = 0x900b, MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTBR = 0x900f, MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MTEWE = 0x910b, MLX5_MAX_TEMPERATURE = 16, }; struct mlx5_ifc_mtbr_temp_record_bits { u8 max_temperature[0x10]; u8 temperature[0x10]; }; struct mlx5_ifc_mtbr_reg_bits { u8 reserved_at_0[0x14]; u8 base_sensor_index[0xc]; u8 reserved_at_20[0x18]; u8 num_rec[0x8]; u8 reserved_at_40[0x40]; struct mlx5_ifc_mtbr_temp_record_bits temperature_record[MLX5_MAX_TEMPERATURE]; }; struct mlx5_ifc_mtbr_reg_ext_bits { u8 reserved_at_0[0x14]; u8 
base_sensor_index[0xc]; u8 reserved_at_20[0x18]; u8 num_rec[0x8]; u8 reserved_at_40[0x40]; struct mlx5_ifc_mtbr_temp_record_bits temperature_record[MLX5_MAX_TEMPERATURE]; }; struct mlx5_ifc_mtcap_bits { u8 reserved_at_0[0x19]; u8 sensor_count[0x7]; u8 reserved_at_20[0x19]; u8 internal_sensor_count[0x7]; u8 sensor_map[0x40]; }; struct mlx5_ifc_mtcap_ext_bits { u8 reserved_at_0[0x19]; u8 sensor_count[0x7]; u8 reserved_at_20[0x20]; u8 sensor_map[0x40]; }; struct mlx5_ifc_mtecr_bits { u8 reserved_at_0[0x4]; u8 last_sensor[0xc]; u8 reserved_at_10[0x4]; u8 sensor_count[0xc]; u8 reserved_at_20[0x19]; u8 internal_sensor_count[0x7]; u8 sensor_map_0[0x20]; u8 reserved_at_60[0x2a0]; }; struct mlx5_ifc_mtecr_ext_bits { u8 reserved_at_0[0x4]; u8 last_sensor[0xc]; u8 reserved_at_10[0x4]; u8 sensor_count[0xc]; u8 reserved_at_20[0x20]; u8 sensor_map_0[0x20]; u8 reserved_at_60[0x2a0]; }; struct mlx5_ifc_mtewe_bits { u8 reserved_at_0[0x4]; u8 last_sensor[0xc]; u8 reserved_at_10[0x4]; u8 sensor_count[0xc]; u8 sensor_warning_0[0x20]; u8 reserved_at_40[0x2a0]; }; struct mlx5_ifc_mtewe_ext_bits { u8 reserved_at_0[0x4]; u8 last_sensor[0xc]; u8 reserved_at_10[0x4]; u8 sensor_count[0xc]; u8 sensor_warning_0[0x20]; u8 reserved_at_40[0x2a0]; }; struct mlx5_ifc_mtmp_bits { u8 reserved_at_0[0x14]; u8 sensor_index[0xc]; u8 reserved_at_20[0x10]; u8 temperature[0x10]; u8 mte[0x1]; u8 mtr[0x1]; u8 reserved_at_42[0xe]; u8 max_temperature[0x10]; u8 tee[0x2]; u8 reserved_at_62[0xe]; u8 temperature_threshold_hi[0x10]; u8 reserved_at_80[0x10]; u8 temperature_threshold_lo[0x10]; u8 reserved_at_a0[0x20]; u8 sensor_name_hi[0x20]; u8 sensor_name_lo[0x20]; }; struct mlx5_ifc_mtmp_ext_bits { u8 reserved_at_0[0x14]; u8 sensor_index[0xc]; u8 reserved_at_20[0x10]; u8 temperature[0x10]; u8 mte[0x1]; u8 mtr[0x1]; u8 reserved_at_42[0xe]; u8 max_temperature[0x10]; u8 tee[0x2]; u8 reserved_at_62[0xe]; u8 temperature_threshold_hi[0x10]; u8 reserved_at_80[0x10]; u8 temperature_threshold_lo[0x10]; u8 reserved_at_a0[0x20]; u8 sensor_name_hi[0x20]; u8 sensor_name_lo[0x20]; }; struct mlx5_ifc_general_obj_in_cmd_hdr_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 vhca_tunnel_id[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_general_obj_out_cmd_hdr_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_umem_bits { u8 reserved_at_0[0x80]; u8 reserved_at_80[0x1b]; u8 log_page_size[0x5]; u8 page_offset[0x20]; u8 num_of_mtt[0x40]; struct mlx5_ifc_mtt_bits mtt[0]; }; struct mlx5_ifc_uctx_bits { u8 cap[0x20]; u8 reserved_at_20[0x160]; }; struct mlx5_ifc_create_umem_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_umem_bits umem; }; struct mlx5_ifc_create_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_uctx_bits uctx; }; struct mlx5_ifc_destroy_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 uid[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_mtrc_string_db_param_bits { u8 string_db_base_address[0x20]; u8 reserved_at_20[0x8]; u8 string_db_size[0x18]; }; struct mlx5_ifc_mtrc_cap_bits { u8 trace_owner[0x1]; u8 trace_to_memory[0x1]; u8 reserved_at_2[0x4]; u8 trc_ver[0x2]; u8 reserved_at_8[0x14]; u8 num_string_db[0x4]; u8 first_string_trace[0x8]; u8 num_string_trace[0x8]; u8 reserved_at_30[0x28]; u8 
log_max_trace_buffer_size[0x8]; u8 reserved_at_60[0x20]; struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; u8 reserved_at_280[0x180]; };
struct mlx5_ifc_mtrc_conf_bits { u8 reserved_at_0[0x1c]; u8 trace_mode[0x4]; u8 reserved_at_20[0x18]; u8 log_trace_buffer_size[0x8]; u8 trace_mkey[0x20]; u8 reserved_at_60[0x3a0]; };
struct mlx5_ifc_mtrc_stdb_bits { u8 string_db_index[0x4]; u8 reserved_at_4[0x4]; u8 read_size[0x18]; u8 start_offset[0x20]; u8 string_db_data[0]; };
struct mlx5_ifc_mtrc_ctrl_bits { u8 trace_status[0x2]; u8 reserved_at_2[0x2]; u8 arm_event[0x1]; u8 reserved_at_5[0xb]; u8 modify_field_select[0x10]; u8 reserved_at_20[0x2b]; u8 current_timestamp52_32[0x15]; u8 current_timestamp31_0[0x20]; u8 reserved_at_80[0x180]; };
struct mlx5_ifc_affiliated_event_header_bits { u8 reserved_at_0[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; };
+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+	MLX5_FC_BULK_128 = (1 << 0),
+	MLX5_FC_BULK_256 = (1 << 1),
+	MLX5_FC_BULK_512 = (1 << 2),
+	MLX5_FC_BULK_1024 = (1 << 3),
+	MLX5_FC_BULK_2048 = (1 << 4),
+	MLX5_FC_BULK_4096 = (1 << 5),
+	MLX5_FC_BULK_8192 = (1 << 6),
+	MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
+
 #endif /* MLX5_IFC_H */
diff --git a/sys/modules/mlx5/Makefile b/sys/modules/mlx5/Makefile
index 10e11d6c8594..339b0bef9382 100644
--- a/sys/modules/mlx5/Makefile
+++ b/sys/modules/mlx5/Makefile
@@ -1,59 +1,61 @@
.PATH: ${SRCTOP}/sys/dev/mlx5/mlx5_core \
${SRCTOP}/sys/dev/mlx5/mlx5_lib \
${SRCTOP}/sys/dev/mlx5/mlx5_fpga

KMOD=mlx5
SRCS= \
mlx5_alloc.c \
mlx5_cmd.c \
mlx5_cq.c \
mlx5_diag_cnt.c \
mlx5_diagnostics.c \
mlx5_eq.c \
mlx5_eswitch.c \
+mlx5_fc_cmd.c \
mlx5_fs_cmd.c \
mlx5_fs_tcp.c \
mlx5_fs_tree.c \
+mlx5_fs_counters.c \
mlx5_fw.c \
mlx5_fwdump.c \
mlx5_health.c \
mlx5_mad.c \
mlx5_main.c \
mlx5_mcg.c \
mlx5_mpfs.c \
mlx5_mr.c \
mlx5_pagealloc.c \
mlx5_pd.c \
mlx5_port.c \
mlx5_qp.c \
mlx5_rl.c \
mlx5_srq.c \
mlx5_tls.c \
mlx5_transobj.c \
mlx5_uar.c \
mlx5_vport.c \
mlx5_vsc.c \
mlx5_wq.c \
mlx5_gid.c

SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h

CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
CFLAGS+= ${LINUXKPI_INCLUDES}

.if defined(CONFIG_BUILD_FPGA)
SRCS+= \
mlx5fpga_cmd.c \
mlx5fpga_core.c \
mlx5fpga_sdk.c \
mlx5fpga_trans.c \
mlx5fpga_xfer.c \
mlx5fpga_ipsec.c
.endif

EXPORT_SYMS= YES

.include <bsd.kmod.mk>

CFLAGS+= -Wno-cast-qual -Wno-pointer-arith ${GCC_MS_EXTENSIONS}
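/*
 * Editorial endnote on the MLX5_FC_BULK_* definitions added in the
 * mlx5_ifc.h hunk above (illustrative arithmetic only): each enum
 * mlx5_fc_bulk_alloc_bitmask value is a power of two, and
 * MLX5_FC_BULK_NUM_FCS() scales it by MLX5_FC_BULK_SIZE_FACTOR (128) to
 * recover the number of flow counters in a bulk of that size, so each
 * enumerator's name matches the bulk size it selects:
 *
 *	MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_128)   = 128 * (1 << 0) = 128
 *	MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_512)   = 128 * (1 << 2) = 512
 *	MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_16384) = 128 * (1 << 7) = 16384
 */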