Index: head/sys/dev/mlx5/mlx5_core/mlx5_port.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_port.c (revision 341580) +++ head/sys/dev/mlx5/mlx5_core/mlx5_port.c (revision 341581) @@ -1,1148 +1,1184 @@ /*- * Copyright (c) 2013-2018, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include <linux/module.h> #include <dev/mlx5/port.h> #include "mlx5_core.h" int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write) { int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out; int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in; int err = -ENOMEM; u32 *out = NULL; u32 *in = NULL; void *data; in = mlx5_vzalloc(inlen); out = mlx5_vzalloc(outlen); if (!in || !out) goto out; data = MLX5_ADDR_OF(access_register_in, in, register_data); memcpy(data, data_in, size_in); MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG); MLX5_SET(access_register_in, in, op_mod, !write); MLX5_SET(access_register_in, in, argument, arg); MLX5_SET(access_register_in, in, register_id, reg_num); err = mlx5_cmd_exec(dev, in, inlen, out, outlen); if (err) goto out; data = MLX5_ADDR_OF(access_register_out, out, register_data); memcpy(data_out, data, size_out); out: kvfree(out); kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, u8 feature_group, u8 access_reg_group) { u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(qcam_reg); MLX5_SET(qcam_reg, in, feature_group, feature_group); MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group); return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0); } EXPORT_SYMBOL_GPL(mlx5_query_qcam_reg); struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; u8 rsvd1[2]; __be32 caps_127_96; __be32 caps_95_64; __be32 caps_63_32; __be32 caps_31_0; }; /* This function should be used after setting a port register only */ void mlx5_toggle_port_link(struct mlx5_core_dev *dev) { enum mlx5_port_status ps; mlx5_query_port_admin_status(dev, &ps); mlx5_set_port_status(dev, MLX5_PORT_DOWN); if (ps == MLX5_PORT_UP) mlx5_set_port_status(dev, MLX5_PORT_UP); } EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_caps(struct
mlx5_core_dev *dev, u8 port_num, u32 caps) { struct mlx5_reg_pcap in; struct mlx5_reg_pcap out; int err; memset(&in, 0, sizeof(in)); in.caps_127_96 = cpu_to_be32(caps); in.port_num = port_num; err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, sizeof(out), MLX5_REG_PCAP, 0, 1); return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port) { u32 in[MLX5_ST_SZ_DW(ptys_reg)]; int err; memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, local_port); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); err = mlx5_core_access_reg(dev, in, sizeof(in), ptys, ptys_size, MLX5_REG_PTYS, 0, 0); return err; } EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); if (err) return err; if (proto_mask == MLX5_PTYS_EN) *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); else *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, u8 *an_disable_cap, u8 *an_disable_status) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); if (err) return err; *an_disable_status = MLX5_GET(ptys_reg, out, an_disable_admin); *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg); int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable, u32 eth_proto_admin, int proto_mask) { u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u8 an_disable_cap; u8 an_disable_status; int err; err = mlx5_query_port_autoneg(dev, proto_mask, &an_disable_cap, &an_disable_status); if (err) return err; if (!an_disable_cap) return -EPERM; MLX5_SET(ptys_reg, in, local_port, 1); MLX5_SET(ptys_reg, in, an_disable_admin, disable); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); if (proto_mask == MLX5_PTYS_EN) MLX5_SET(ptys_reg, in, eth_proto_admin, eth_proto_admin); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PTYS, 0, 1); return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_autoneg); int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); if (err) return err; if (proto_mask == MLX5_PTYS_EN) *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); else *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, u32 *proto_oper, u8 local_port) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, local_port); if (err) return err; *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); return 0; } EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper); int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask) { u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; int err; MLX5_SET(ptys_reg, in, local_port, 1); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); if (proto_mask == MLX5_PTYS_EN) MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); else MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); 
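/* Write PTYS; the new admin values typically take effect when the link is next toggled, cf. mlx5_toggle_port_link() above. */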
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PTYS, 0, 1); return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_proto); int mlx5_set_port_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) { u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)] = {0}; int err; MLX5_SET(paos_reg, in, local_port, 1); MLX5_SET(paos_reg, in, admin_status, status); MLX5_SET(paos_reg, in, ase, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); return err; } int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) { u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)] = {0}; int err; MLX5_SET(paos_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) return err; *status = MLX5_GET(paos_reg, out, oper_status); return err; } int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status) { u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)]; int err; MLX5_SET(paos_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) return err; *status = MLX5_GET(paos_reg, out, admin_status); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static int mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, int *max_mtu, int *oper_mtu) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; int err; MLX5_SET(pmtu_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 0); if (err) return err; if (max_mtu) *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu); if (oper_mtu) *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu); if (admin_mtu) *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); return err; } int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; MLX5_SET(pmtu_reg, in, admin_mtu, mtu); MLX5_SET(pmtu_reg, in, local_port, 1); return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 1); } EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu) { return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL); } EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); int mlx5_set_port_pause_and_pfc(struct mlx5_core_dev *dev, u32 port, u8 rx_pause, u8 tx_pause, u8 pfc_en_rx, u8 pfc_en_tx) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; if (pfc_en_rx || pfc_en_tx) { /* PFC and global pauseframes are incompatible features */ if (tx_pause || rx_pause) return -EINVAL; } MLX5_SET(pfcc_reg, in, local_port, port); MLX5_SET(pfcc_reg, in, pptx, tx_pause); MLX5_SET(pfcc_reg, in, pprx, rx_pause); MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx); MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx); MLX5_SET(pfcc_reg, in, prio_mask_tx, pfc_en_tx); MLX5_SET(pfcc_reg, in, prio_mask_rx, pfc_en_rx); return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 1); } int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port, u32 *rx_pause, u32 *tx_pause) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; int err; MLX5_SET(pfcc_reg, in, local_port, port); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) return err; *rx_pause = MLX5_GET(pfcc_reg, out, pprx); *tx_pause = MLX5_GET(pfcc_reg, out, pptx); return 0; } 
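For context, a minimal sketch (not part of this change) of how a consumer could drive the pause/PFC accessors above; the helper name and priority mask are illustrative only. mlx5_set_port_pause_and_pfc() rejects enabling PFC together with global pauseframes, so both pause directions are passed as zero in the same register write:

static int
mlx5_enable_pfc_sketch(struct mlx5_core_dev *dev, u8 prio_mask)
{
	/* rx_pause and tx_pause must be zero when any PFC priority is set */
	return mlx5_set_port_pause_and_pfc(dev, 1, 0, 0, prio_mask, prio_mask);
}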
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; MLX5_SET(pfcc_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) return err; if (pfc_en_tx != NULL) *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx); if (pfc_en_rx != NULL) *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu) { return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu); } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev) { u8 wol_supported = 0; if (MLX5_CAP_GEN(dev, wol_s)) wol_supported |= MLX5_WOL_SECURED_MAGIC; if (MLX5_CAP_GEN(dev, wol_g)) wol_supported |= MLX5_WOL_MAGIC; if (MLX5_CAP_GEN(dev, wol_a)) wol_supported |= MLX5_WOL_ARP; if (MLX5_CAP_GEN(dev, wol_b)) wol_supported |= MLX5_WOL_BROADCAST; if (MLX5_CAP_GEN(dev, wol_m)) wol_supported |= MLX5_WOL_MULTICAST; if (MLX5_CAP_GEN(dev, wol_u)) wol_supported |= MLX5_WOL_UNICAST; if (MLX5_CAP_GEN(dev, wol_p)) wol_supported |= MLX5_WOL_PHY_ACTIVITY; return wol_supported; } EXPORT_SYMBOL_GPL(mlx5_is_wol_supported); int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode) { u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0}; MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_set_wol); int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout) { u32 in[MLX5_ST_SZ_DW(query_delay_drop_params_in)] = {0}; u32 out[MLX5_ST_SZ_DW(query_delay_drop_params_out)] = {0}; int err = 0; MLX5_SET(query_delay_drop_params_in, in, opcode, MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; *timeout = MLX5_GET(query_delay_drop_params_out, out, delay_drop_timeout); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_dropless_mode); int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout) { u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0}; MLX5_SET(set_delay_drop_params_in, in, opcode, MLX5_CMD_OP_SET_DELAY_DROP_PARAMS); MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout, timeout); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_set_dropless_mode); int mlx5_core_access_pvlc(struct mlx5_core_dev *dev, struct mlx5_pvlc_reg *pvlc, int write) { int sz = MLX5_ST_SZ_BYTES(pvlc_reg); u8 in[MLX5_ST_SZ_BYTES(pvlc_reg)] = {0}; u8 out[MLX5_ST_SZ_BYTES(pvlc_reg)] = {0}; int err; MLX5_SET(pvlc_reg, in, local_port, pvlc->local_port); if (write) MLX5_SET(pvlc_reg, in, vl_admin, pvlc->vl_admin); err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PVLC, 0, !!write); if (err) return err; if (!write) { pvlc->local_port = MLX5_GET(pvlc_reg, out, local_port); pvlc->vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap); pvlc->vl_admin = MLX5_GET(pvlc_reg, out, vl_admin); pvlc->vl_operational = MLX5_GET(pvlc_reg, out, vl_operational); } return 0; } EXPORT_SYMBOL_GPL(mlx5_core_access_pvlc); int mlx5_core_access_ptys(struct mlx5_core_dev *dev, struct mlx5_ptys_reg *ptys, int write) { int sz = MLX5_ST_SZ_BYTES(ptys_reg); void *out = NULL; void *in = NULL; int err; in = 
mlx5_vzalloc(sz); if (!in) return -ENOMEM; out = mlx5_vzalloc(sz); if (!out) { kvfree(in); return -ENOMEM; } MLX5_SET(ptys_reg, in, local_port, ptys->local_port); MLX5_SET(ptys_reg, in, proto_mask, ptys->proto_mask); if (write) { MLX5_SET(ptys_reg, in, eth_proto_capability, ptys->eth_proto_cap); MLX5_SET(ptys_reg, in, ib_link_width_capability, ptys->ib_link_width_cap); MLX5_SET(ptys_reg, in, ib_proto_capability, ptys->ib_proto_cap); MLX5_SET(ptys_reg, in, eth_proto_admin, ptys->eth_proto_admin); MLX5_SET(ptys_reg, in, ib_link_width_admin, ptys->ib_link_width_admin); MLX5_SET(ptys_reg, in, ib_proto_admin, ptys->ib_proto_admin); MLX5_SET(ptys_reg, in, eth_proto_oper, ptys->eth_proto_oper); MLX5_SET(ptys_reg, in, ib_link_width_oper, ptys->ib_link_width_oper); MLX5_SET(ptys_reg, in, ib_proto_oper, ptys->ib_proto_oper); MLX5_SET(ptys_reg, in, eth_proto_lp_advertise, ptys->eth_proto_lp_advertise); } err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PTYS, 0, !!write); if (err) goto out; if (!write) { ptys->local_port = MLX5_GET(ptys_reg, out, local_port); ptys->proto_mask = MLX5_GET(ptys_reg, out, proto_mask); ptys->eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); ptys->ib_link_width_cap = MLX5_GET(ptys_reg, out, ib_link_width_capability); ptys->ib_proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); ptys->eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); ptys->ib_link_width_admin = MLX5_GET(ptys_reg, out, ib_link_width_admin); ptys->ib_proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); ptys->eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); ptys->ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); ptys->ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); ptys->eth_proto_lp_advertise = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); } out: kvfree(in); kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_ptys); static int mtu_to_ib_mtu(int mtu) { switch (mtu) { case 256: return 1; case 512: return 2; case 1024: return 3; case 2048: return 4; case 4096: return 5; default: printf("mlx5_core: WARN: invalid mtu\n"); return -1; } } int mlx5_core_access_pmtu(struct mlx5_core_dev *dev, struct mlx5_pmtu_reg *pmtu, int write) { int sz = MLX5_ST_SZ_BYTES(pmtu_reg); void *out = NULL; void *in = NULL; int err; in = mlx5_vzalloc(sz); if (!in) return -ENOMEM; out = mlx5_vzalloc(sz); if (!out) { kvfree(in); return -ENOMEM; } MLX5_SET(pmtu_reg, in, local_port, pmtu->local_port); if (write) MLX5_SET(pmtu_reg, in, admin_mtu, pmtu->admin_mtu); err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PMTU, 0, !!write); if (err) goto out; if (!write) { pmtu->local_port = MLX5_GET(pmtu_reg, out, local_port); pmtu->max_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out, max_mtu)); pmtu->admin_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out, admin_mtu)); pmtu->oper_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out, oper_mtu)); } out: kvfree(in); kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_pmtu); int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; int lane = 0; int err; MLX5_SET(pmlp_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMLP, 0, 0); if (err) return err; lane = MLX5_GET(pmlp_reg, out, lane0_module_mapping); *module_num = lane & MLX5_EEPROM_IDENTIFIER_BYTE_MASK; return 0; } EXPORT_SYMBOL_GPL(mlx5_query_module_num); int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num,
int device_addr, int size, int module_num, u32 *data, int *size_read) { u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(mcia_reg)] = {0}; u32 *ptr = (u32 *)MLX5_ADDR_OF(mcia_reg, out, dword_0); int status; int err; size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr); MLX5_SET(mcia_reg, in, page_number, page_num); MLX5_SET(mcia_reg, in, device_address, device_addr); MLX5_SET(mcia_reg, in, size, size); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MCIA, 0, 0); if (err) return err; status = MLX5_GET(mcia_reg, out, status); if (status) return status; memcpy(data, ptr, size); *size_read = size; return 0; } EXPORT_SYMBOL_GPL(mlx5_query_eeprom); int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port) { u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; int err; MLX5_SET(add_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) { mlx5_core_err(dev, "Failed %s, port %u, err - %d", mlx5_command_str(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT), port, err); } return err; } int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port) { u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; int err; MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) { mlx5_core_err(dev, "Failed %s, port %u, err - %d", mlx5_command_str(MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT), port, err); } return err; } int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode) { u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0}; u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0}; int err; MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); return err; } EXPORT_SYMBOL_GPL(mlx5_query_wol); int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int *is_enable) { u32 in[MLX5_ST_SZ_DW(query_cong_status_in)] = {0}; u32 out[MLX5_ST_SZ_DW(query_cong_status_out)] = {0}; int err; *is_enable = 0; MLX5_SET(query_cong_status_in, in, opcode, MLX5_CMD_OP_QUERY_CONG_STATUS); MLX5_SET(query_cong_status_in, in, cong_protocol, protocol); MLX5_SET(query_cong_status_in, in, priority, priority); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) *is_enable = MLX5_GET(query_cong_status_out, out, enable); return err; } int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int enable) { u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)] = {0}; MLX5_SET(modify_cong_status_in, in, opcode, MLX5_CMD_OP_MODIFY_CONG_STATUS); MLX5_SET(modify_cong_status_in, in, cong_protocol, protocol); MLX5_SET(modify_cong_status_in, in, priority, priority); MLX5_SET(modify_cong_status_in, in, enable, enable); return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol, void *out, int out_size) { u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {0}; 
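/* The reply is written directly into the caller-supplied "out" buffer; its size and layout depend on the congestion protocol queried. */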
MLX5_SET(query_cong_params_in, in, opcode, MLX5_CMD_OP_QUERY_CONG_PARAMS); MLX5_SET(query_cong_params_in, in, cong_protocol, protocol); return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); } static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int outlen) { u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -ENOTSUPP; memset(in, 0, sizeof(in)); return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, MLX5_REG_QETCR, 0, 0); } int mlx5_max_tc(struct mlx5_core_dev *mdev) { u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; return num_tc - 1; } EXPORT_SYMBOL_GPL(mlx5_max_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -ENOTSUPP; return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), MLX5_REG_QETCR, 0, 1); } int mlx5_query_port_tc_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_units) { u32 out[MLX5_ST_SZ_DW(qetc_reg)]; void *ets_tcn_conf; int err; int i; err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); if (err) return err; for (i = 0; i <= mlx5_max_tc(mdev); i++) { ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]); max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value); max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units); } return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_tc_rate_limit); int mlx5_modify_port_tc_rate_limit(struct mlx5_core_dev *mdev, const u8 *max_bw_value, const u8 *max_bw_units) { u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {}; void *ets_tcn_conf; int i; MLX5_SET(qetc_reg, in, port_number, 1); for (i = 0; i <= mlx5_max_tc(mdev); i++) { ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]); MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1); MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units, max_bw_units[i]); MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value, max_bw_value[i]); } return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } EXPORT_SYMBOL_GPL(mlx5_modify_port_tc_rate_limit); int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, u8 prio, u8 *tc) { u32 in[MLX5_ST_SZ_DW(qtct_reg)]; u32 out[MLX5_ST_SZ_DW(qtct_reg)]; int err; memset(in, 0, sizeof(in)); memset(out, 0, sizeof(out)); MLX5_SET(qtct_reg, in, port_number, 1); MLX5_SET(qtct_reg, in, prio, prio); err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_QTCT, 0, 0); if (!err) *tc = MLX5_GET(qtct_reg, out, tclass); return err; } EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, int prio_index, const u8 prio_tc) { u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {}; u32 out[MLX5_ST_SZ_DW(qtct_reg)]; int err; if (prio_tc > mlx5_max_tc(mdev)) return -EINVAL; MLX5_SET(qtct_reg, in, prio, prio_index); MLX5_SET(qtct_reg, in, tclass, prio_tc); err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_QTCT, 0, 1); return (err); } EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, const u8 *tc_group) { u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {}; int i; for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]); } return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, u8 tc, u8 *tc_group) { u32 out[MLX5_ST_SZ_DW(qetc_reg)]; void *ets_tcn_conf; int err; err = 
mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); if (err) return err; ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[tc]); *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, group); return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, const u8 *tc_bw) { u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {}; int i; for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]); } return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *bw_pct) { u32 out[MLX5_ST_SZ_DW(qetc_reg)]; void *ets_tcn_conf; int err; int i; err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); if (err) return err; for (i = 0; i <= mlx5_max_tc(mdev); i++) { ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]); bw_pct[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, bw_allocation); } return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc); int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev, void *in, int in_size) { u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = {0}; MLX5_SET(modify_cong_params_in, in, opcode, MLX5_CMD_OP_MODIFY_CONG_PARAMS); return mlx5_cmd_exec(mdev, in, in_size, out, sizeof(out)); } int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear, void *out, int out_size) { u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {0}; MLX5_SET(query_cong_statistics_in, in, opcode, MLX5_CMD_OP_QUERY_CONG_STATISTICS); MLX5_SET(query_cong_statistics_in, in, clear, clear); return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); } int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in, int in_size) { u32 out[MLX5_ST_SZ_DW(set_diagnostic_params_out)] = {0}; MLX5_SET(set_diagnostic_params_in, in, opcode, MLX5_CMD_OP_SET_DIAGNOSTICS); return mlx5_cmd_exec(mdev, in, in_size, out, sizeof(out)); } int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev, u8 num_of_samples, u16 sample_index, void *out, int out_size) { u32 in[MLX5_ST_SZ_DW(query_diagnostic_counters_in)] = {0}; MLX5_SET(query_diagnostic_counters_in, in, opcode, MLX5_CMD_OP_QUERY_DIAGNOSTICS); MLX5_SET(query_diagnostic_counters_in, in, num_of_samples, num_of_samples); MLX5_SET(query_diagnostic_counters_in, in, sample_index, sample_index); return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); } int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state) { u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {}; u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {}; int err; MLX5_SET(qpts_reg, in, local_port, 1); MLX5_SET(qpts_reg, in, trust_state, trust_state); err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_QPTS, 0, 1); return err; } int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state) { u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {}; u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {}; int err; MLX5_SET(qpts_reg, in, local_port, 1); err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_QPTS, 0, 0); if (!err) *trust_state = MLX5_GET(qpts_reg, out, trust_state); return err; } int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, const u8 *dscp2prio) { int sz = MLX5_ST_SZ_BYTES(qpdpm_reg); void *qpdpm_dscp; void *out; void *in; int err; int i; in = kzalloc(sz, GFP_KERNEL); out = kzalloc(sz, GFP_KERNEL); if (!in || !out) { err = -ENOMEM; goto out; } MLX5_SET(qpdpm_reg, in, local_port, 1); err = 
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0); if (err) goto out; memcpy(in, out, sz); MLX5_SET(qpdpm_reg, in, local_port, 1); /* Update the corresponding dscp entry */ for (i = 0; i < MLX5_MAX_SUPPORTED_DSCP; i++) { qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[i]); MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, dscp2prio[i]); MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1); } err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1); out: kfree(in); kfree(out); return err; } int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio) { int sz = MLX5_ST_SZ_BYTES(qpdpm_reg); void *qpdpm_dscp; void *out; void *in; int err; int i; in = kzalloc(sz, GFP_KERNEL); out = kzalloc(sz, GFP_KERNEL); if (!in || !out) { err = -ENOMEM; goto out; } MLX5_SET(qpdpm_reg, in, local_port, 1); err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0); if (err) goto out; for (i = 0; i < MLX5_MAX_SUPPORTED_DSCP; i++) { qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]); dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio); } out: kfree(in); kfree(out); return err; } + +int mlx5_query_pddr_range_info(struct mlx5_core_dev *mdev, u8 local_port, u8 *is_er_type) +{ + u32 pddr_reg[MLX5_ST_SZ_DW(pddr_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(pddr_reg); + int error; + u8 ecc; + u8 ci; + + MLX5_SET(pddr_reg, pddr_reg, local_port, local_port); + MLX5_SET(pddr_reg, pddr_reg, page_select, 3 /* module info page */); + + error = mlx5_core_access_reg(mdev, pddr_reg, sz, pddr_reg, sz, + MLX5_ACCESS_REG_SUMMARY_CTRL_ID_PDDR, 0, 0); + if (error != 0) + return (error); + + ecc = MLX5_GET(pddr_reg, pddr_reg, page_data.pddr_module_info.ethernet_compliance_code); + ci = MLX5_GET(pddr_reg, pddr_reg, page_data.pddr_module_info.cable_identifier); + + switch (ci) { + case 0: /* QSFP28 */ + case 1: /* QSFP+ */ + *is_er_type = 0; + break; + case 2: /* SFP28/SFP+ */ + case 3: /* QSA (QSFP->SFP) */ + *is_er_type = ((ecc & (1 << 7)) != 0); + break; + default: + *is_er_type = 0; + break; + } + return (0); +} +EXPORT_SYMBOL_GPL(mlx5_query_pddr_range_info); Index: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c =================================================================== --- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 341580) +++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 341581) @@ -1,3909 +1,3953 @@ /*- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "en.h" #include <sys/sockio.h> #include <machine/atomic.h> #ifndef ETH_DRIVER_VERSION #define ETH_DRIVER_VERSION "3.4.2" #endif static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver " ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs); struct mlx5e_channel_param { struct mlx5e_rq_param rq; struct mlx5e_sq_param sq; struct mlx5e_cq_param rx_cq; struct mlx5e_cq_param tx_cq; }; static const struct { u32 subtype; u64 baudrate; } mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = { [MLX5E_1000BASE_CX_SGMII] = { .subtype = IFM_1000_CX_SGMII, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_1000BASE_KX] = { .subtype = IFM_1000_KX, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_10GBASE_CX4] = { .subtype = IFM_10G_CX4, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_KX4] = { .subtype = IFM_10G_KX4, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_KR] = { .subtype = IFM_10G_KR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_20GBASE_KR2] = { .subtype = IFM_20G_KR2, .baudrate = IF_Gbps(20ULL), }, [MLX5E_40GBASE_CR4] = { .subtype = IFM_40G_CR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_KR4] = { .subtype = IFM_40G_KR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_56GBASE_R4] = { .subtype = IFM_56G_R4, .baudrate = IF_Gbps(56ULL), }, [MLX5E_10GBASE_CR] = { .subtype = IFM_10G_CR1, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_SR] = { .subtype = IFM_10G_SR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_ER] = { .subtype = IFM_10G_ER, .baudrate = IF_Gbps(10ULL), }, [MLX5E_40GBASE_SR4] = { .subtype = IFM_40G_SR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_LR4] = { .subtype = IFM_40G_LR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_100GBASE_CR4] = { .subtype = IFM_100G_CR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_SR4] = { .subtype = IFM_100G_SR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_KR4] = { .subtype = IFM_100G_KR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_LR4] = { .subtype = IFM_100G_LR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100BASE_TX] = { .subtype = IFM_100_TX, .baudrate = IF_Mbps(100ULL), }, [MLX5E_1000BASE_T] = { .subtype = IFM_1000_T, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_10GBASE_T] = { .subtype = IFM_10G_T, .baudrate = IF_Gbps(10ULL), }, [MLX5E_25GBASE_CR] = { .subtype = IFM_25G_CR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GBASE_KR] = { .subtype = IFM_25G_KR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GBASE_SR] = { .subtype = IFM_25G_SR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_50GBASE_CR2] = { .subtype = IFM_50G_CR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GBASE_KR2] = { .subtype = IFM_50G_KR2, .baudrate = IF_Gbps(50ULL), }, }; MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet"); SYSCTL_DECL(_hw_mlx5); static void mlx5e_update_carrier(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; u32 eth_proto_oper; int error; u8 port_state; + u8 is_er_type; u8 i; port_state = mlx5_query_vport_state(mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); if (port_state == VPORT_STATE_UP) {
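/* The vport is up: latch IFM_ACTIVE, then resolve the exact media subtype from the PTYS register below. */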
priv->media_status_last |= IFM_ACTIVE; } else { priv->media_status_last &= ~IFM_ACTIVE; priv->media_active_last = IFM_ETHER; if_link_state_change(priv->ifp, LINK_STATE_DOWN); return; } error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); if (error) { priv->media_active_last = IFM_ETHER; priv->ifp->if_baudrate = 1; if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n", __func__, error); return; } eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) { if (mlx5e_mode_table[i].baudrate == 0) continue; if (MLX5E_PROT_MASK(i) & eth_proto_oper) { + u32 subtype = mlx5e_mode_table[i].subtype; + priv->ifp->if_baudrate = mlx5e_mode_table[i].baudrate; - priv->media_active_last = - mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX; + + switch (subtype) { + case IFM_10G_ER: + error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); + if (error != 0) { + if_printf(priv->ifp, "%s: query port pddr failed: %d\n", + __func__, error); + } + if (error != 0 || is_er_type == 0) + subtype = IFM_10G_LR; + break; + case IFM_40G_LR4: + error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); + if (error != 0) { + if_printf(priv->ifp, "%s: query port pddr failed: %d\n", + __func__, error); + } + if (error == 0 && is_er_type != 0) + subtype = IFM_40G_ER4; + break; + } + priv->media_active_last = subtype | IFM_ETHER | IFM_FDX; + break; } } if_link_state_change(priv->ifp, LINK_STATE_UP); } static void mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr) { struct mlx5e_priv *priv = dev->if_softc; ifmr->ifm_status = priv->media_status_last; ifmr->ifm_active = priv->media_active_last | (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0); } static u32 mlx5e_find_link_mode(u32 subtype) { u32 i; u32 link_mode = 0; + switch (subtype) { + case IFM_10G_LR: + subtype = IFM_10G_ER; + break; + case IFM_40G_ER4: + subtype = IFM_40G_LR4; + break; + } + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { if (mlx5e_mode_table[i].baudrate == 0) continue; if (mlx5e_mode_table[i].subtype == subtype) link_mode |= MLX5E_PROT_MASK(i); } return (link_mode); } static int mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv) { return (mlx5_set_port_pause_and_pfc(priv->mdev, 1, priv->params.rx_pauseframe_control, priv->params.tx_pauseframe_control, priv->params.rx_priority_flow_control, priv->params.tx_priority_flow_control)); } static int mlx5e_set_port_pfc(struct mlx5e_priv *priv) { int error; if (priv->params.rx_pauseframe_control || priv->params.tx_pauseframe_control) { if_printf(priv->ifp, "Global pauseframes must be disabled before enabling PFC.\n"); error = -EINVAL; } else { error = mlx5e_set_port_pause_and_pfc(priv); } return (error); } static int mlx5e_media_change(struct ifnet *dev) { struct mlx5e_priv *priv = dev->if_softc; struct mlx5_core_dev *mdev = priv->mdev; u32 eth_proto_cap; u32 link_mode; int was_opened; int locked; int error; locked = PRIV_LOCKED(priv); if (!locked) PRIV_LOCK(priv); if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) { error = EINVAL; goto done; } link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media)); /* query supported capabilities */ error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); if (error != 0) { if_printf(dev, "Query port media capability failed\n"); goto done; } /* check for autoselect */ if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) { link_mode = eth_proto_cap; if (link_mode == 0) { if_printf(dev, "Port media
capability is zero\n"); error = EINVAL; goto done; } } else { link_mode = link_mode & eth_proto_cap; if (link_mode == 0) { if_printf(dev, "Not supported link mode requested\n"); error = EINVAL; goto done; } } if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { /* check if PFC is enabled */ if (priv->params.rx_priority_flow_control || priv->params.tx_priority_flow_control) { if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n"); error = EINVAL; goto done; } } /* update pauseframe control bits */ priv->params.rx_pauseframe_control = (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; priv->params.tx_pauseframe_control = (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0; /* check if device is opened */ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); /* reconfigure the hardware */ mlx5_set_port_status(mdev, MLX5_PORT_DOWN); mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN); error = -mlx5e_set_port_pause_and_pfc(priv); if (was_opened) mlx5_set_port_status(mdev, MLX5_PORT_UP); done: if (!locked) PRIV_UNLOCK(priv); return (error); } static void mlx5e_update_carrier_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, update_carrier_work); PRIV_LOCK(priv); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_update_carrier(priv); PRIV_UNLOCK(priv); } /* * This function reads the physical port counters from the firmware * using a pre-defined layout defined by various MLX5E_PPORT_XXX() * macros. The output is converted from big-endian 64-bit values into * host endian ones and stored in the "priv->stats.pport" structure. */ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_pport_stats *s = &priv->stats.pport; struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; u32 *in; u32 *out; const u64 *ptr; unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); unsigned x; unsigned y; unsigned z; /* allocate firmware request structures */ in = mlx5_vzalloc(sz); out = mlx5_vzalloc(sz); if (in == NULL || out == NULL) goto free_out; /* * Get pointer to the 64-bit counter set which is located at a * fixed offset in the output firmware request structure: */ ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); MLX5_SET(ppcnt_reg, in, local_port, 1); /* read IEEE802_3 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) s->arg[y] = be64toh(ptr[x]); /* read RFC2819 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) s->arg[y] = be64toh(ptr[x]); for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM + MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read RFC2863 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read physical layer stats counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, 
MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read per-priority counters */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); /* iterate all the priorities */ for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) { MLX5_SET(ppcnt_reg, in, prio_tc, z); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); /* read per priority stats counter group using predefined counter layout */ for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM / MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++) s->arg[y] = be64toh(ptr[x]); } free_out: /* free firmware request structures */ kvfree(in); kvfree(out); } /* * This function is called regularly to collect all statistics * counters from the firmware. The values can be viewed through the * sysctl interface. Execution is serialized using the priv's global * configuration lock. */ static void mlx5e_update_stats_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, update_stats_work); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_vport_stats *s = &priv->stats.vport; struct mlx5e_rq_stats *rq_stats; struct mlx5e_sq_stats *sq_stats; struct buf_ring *sq_br; #if (__FreeBSD_version < 1100000) struct ifnet *ifp = priv->ifp; #endif u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u64 tso_packets = 0; u64 tso_bytes = 0; u64 tx_queue_dropped = 0; u64 tx_defragged = 0; u64 tx_offload_none = 0; u64 lro_packets = 0; u64 lro_bytes = 0; u64 sw_lro_queued = 0; u64 sw_lro_flushed = 0; u64 rx_csum_none = 0; u64 rx_wqe_err = 0; u32 rx_out_of_buffer = 0; int i; int j; PRIV_LOCK(priv); out = mlx5_vzalloc(outlen); if (out == NULL) goto free_out; if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) goto free_out; /* Collect first the SW counters and then HW for consistency */ for (i = 0; i < priv->params.num_channels; i++) { struct mlx5e_rq *rq = &priv->channel[i]->rq; rq_stats = &priv->channel[i]->rq.stats; /* collect stats from LRO */ rq_stats->sw_lro_queued = rq->lro.lro_queued; rq_stats->sw_lro_flushed = rq->lro.lro_flushed; sw_lro_queued += rq_stats->sw_lro_queued; sw_lro_flushed += rq_stats->sw_lro_flushed; lro_packets += rq_stats->lro_packets; lro_bytes += rq_stats->lro_bytes; rx_csum_none += rq_stats->csum_none; rx_wqe_err += rq_stats->wqe_err; for (j = 0; j < priv->num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; sq_br = priv->channel[i]->sq[j].br; tso_packets += sq_stats->tso_packets; tso_bytes += sq_stats->tso_bytes; tx_queue_dropped += sq_stats->dropped; if (sq_br != NULL) tx_queue_dropped += sq_br->br_drops; tx_defragged += sq_stats->defragged; tx_offload_none += sq_stats->csum_offload_none; } } s->tx_jumbo_packets = priv->stats.port_stats_debug.p1519to2047octets + priv->stats.port_stats_debug.p2048to4095octets + priv->stats.port_stats_debug.p4096to8191octets + priv->stats.port_stats_debug.p8192to10239octets; /* update counters */ s->tso_packets = tso_packets; s->tso_bytes = tso_bytes; s->tx_queue_dropped = tx_queue_dropped; s->tx_defragged = tx_defragged; s->lro_packets = lro_packets; s->lro_bytes = lro_bytes; s->sw_lro_queued = sw_lro_queued; s->sw_lro_flushed = sw_lro_flushed; s->rx_csum_none = rx_csum_none; s->rx_wqe_err = rx_wqe_err; /* HW counters */ memset(in, 0, sizeof(in)); MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0);
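/* other_vport=0 selects this function's own vNIC vport counters. */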
MLX5_SET(query_vport_counter_in, in, other_vport, 0); memset(out, 0, outlen); /* get number of out-of-buffer drops first */ if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, &rx_out_of_buffer)) goto free_out; /* accumulate difference into a 64-bit counter */ s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev); s->rx_out_of_buffer_prev = rx_out_of_buffer; /* get port statistics */ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) goto free_out; #define MLX5_GET_CTR(out, x) \ MLX5_GET64(query_vport_counter_out, out, x) s->rx_error_packets = MLX5_GET_CTR(out, received_errors.packets); s->rx_error_bytes = MLX5_GET_CTR(out, received_errors.octets); s->tx_error_packets = MLX5_GET_CTR(out, transmit_errors.packets); s->tx_error_bytes = MLX5_GET_CTR(out, transmit_errors.octets); s->rx_unicast_packets = MLX5_GET_CTR(out, received_eth_unicast.packets); s->rx_unicast_bytes = MLX5_GET_CTR(out, received_eth_unicast.octets); s->tx_unicast_packets = MLX5_GET_CTR(out, transmitted_eth_unicast.packets); s->tx_unicast_bytes = MLX5_GET_CTR(out, transmitted_eth_unicast.octets); s->rx_multicast_packets = MLX5_GET_CTR(out, received_eth_multicast.packets); s->rx_multicast_bytes = MLX5_GET_CTR(out, received_eth_multicast.octets); s->tx_multicast_packets = MLX5_GET_CTR(out, transmitted_eth_multicast.packets); s->tx_multicast_bytes = MLX5_GET_CTR(out, transmitted_eth_multicast.octets); s->rx_broadcast_packets = MLX5_GET_CTR(out, received_eth_broadcast.packets); s->rx_broadcast_bytes = MLX5_GET_CTR(out, received_eth_broadcast.octets); s->tx_broadcast_packets = MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); s->tx_broadcast_bytes = MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); s->rx_packets = s->rx_unicast_packets + s->rx_multicast_packets + s->rx_broadcast_packets - s->rx_out_of_buffer; s->rx_bytes = s->rx_unicast_bytes + s->rx_multicast_bytes + s->rx_broadcast_bytes; s->tx_packets = s->tx_unicast_packets + s->tx_multicast_packets + s->tx_broadcast_packets; s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes + s->tx_broadcast_bytes; /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; s->rx_csum_good = s->rx_packets - s->rx_csum_none; /* Get physical port counters */ mlx5e_update_pport_counters(priv); #if (__FreeBSD_version < 1100000) /* no get_counters interface in fbsd 10 */ ifp->if_ipackets = s->rx_packets; ifp->if_ierrors = s->rx_error_packets + priv->stats.pport.alignment_err + priv->stats.pport.check_seq_err + priv->stats.pport.crc_align_errors + priv->stats.pport.in_range_len_errors + priv->stats.pport.jabbers + priv->stats.pport.out_of_range_len + priv->stats.pport.oversize_pkts + priv->stats.pport.symbol_err + priv->stats.pport.too_long_errors + priv->stats.pport.undersize_pkts + priv->stats.pport.unsupported_op_rx; ifp->if_iqdrops = s->rx_out_of_buffer + priv->stats.pport.drop_events; ifp->if_opackets = s->tx_packets; ifp->if_oerrors = s->tx_error_packets; ifp->if_snd.ifq_drops = s->tx_queue_dropped; ifp->if_ibytes = s->rx_bytes; ifp->if_obytes = s->tx_bytes; ifp->if_collisions = priv->stats.pport.collisions; #endif free_out: kvfree(out); /* Update diagnostics, if any */ if (priv->params_ethtool.diag_pci_enable || priv->params_ethtool.diag_general_enable) { int error = mlx5_core_get_diagnostics_full(mdev, priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL, priv->params_ethtool.diag_general_enable ? 
&priv->params_general : NULL); if (error != 0) if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error); } PRIV_UNLOCK(priv); } static void mlx5e_update_stats(void *arg) { struct mlx5e_priv *priv = arg; queue_work(priv->wq, &priv->update_stats_work); callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv); } static void mlx5e_async_event_sub(struct mlx5e_priv *priv, enum mlx5_dev_event event) { switch (event) { case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; default: break; } } static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; mtx_lock(&priv->async_events_mtx); if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) mlx5e_async_event_sub(priv, event); mtx_unlock(&priv->async_events_mtx); } static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); } static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { mtx_lock(&priv->async_events_mtx); clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); mtx_unlock(&priv->async_events_mtx); } static void mlx5e_calibration_callout(void *arg); static int mlx5e_calibration_duration = 20; static int mlx5e_fast_calibration = 1; static int mlx5e_normal_calibration = 30; static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0, "MLX5 timestamp calibration parameters"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN, &mlx5e_calibration_duration, 0, "Duration of initial calibration"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN, &mlx5e_fast_calibration, 0, "Recalibration interval during initial calibration"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN, &mlx5e_normal_calibration, 0, "Recalibration interval during normal operations"); /* * Ignites the calibration process. */ static void mlx5e_reset_calibration_callout(struct mlx5e_priv *priv) { if (priv->clbr_done == 0) mlx5e_calibration_callout(priv); else callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done < mlx5e_calibration_duration ? mlx5e_fast_calibration : mlx5e_normal_calibration) * hz, mlx5e_calibration_callout, priv); } static uint64_t mlx5e_timespec2usec(const struct timespec *ts) { return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec); } static uint64_t mlx5e_hw_clock(struct mlx5e_priv *priv) { struct mlx5_init_seg *iseg; uint32_t hw_h, hw_h1, hw_l; iseg = priv->mdev->iseg; do { hw_h = ioread32be(&iseg->internal_timer_h); hw_l = ioread32be(&iseg->internal_timer_l); hw_h1 = ioread32be(&iseg->internal_timer_h); } while (hw_h1 != hw_h); return (((uint64_t)hw_h << 32) | hw_l); } /* * The calibration callout, it runs either in the context of the * thread which enables calibration, or in callout. It takes the * snapshot of system and adapter clocks, then advances the pointers to * the calibration point to allow rx path to read the consistent data * lockless.
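* Readers are expected to check a point's clbr_gen before and after copying it and to retry on mismatch, seqlock-style.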
*/ static void mlx5e_calibration_callout(void *arg) { struct mlx5e_priv *priv; struct mlx5e_clbr_point *next, *curr; struct timespec ts; int clbr_curr_next; priv = arg; curr = &priv->clbr_points[priv->clbr_curr]; clbr_curr_next = priv->clbr_curr + 1; if (clbr_curr_next >= nitems(priv->clbr_points)) clbr_curr_next = 0; next = &priv->clbr_points[clbr_curr_next]; next->base_prev = curr->base_curr; next->clbr_hw_prev = curr->clbr_hw_curr; next->clbr_hw_curr = mlx5e_hw_clock(priv); if (((next->clbr_hw_curr - curr->clbr_hw_prev) >> MLX5E_TSTMP_PREC) == 0) { if_printf(priv->ifp, "HW failed tstmp frozen %#jx %#jx, disabling\n", next->clbr_hw_curr, curr->clbr_hw_prev); priv->clbr_done = 0; return; } nanouptime(&ts); next->base_curr = mlx5e_timespec2usec(&ts); curr->clbr_gen = 0; atomic_thread_fence_rel(); priv->clbr_curr = clbr_curr_next; atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen)); if (priv->clbr_done < mlx5e_calibration_duration) priv->clbr_done++; mlx5e_reset_calibration_callout(priv); } static const char *mlx5e_rq_stats_desc[] = { MLX5E_RQ_STATS(MLX5E_STATS_DESC) }; static int mlx5e_create_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; char buffer[16]; void *rqc = param->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); int wq_sz; int err; int i; u32 nsegs, wqe_sz; err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); if (err != 0) goto done; /* Create DMA descriptor TAG */ if ((err = -bus_dma_tag_create( bus_get_dma_tag(mdev->pdev->dev.bsddev), 1, /* any alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */ nsegs, /* nsegments */ nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &rq->dma_tag))) goto done; err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); if (err) goto err_free_dma_tag; rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs); if (err != 0) goto err_rq_wq_destroy; wq_sz = mlx5_wq_ll_get_size(&rq->wq); err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz); if (err) goto err_rq_wq_destroy; rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); for (i = 0; i != wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); #if (MLX5E_MAX_RX_SEGS == 1) uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; #else int j; #endif err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map); if (err != 0) { while (i--) bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); goto err_rq_mbuf_free; } /* set value for constant fields */ #if (MLX5E_MAX_RX_SEGS == 1) wqe->data[0].lkey = c->mkey_be; wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING); #else for (j = 0; j < rq->nsegs; j++) wqe->data[j].lkey = c->mkey_be; #endif } rq->ifp = c->ifp; rq->channel = c; rq->ix = c->ix; snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix); mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM, rq->stats.arg); return (0); err_rq_mbuf_free: free(rq->mbuf, M_MLX5EN); tcp_lro_free(&rq->lro); err_rq_wq_destroy: mlx5_wq_destroy(&rq->wq_ctrl); err_free_dma_tag: bus_dma_tag_destroy(rq->dma_tag); done: return (err); } static void mlx5e_destroy_rq(struct mlx5e_rq *rq) { int wq_sz; int i; /* destroy all sysctl nodes */
sysctl_ctx_free(&rq->stats.ctx); /* free leftover LRO packets, if any */ tcp_lro_free(&rq->lro); wq_sz = mlx5_wq_ll_get_size(&rq->wq); for (i = 0; i != wq_sz; i++) { if (rq->mbuf[i].mbuf != NULL) { bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map); m_freem(rq->mbuf[i].mbuf); } bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); } free(rq->mbuf, M_MLX5EN); mlx5_wq_destroy(&rq->wq_ctrl); } static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; void *rqc; void *wq; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); wq = MLX5_ADDR_OF(rqc, rqc, wq); memcpy(rqc, param->rqc, sizeof(param->rqc)); MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); if (priv->counter_set_id >= 0) MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); mlx5_fill_page_array(&rq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); kvfree(in); return (err); } static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; void *rqc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rqn, rq->rqn); MLX5_SET(modify_rq_in, in, rq_state, curr_state); MLX5_SET(rqc, rqc, state, next_state); err = mlx5_core_modify_rq(mdev, in, inlen); kvfree(in); return (err); } static void mlx5e_disable_rq(struct mlx5e_rq *rq) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_rq(mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_wq_ll *wq = &rq->wq; int i; for (i = 0; i < 1000; i++) { if (wq->cur_sz >= priv->params.min_rx_wqes) return (0); msleep(4); } return (-ETIMEDOUT); } static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { int err; err = mlx5e_create_rq(c, param, rq); if (err) return (err); err = mlx5e_enable_rq(rq, param); if (err) goto err_destroy_rq; err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; c->rq.enabled = 1; return (0); err_disable_rq: mlx5e_disable_rq(rq); err_destroy_rq: mlx5e_destroy_rq(rq); return (err); } static void mlx5e_close_rq(struct mlx5e_rq *rq) { mtx_lock(&rq->mtx); rq->enabled = 0; callout_stop(&rq->watchdog); mtx_unlock(&rq->mtx); callout_drain(&rq->watchdog); mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); } static void mlx5e_close_rq_wait(struct mlx5e_rq *rq) { struct mlx5_core_dev *mdev = rq->channel->priv->mdev; /* wait till RQ is empty */ while (!mlx5_wq_ll_is_empty(&rq->wq) && (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) { msleep(4); rq->cq.mcq.comp(&rq->cq.mcq); } mlx5e_disable_rq(rq); mlx5e_destroy_rq(rq); } void mlx5e_free_sq_db(struct mlx5e_sq *sq) { int wq_sz = 
mlx5_wq_cyc_get_size(&sq->wq); int x; for (x = 0; x != wq_sz; x++) bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); free(sq->mbuf, M_MLX5EN); } int mlx5e_alloc_sq_db(struct mlx5e_sq *sq) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int err; int x; sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); /* Create DMA descriptor MAPs */ for (x = 0; x != wq_sz; x++) { err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map); if (err != 0) { while (x--) bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); free(sq->mbuf, M_MLX5EN); return (err); } } return (0); } static const char *mlx5e_sq_stats_desc[] = { MLX5E_SQ_STATS(MLX5E_STATS_DESC) }; void mlx5e_update_sq_inline(struct mlx5e_sq *sq) { sq->max_inline = sq->priv->params.tx_max_inline; sq->min_inline_mode = sq->priv->params.tx_min_inline_mode; /* * Check if trust state is DSCP or if inline mode is NONE which * indicates CX-5 or newer hardware. */ if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP || sq->min_inline_mode == MLX5_INLINE_MODE_NONE) { if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert)) sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN; else sq->min_insert_caps = MLX5E_INSERT_NON_VLAN; } else { sq->min_insert_caps = 0; } } static void mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) { int i; for (i = 0; i != c->num_tc; i++) { mtx_lock(&c->sq[i].lock); mlx5e_update_sq_inline(&c->sq[i]); mtx_unlock(&c->sq[i].lock); } } void mlx5e_refresh_sq_inline(struct mlx5e_priv *priv) { int i; /* check if channels are closed */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return; for (i = 0; i < priv->params.num_channels; i++) mlx5e_refresh_sq_inline_sub(priv, priv->channel[i]); } static int mlx5e_create_sq(struct mlx5e_channel *c, int tc, struct mlx5e_sq_param *param, struct mlx5e_sq *sq) { struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; char buffer[16]; void *sqc = param->sqc; void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); #ifdef RSS cpuset_t cpu_mask; int cpu_id; #endif int err; /* Create DMA descriptor TAG */ if ((err = -bus_dma_tag_create( bus_get_dma_tag(mdev->pdev->dev.bsddev), 1, /* any alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */ MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */ MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sq->dma_tag))) goto done; err = mlx5_alloc_map_uar(mdev, &sq->uar); if (err) goto err_free_dma_tag; err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; err = mlx5e_alloc_sq_db(sq); if (err) goto err_sq_wq_destroy; sq->mkey_be = c->mkey_be; sq->ifp = priv->ifp; sq->priv = priv; sq->tc = tc; mlx5e_update_sq_inline(sq); snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc); mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM, sq->stats.arg); return (0); err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); err_unmap_free_uar: mlx5_unmap_free_uar(mdev, &sq->uar); err_free_dma_tag: bus_dma_tag_destroy(sq->dma_tag); done: return (err); } static void mlx5e_destroy_sq(struct mlx5e_sq *sq) { /* destroy all sysctl nodes */ sysctl_ctx_free(&sq->stats.ctx); mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl);
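/* Release the UAR backing this SQ's doorbell and blueflame writes last. */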
mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar); } int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param, int tis_num) { void *in; void *sqc; void *wq; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * sq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); wq = MLX5_ADDR_OF(sqc, sqc, wq); memcpy(sqc, param->sqc, sizeof(param->sqc)); MLX5_SET(sqc, sqc, tis_num_0, tis_num); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); kvfree(in); return (err); } int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) { void *in; void *sqc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); MLX5_SET(modify_sq_in, in, sqn, sq->sqn); MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); kvfree(in); return (err); } void mlx5e_disable_sq(struct mlx5e_sq *sq) { mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); } static int mlx5e_open_sq(struct mlx5e_channel *c, int tc, struct mlx5e_sq_param *param, struct mlx5e_sq *sq) { int err; err = mlx5e_create_sq(c, tc, param, sq); if (err) return (err); err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]); if (err) goto err_destroy_sq; err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); if (err) goto err_disable_sq; return (0); err_disable_sq: mlx5e_disable_sq(sq); err_destroy_sq: mlx5e_destroy_sq(sq); return (err); } static void mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep) { /* fill up remainder with NOPs */ while (sq->cev_counter != 0) { while (!mlx5e_sq_has_room_for(sq, 1)) { if (can_sleep != 0) { mtx_unlock(&sq->lock); msleep(4); mtx_lock(&sq->lock); } else { goto done; } } /* send a single NOP */ mlx5e_send_nop(sq, 1); atomic_thread_fence_rel(); } done: /* Check if we need to write the doorbell */ if (likely(sq->doorbell.d64 != 0)) { mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); sq->doorbell.d64 = 0; } } void mlx5e_sq_cev_timeout(void *arg) { struct mlx5e_sq *sq = arg; mtx_assert(&sq->lock, MA_OWNED); /* check next state */ switch (sq->cev_next_state) { case MLX5E_CEV_STATE_SEND_NOPS: /* fill TX ring with NOPs, if any */ mlx5e_sq_send_nops_locked(sq, 0); /* check if completed */ if (sq->cev_counter == 0) { sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; return; } break; default: /* send NOPs on next timeout */ sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS; break; } /* restart timer */ callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq); } void mlx5e_drain_sq(struct mlx5e_sq *sq) { int error; struct mlx5_core_dev *mdev = sq->priv->mdev; /* * Check if already stopped. * * NOTE: The "stopped" variable is only written when both the * priv's configuration lock and the SQ's lock are locked. It * can therefore safely be read when only one of the two locks * is locked. This function is always called when the priv's * configuration lock is locked.
*/ if (sq->stopped != 0) return; mtx_lock(&sq->lock); /* don't put more packets into the SQ */ sq->stopped = 1; /* teardown event factor timer, if any */ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS; callout_stop(&sq->cev_callout); /* send dummy NOPs in order to flush the transmit ring */ mlx5e_sq_send_nops_locked(sq, 1); mtx_unlock(&sq->lock); /* make sure it is safe to free the callout */ callout_drain(&sq->cev_callout); /* wait till SQ is empty or link is down */ mtx_lock(&sq->lock); while (sq->cc != sq->pc && (sq->priv->media_status_last & IFM_ACTIVE) != 0 && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); sq->cq.mcq.comp(&sq->cq.mcq); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); /* error out remaining requests */ error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); if (error != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error); } /* wait till SQ is empty */ mtx_lock(&sq->lock); while (sq->cc != sq->pc && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); sq->cq.mcq.comp(&sq->cq.mcq); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); } static void mlx5e_close_sq_wait(struct mlx5e_sq *sq) { mlx5e_drain_sq(sq); mlx5e_disable_sq(sq); mlx5e_destroy_sq(sq); } static int mlx5e_create_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, mlx5e_cq_comp_t *comp, int eq_ix) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; int irqn; int err; u32 i; param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) return (err); mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn); mcq->cqe_sz = 64; mcq->set_ci_db = cq->wq_ctrl.db.db; mcq->arm_db = cq->wq_ctrl.db.db + 1; *mcq->set_ci_db = 0; *mcq->arm_db = 0; mcq->vector = eq_ix; mcq->comp = comp; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; mcq->uar = &priv->cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); cqe->op_own = 0xf1; } cq->priv = priv; return (0); } static void mlx5e_destroy_cq(struct mlx5e_cq *cq) { mlx5_wq_destroy(&cq->wq_ctrl); } static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix) { struct mlx5_core_cq *mcq = &cq->mcq; void *in; void *cqc; int inlen; int irqn_not_used; int eqn; int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); memcpy(cqc, param->cqc, sizeof(param->cqc)); mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen); kvfree(in); if (err) return (err); mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); return (0); } static void mlx5e_disable_cq(struct mlx5e_cq *cq) { mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); } int mlx5e_open_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, mlx5e_cq_comp_t *comp, int eq_ix) { int err; err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); if (err) return (err); err =
mlx5e_enable_cq(cq, param, eq_ix); if (err) goto err_destroy_cq; return (0); err_destroy_cq: mlx5e_destroy_cq(cq); return (err); } void mlx5e_close_cq(struct mlx5e_cq *cq) { mlx5e_disable_cq(cq); mlx5e_destroy_cq(cq); } static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { /* open completion queue */ err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, &mlx5e_tx_cq_comp, c->ix); if (err) goto err_close_tx_cqs; } return (0); err_close_tx_cqs: for (tc--; tc >= 0; tc--) mlx5e_close_cq(&c->sq[tc].cq); return (err); } static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) { int tc; for (tc = 0; tc < c->num_tc; tc++) mlx5e_close_cq(&c->sq[tc].cq); } static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); if (err) goto err_close_sqs; } return (0); err_close_sqs: for (tc--; tc >= 0; tc--) mlx5e_close_sq_wait(&c->sq[tc]); return (err); } static void mlx5e_close_sqs_wait(struct mlx5e_channel *c) { int tc; for (tc = 0; tc < c->num_tc; tc++) mlx5e_close_sq_wait(&c->sq[tc]); } static void mlx5e_chan_mtx_init(struct mlx5e_channel *c) { int tc; mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0); for (tc = 0; tc < c->num_tc; tc++) { struct mlx5e_sq *sq = c->sq + tc; mtx_init(&sq->lock, "mlx5tx", MTX_NETWORK_LOCK " TX", MTX_DEF); mtx_init(&sq->comp_lock, "mlx5comp", MTX_NETWORK_LOCK " TX", MTX_DEF); callout_init_mtx(&sq->cev_callout, &sq->lock, 0); sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; /* ensure the TX completion event factor is not zero */ if (sq->cev_factor == 0) sq->cev_factor = 1; } } static void mlx5e_chan_mtx_destroy(struct mlx5e_channel *c) { int tc; mtx_destroy(&c->rq.mtx); for (tc = 0; tc < c->num_tc; tc++) { mtx_destroy(&c->sq[tc].lock); mtx_destroy(&c->sq[tc].comp_lock); } } static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel *volatile *cp) { struct mlx5e_channel *c; int err; c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO); c->priv = priv; c->ix = ix; c->cpu = 0; c->ifp = priv->ifp; c->mkey_be = cpu_to_be32(priv->mr.key); c->num_tc = priv->num_tc; /* init mutexes */ mlx5e_chan_mtx_init(c); /* open transmit completion queue */ err = mlx5e_open_tx_cqs(c, cparam); if (err) goto err_free; /* open receive completion queue */ err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, &mlx5e_rx_cq_comp, c->ix); if (err) goto err_close_tx_cqs; err = mlx5e_open_sqs(c, cparam); if (err) goto err_close_rx_cq; err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; /* store channel pointer */ *cp = c; /* poll receive queue initially */ c->rq.cq.mcq.comp(&c->rq.cq.mcq); return (0); err_close_sqs: mlx5e_close_sqs_wait(c); err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); err_close_tx_cqs: mlx5e_close_tx_cqs(c); err_free: /* destroy mutexes */ mlx5e_chan_mtx_destroy(c); free(c, M_MLX5EN); return (err); } static void mlx5e_close_channel(struct mlx5e_channel *volatile *pp) { struct mlx5e_channel *c = *pp; /* check if channel is already closed */ if (c == NULL) return; mlx5e_close_rq(&c->rq); } static void mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp) { struct mlx5e_channel *c = *pp; /* check if channel is already closed */ if (c == NULL) return; /* ensure channel pointer is no longer used */ *pp = NULL; 
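/* * Channel teardown runs in the reverse order of mlx5e_open_channel(): * the RQ is drained and destroyed first, then the SQs, then their * completion queues, before the channel structure itself is freed. */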
mlx5e_close_rq_wait(&c->rq); mlx5e_close_sqs_wait(c); mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); /* destroy mutexes */ mlx5e_chan_mtx_destroy(c); free(c, M_MLX5EN); } static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs) { u32 r, n; r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz : MLX5E_SW2MB_MTU(priv->ifp->if_mtu); if (r > MJUM16BYTES) return (-ENOMEM); if (r > MJUM9BYTES) r = MJUM16BYTES; else if (r > MJUMPAGESIZE) r = MJUM9BYTES; else if (r > MCLBYTES) r = MJUMPAGESIZE; else r = MCLBYTES; /* * n + 1 must be a power of two, because stride size must be. * Stride size is 16 * (n + 1), as the first segment is * control. */ for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) ; *wqe_sz = r; *nsegs = n; return (0); } static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_rq_param *param) { void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); u32 wqe_sz, nsegs; mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + nsegs * sizeof(struct mlx5_wqe_data_seg))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; param->wq.linear = 1; } static void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->pdn); param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; param->wq.linear = 1; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; /* * TODO The sysctl to control on/off is a bool value for now, which means * we only support CSUM, once HASH is implemented we'll need to address that.
*/ if (priv->params.cqe_zipping_en) { MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); MLX5_SET(cqc, cqc, cqe_compression_en, 1); } MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); switch (priv->params.rx_cq_moderation_mode) { case 0: MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; default: if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); else MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; } mlx5e_build_common_cq_param(priv, param); } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); switch (priv->params.tx_cq_moderation_mode) { case 0: MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; default: if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); else MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; } mlx5e_build_common_cq_param(priv, param); } static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) { memset(cparam, 0, sizeof(*cparam)); mlx5e_build_rq_param(priv, &cparam->rq); mlx5e_build_sq_param(priv, &cparam->sq); mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); } static int mlx5e_open_channels(struct mlx5e_priv *priv) { struct mlx5e_channel_param cparam; void *ptr; int err; int i; int j; priv->channel = malloc(priv->params.num_channels * sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO); mlx5e_build_channel_param(priv, &cparam); for (i = 0; i < priv->params.num_channels; i++) { err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); if (err) goto err_close_channels; } for (j = 0; j < priv->params.num_channels; j++) { err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); if (err) goto err_close_channels; } return (0); err_close_channels: for (i--; i >= 0; i--) { mlx5e_close_channel(&priv->channel[i]); mlx5e_close_channel_wait(&priv->channel[i]); } /* remove "volatile" attribute from "channel" pointer */ ptr = __DECONST(void *, priv->channel); priv->channel = NULL; free(ptr, M_MLX5EN); return (err); } static void mlx5e_close_channels(struct mlx5e_priv *priv) { void *ptr; int i; if (priv->channel == NULL) return; for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel(&priv->channel[i]); for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel_wait(&priv->channel[i]); /* remove "volatile" attribute from "channel" pointer */ ptr = __DECONST(void *, priv->channel); priv->channel = NULL; free(ptr, M_MLX5EN); } static int mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) { if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { uint8_t cq_mode; switch (priv->params.tx_cq_moderation_mode) { case 0: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; break; default: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; break; } return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, priv->params.tx_cq_moderation_usec, priv->params.tx_cq_moderation_pkts, 
cq_mode)); } return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, priv->params.tx_cq_moderation_usec, priv->params.tx_cq_moderation_pkts)); } static int mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) { if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { uint8_t cq_mode; int retval; switch (priv->params.rx_cq_moderation_mode) { case 0: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; break; default: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; break; } retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts, cq_mode); return (retval); } return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts)); } static int mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) { int err; int i; if (c == NULL) return (EINVAL); err = mlx5e_refresh_rq_params(priv, &c->rq); if (err) goto done; for (i = 0; i != c->num_tc; i++) { err = mlx5e_refresh_sq_params(priv, &c->sq[i]); if (err) goto done; } done: return (err); } int mlx5e_refresh_channel_params(struct mlx5e_priv *priv) { int i; if (priv->channel == NULL) return (EINVAL); for (i = 0; i < priv->params.num_channels; i++) { int err; err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]); if (err) return (err); } return (0); } static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(create_tis_in)]; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); memset(in, 0, sizeof(in)); MLX5_SET(tisc, tisc, prio, tc); MLX5_SET(tisc, tisc, transport_domain, priv->tdn); return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); } static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) { mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } static int mlx5e_open_tises(struct mlx5e_priv *priv) { int num_tc = priv->num_tc; int err; int tc; for (tc = 0; tc < num_tc; tc++) { err = mlx5e_open_tis(priv, tc); if (err) goto err_close_tises; } return (0); err_close_tises: for (tc--; tc >= 0; tc--) mlx5e_close_tis(priv, tc); return (err); } static void mlx5e_close_tises(struct mlx5e_priv *priv) { int num_tc = priv->num_tc; int tc; for (tc = 0; tc < num_tc; tc++) mlx5e_close_tis(priv, tc); } static int mlx5e_open_rqt(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; void *rqtc; int inlen; int err; int sz; int i; sz = 1 << priv->params.rx_hash_log_tbl_sz; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); for (i = 0; i < sz; i++) { int ix = i; #ifdef RSS ix = rss_get_indirection_to_bucket(ix); #endif /* ensure we don't overflow */ ix %= priv->params.num_channels; /* apply receive side scaling stride, if any */ ix -= ix % (int)priv->params.channels_rsss; MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); } MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (!err) priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); kvfree(in); return (err); } static void mlx5e_close_rqt(struct mlx5e_priv *priv) { u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; MLX5_SET(destroy_rqt_in, in, opcode, 
MLX5_CMD_OP_DESTROY_RQT); MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); } static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); __be32 *hkey; MLX5_SET(tirc, tirc, transport_domain, priv->tdn); #define ROUGH_MAX_L2_L3_HDR_SZ 256 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) #define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_L4_SPORT |\ MLX5_HASH_FIELD_SEL_L4_DPORT) #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) if (priv->params.hw_lro_en) { MLX5_SET(tirc, tirc, lro_enable_mask, MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); MLX5_SET(tirc, tirc, lro_max_msg_sz, (priv->params.lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); /* TODO: add the option to choose timer value dynamically */ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, MLX5_CAP_ETH(priv->mdev, lro_timer_supported_periods[2])); } /* setup parameters for hashing TIR type, if any */ switch (tt) { case MLX5E_TT_ANY: MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); MLX5_SET(tirc, tirc, inline_rqn, priv->channel[0]->rq.rqn); break; default: MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, priv->rqtn); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); #ifdef RSS /* * The FreeBSD RSS implementation currently does not * support symmetric Toeplitz hashes: */ MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); rss_getkey((uint8_t *)hkey); #else MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); hkey[0] = cpu_to_be32(0xD181C62C); hkey[1] = cpu_to_be32(0xF7F4DB5B); hkey[2] = cpu_to_be32(0x1983A2FC); hkey[3] = cpu_to_be32(0x943E1ADB); hkey[4] = cpu_to_be32(0xD9389E6B); hkey[5] = cpu_to_be32(0xD1039C2C); hkey[6] = cpu_to_be32(0xA74499AD); hkey[7] = cpu_to_be32(0x593D56D9); hkey[8] = cpu_to_be32(0xF3253C06); hkey[9] = cpu_to_be32(0x2ADC1FFC); #endif break; } switch (tt) { case MLX5E_TT_IPV4_TCP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV6_TCP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV4_UDP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV6_UDP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV4_IPSEC_AH: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV6_IPSEC_AH: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV4_IPSEC_ESP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV6_IPSEC_ESP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV4: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; case MLX5E_TT_IPV6: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; default: break; } } static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; void *tirc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_tir_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); mlx5e_build_tir_ctx(priv, tirc, tt); err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); kvfree(in); return (err); } static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) { mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); } static int mlx5e_open_tirs(struct mlx5e_priv *priv) { int err; int i; for (i = 0; i < MLX5E_NUM_TT; i++) { err = mlx5e_open_tir(priv, i); if (err) goto err_close_tirs; } return (0); err_close_tirs: for (i--; i >= 0; i--) mlx5e_close_tir(priv, i); return (err); } static void mlx5e_close_tirs(struct mlx5e_priv *priv) { int i; for (i = 0; i < MLX5E_NUM_TT; i++) mlx5e_close_tir(priv, i); } /* * SW MTU does not include headers, * HW MTU includes all headers and checksums. 
*/ static int mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) { struct mlx5e_priv *priv = ifp->if_softc; struct mlx5_core_dev *mdev = priv->mdev; int hw_mtu; int err; hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); err = mlx5_set_port_mtu(mdev, hw_mtu); if (err) { if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n", __func__, sw_mtu, err); return (err); } /* Update vport context MTU */ err = mlx5_set_vport_mtu(mdev, hw_mtu); if (err) { if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n", __func__, err); } ifp->if_mtu = sw_mtu; err = mlx5_query_vport_mtu(mdev, &hw_mtu); if (err || !hw_mtu) { /* fallback to port oper mtu */ err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); } if (err) { if_printf(ifp, "Query port MTU, after setting new " "MTU value, failed\n"); return (err); } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { err = -E2BIG; if_printf(ifp, "Port MTU %d is smaller than " "ifp mtu %d\n", hw_mtu, sw_mtu); } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { err = -EINVAL; if_printf(ifp, "Port MTU %d is bigger than " "ifp mtu %d\n", hw_mtu, sw_mtu); } priv->params_ethtool.hw_mtu = hw_mtu; return (err); } int mlx5e_open_locked(struct ifnet *ifp) { struct mlx5e_priv *priv = ifp->if_softc; int err; u16 set_id; /* check if already opened */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) return (0); #ifdef RSS if (rss_getnumbuckets() > priv->params.num_channels) { if_printf(ifp, "NOTE: There are more RSS buckets(%u) than " "channels(%u) available\n", rss_getnumbuckets(), priv->params.num_channels); } #endif err = mlx5e_open_tises(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n", __func__, err); return (err); } err = mlx5_vport_alloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, &set_id); if (err) { if_printf(priv->ifp, "%s: mlx5_vport_alloc_q_counter failed: %d\n", __func__, err); goto err_close_tises; } /* store counter set ID */ priv->counter_set_id = set_id; err = mlx5e_open_channels(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n", __func__, err); goto err_dealloc_q_counter; } err = mlx5e_open_rqt(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n", __func__, err); goto err_close_channels; } err = mlx5e_open_tirs(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n", __func__, err); goto err_close_rqls; } err = mlx5e_open_flow_table(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n", __func__, err); goto err_close_tirs; } err = mlx5e_add_all_vlan_rules(priv); if (err) { if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n", __func__, err); goto err_close_flow_table; } set_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_update_carrier(priv); mlx5e_set_rx_mode_core(priv); return (0); err_close_flow_table: mlx5e_close_flow_table(priv); err_close_tirs: mlx5e_close_tirs(priv); err_close_rqls: mlx5e_close_rqt(priv); err_close_channels: mlx5e_close_channels(priv); err_dealloc_q_counter: mlx5_vport_dealloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); err_close_tises: mlx5e_close_tises(priv); return (err); } static void mlx5e_open(void *arg) { struct mlx5e_priv *priv = arg; PRIV_LOCK(priv); if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) if_printf(priv->ifp, "%s: Setting port status to up failed\n", __func__); mlx5e_open_locked(priv->ifp); priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; PRIV_UNLOCK(priv); } int mlx5e_close_locked(struct ifnet *ifp) { struct mlx5e_priv *priv = ifp->if_softc; /* check if already closed */ if
(test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return (0); clear_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_set_rx_mode_core(priv); mlx5e_del_all_vlan_rules(priv); if_link_state_change(priv->ifp, LINK_STATE_DOWN); mlx5e_close_flow_table(priv); mlx5e_close_tirs(priv); mlx5e_close_rqt(priv); mlx5e_close_channels(priv); mlx5_vport_dealloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); mlx5e_close_tises(priv); return (0); } #if (__FreeBSD_version >= 1100000) static uint64_t mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) { struct mlx5e_priv *priv = ifp->if_softc; u64 retval; /* PRIV_LOCK(priv); XXX not allowed */ switch (cnt) { case IFCOUNTER_IPACKETS: retval = priv->stats.vport.rx_packets; break; case IFCOUNTER_IERRORS: retval = priv->stats.vport.rx_error_packets + priv->stats.pport.alignment_err + priv->stats.pport.check_seq_err + priv->stats.pport.crc_align_errors + priv->stats.pport.in_range_len_errors + priv->stats.pport.jabbers + priv->stats.pport.out_of_range_len + priv->stats.pport.oversize_pkts + priv->stats.pport.symbol_err + priv->stats.pport.too_long_errors + priv->stats.pport.undersize_pkts + priv->stats.pport.unsupported_op_rx; break; case IFCOUNTER_IQDROPS: retval = priv->stats.vport.rx_out_of_buffer + priv->stats.pport.drop_events; break; case IFCOUNTER_OPACKETS: retval = priv->stats.vport.tx_packets; break; case IFCOUNTER_OERRORS: retval = priv->stats.vport.tx_error_packets; break; case IFCOUNTER_IBYTES: retval = priv->stats.vport.rx_bytes; break; case IFCOUNTER_OBYTES: retval = priv->stats.vport.tx_bytes; break; case IFCOUNTER_IMCASTS: retval = priv->stats.vport.rx_multicast_packets; break; case IFCOUNTER_OMCASTS: retval = priv->stats.vport.tx_multicast_packets; break; case IFCOUNTER_OQDROPS: retval = priv->stats.vport.tx_queue_dropped; break; case IFCOUNTER_COLLISIONS: retval = priv->stats.pport.collisions; break; default: retval = if_get_counter_default(ifp, cnt); break; } /* PRIV_UNLOCK(priv); XXX not allowed */ return (retval); } #endif static void mlx5e_set_rx_mode(struct ifnet *ifp) { struct mlx5e_priv *priv = ifp->if_softc; queue_work(priv->wq, &priv->set_rx_mode_work); } static int mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct mlx5e_priv *priv; struct ifreq *ifr; struct ifi2creq i2c; int error = 0; int mask = 0; int size_read = 0; int module_status; int module_num; int max_mtu; uint8_t read_addr; priv = ifp->if_softc; /* check if detaching */ if (priv == NULL || priv->gone != 0) return (ENXIO); switch (command) { case SIOCSIFMTU: ifr = (struct ifreq *)data; PRIV_LOCK(priv); mlx5_query_port_max_mtu(priv->mdev, &max_mtu); if (ifr->ifr_mtu >= MLX5E_MTU_MIN && ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { int was_opened; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) mlx5e_close_locked(ifp); /* set new MTU */ mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); if (was_opened) mlx5e_open_locked(ifp); } else { error = EINVAL; if_printf(ifp, "Invalid MTU value. 
Min val: %d, Max val: %d\n", MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); } PRIV_UNLOCK(priv); break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { mlx5e_set_rx_mode(ifp); break; } PRIV_LOCK(priv); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) mlx5e_open_locked(ifp); ifp->if_drv_flags |= IFF_DRV_RUNNING; mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mlx5_set_port_status(priv->mdev, MLX5_PORT_DOWN); if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) mlx5e_close_locked(ifp); mlx5e_update_carrier(priv); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } PRIV_UNLOCK(priv); break; case SIOCADDMULTI: case SIOCDELMULTI: mlx5e_set_rx_mode(ifp); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: ifr = (struct ifreq *)data; error = ifmedia_ioctl(ifp, ifr, &priv->media, command); break; case SIOCSIFCAP: ifr = (struct ifreq *)data; PRIV_LOCK(priv); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_IP_TSO; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO6; ifp->if_hwassist &= ~CSUM_IP6_TSO; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); error = EAGAIN; goto out; } ifp->if_capenable ^= IFCAP_TSO4; ifp->if_hwassist ^= CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); error = EAGAIN; goto out; } ifp->if_capenable ^= IFCAP_TSO6; ifp->if_hwassist ^= CSUM_IP6_TSO; } if (mask & IFCAP_VLAN_HWFILTER) { if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) mlx5e_disable_vlan_filter(priv); else mlx5e_enable_vlan_filter(priv); ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; } if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_WOL_MAGIC) ifp->if_capenable ^= IFCAP_WOL_MAGIC; VLAN_CAPABILITIES(ifp); /* turning off LRO also means turning off HW LRO, if it's on */ if (mask & IFCAP_LRO) { int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); bool need_restart = false; ifp->if_capenable ^= IFCAP_LRO; if (!(ifp->if_capenable & IFCAP_LRO)) { if (priv->params.hw_lro_en) { priv->params.hw_lro_en = false; need_restart = true; /* Not sure this is the correct way */ priv->params_ethtool.hw_lro = priv->params.hw_lro_en; } } if (was_opened && need_restart) { mlx5e_close_locked(ifp); mlx5e_open_locked(ifp); } } if (mask & IFCAP_HWRXTSTMP) { ifp->if_capenable ^= IFCAP_HWRXTSTMP; if (ifp->if_capenable & IFCAP_HWRXTSTMP) { if (priv->clbr_done == 0) mlx5e_reset_calibration_callout(priv); } else { callout_drain(&priv->tstmp_clbr); priv->clbr_done = 0; } } out: PRIV_UNLOCK(priv); break; case SIOCGI2C: ifr = (struct ifreq *)data; /* *
Copy from the user-space address ifr_data to the * kernel-space address i2c */ error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (error) break; if (i2c.len > sizeof(i2c.data)) { error = EINVAL; break; } PRIV_LOCK(priv); /* Get module_num which is required for the query_eeprom */ error = mlx5_query_module_num(priv->mdev, &module_num); if (error) { if_printf(ifp, "Query module num failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } /* Check if module is present before doing an access */ module_status = mlx5_query_module_status(priv->mdev, module_num); if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED && module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) { error = EINVAL; goto err_i2c; } /* * Currently 0xA0 and 0xA2 are the only addresses permitted. * The internal conversion is as follows: */ if (i2c.dev_addr == 0xA0) read_addr = MLX5E_I2C_ADDR_LOW; else if (i2c.dev_addr == 0xA2) read_addr = MLX5E_I2C_ADDR_HIGH; else { if_printf(ifp, "Query eeprom failed, " "Invalid Address: %X\n", i2c.dev_addr); error = EINVAL; goto err_i2c; } error = mlx5_query_eeprom(priv->mdev, read_addr, MLX5E_EEPROM_LOW_PAGE, (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, (uint32_t *)i2c.data, &size_read); if (error) { if_printf(ifp, "Query eeprom failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } if (i2c.len > MLX5_EEPROM_MAX_BYTES) { error = mlx5_query_eeprom(priv->mdev, read_addr, MLX5E_EEPROM_LOW_PAGE, (uint32_t)(i2c.offset + size_read), (uint32_t)(i2c.len - size_read), module_num, (uint32_t *)(i2c.data + size_read), &size_read); } if (error) { if_printf(ifp, "Query eeprom failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); err_i2c: PRIV_UNLOCK(priv); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) { /* * TODO: uncomment once FW really sets all these bits if * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return * -ENOTSUPP; */ /* TODO: add more must-have features */ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return (-ENODEV); return (0); } static u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) { uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U; bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2; /* verify against driver hardware limit */ if (bf_buf_size > MLX5E_MAX_TX_INLINE) bf_buf_size = MLX5E_MAX_TX_INLINE; return (bf_buf_size); } static int mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, int num_comp_vectors) { int err; /* * TODO: Consider link speed for setting "log_sq_size", * "log_rq_size" and "cq_moderation_xxx": */ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; priv->params.rx_cq_moderation_usec = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; priv->params.rx_cq_moderation_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
1 : 0; priv->params.rx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; priv->params.tx_cq_moderation_usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.rx_hash_log_tbl_sz = (order_base_2(num_comp_vectors) > MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? order_base_2(num_comp_vectors) : MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; priv->params.num_tc = 1; priv->params.default_vlan_prio = 0; priv->counter_set_id = -1; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); if (err) return (err); /* * HW LRO is currently defaulted to off. When that changes we * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" */ priv->params.hw_lro_en = false; priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression); priv->mdev = mdev; priv->params.num_channels = num_comp_vectors; priv->params.channels_rsss = 1; priv->order_base_2_num_channels = order_base_2(num_comp_vectors); priv->queue_mapping_channel_mask = roundup_pow_of_two(num_comp_vectors) - 1; priv->num_tc = priv->params.num_tc; priv->default_vlan_prio = priv->params.default_vlan_prio; INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); return (0); } static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, struct mlx5_core_mr *mkey) { struct ifnet *ifp = priv->ifp; struct mlx5_core_dev *mdev = priv->mdev; int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; u32 *in; int err; in = mlx5_vzalloc(inlen); if (in == NULL) { if_printf(ifp, "%s: failed to allocate inbox\n", __func__); return (-ENOMEM); } mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, qpn, 0xffffff); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); if (err) if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n", __func__, err); kvfree(in); return (err); } static const char *mlx5e_vport_stats_desc[] = { MLX5E_VPORT_STATS(MLX5E_STATS_DESC) }; static const char *mlx5e_pport_stats_desc[] = { MLX5E_PPORT_STATS(MLX5E_STATS_DESC) }; static void mlx5e_priv_mtx_init(struct mlx5e_priv *priv) { mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); sx_init(&priv->state_lock, "mlx5state"); callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); } static void mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv) { mtx_destroy(&priv->async_events_mtx); sx_destroy(&priv->state_lock); } static int sysctl_firmware(SYSCTL_HANDLER_ARGS) { /* * "%d.%d.%d" is the string format. * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. * We need at most 5 chars to store that. * It also has: two "." and a terminating NUL, which means we need 18 * (5*3 + 3) chars at most.
*/ char fw[18]; struct mlx5e_priv *priv = arg1; int error; snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), fw_rev_sub(priv->mdev)); error = sysctl_handle_string(oidp, fw, sizeof(fw), req); return (error); } static void mlx5e_disable_tx_dma(struct mlx5e_channel *ch) { int i; for (i = 0; i < ch->num_tc; i++) mlx5e_drain_sq(&ch->sq[i]); } static void mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) { sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); sq->doorbell.d64 = 0; } void mlx5e_resume_sq(struct mlx5e_sq *sq) { int err; /* check if already enabled */ if (sq->stopped == 0) return; err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, MLX5_SQC_STATE_RST); if (err != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); } sq->cc = 0; sq->pc = 0; /* reset doorbell prior to moving from RST to RDY */ mlx5e_reset_sq_doorbell_record(sq); err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); if (err != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); } mtx_lock(&sq->lock); sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; sq->stopped = 0; mtx_unlock(&sq->lock); } static void mlx5e_enable_tx_dma(struct mlx5e_channel *ch) { int i; for (i = 0; i < ch->num_tc; i++) mlx5e_resume_sq(&ch->sq[i]); } static void mlx5e_disable_rx_dma(struct mlx5e_channel *ch) { struct mlx5e_rq *rq = &ch->rq; int err; mtx_lock(&rq->mtx); rq->enabled = 0; callout_stop(&rq->watchdog); mtx_unlock(&rq->mtx); callout_drain(&rq->watchdog); err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err); } while (!mlx5_wq_ll_is_empty(&rq->wq)) { msleep(1); rq->cq.mcq.comp(&rq->cq.mcq); } /* * Transitioning into RST state will allow the FW to track fewer ERR state queues, * thus reducing the recv queue flushing time */ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); } } static void mlx5e_enable_rx_dma(struct mlx5e_channel *ch) { struct mlx5e_rq *rq = &ch->rq; int err; rq->wq.wqe_ctr = 0; mlx5_wq_ll_update_db_record(&rq->wq); err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); } rq->enabled = 1; rq->cq.mcq.comp(&rq->cq.mcq); } void mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) { int i; if (priv->channel == NULL) return; for (i = 0; i < priv->params.num_channels; i++) { if (!priv->channel[i]) continue; if (value) mlx5e_disable_tx_dma(priv->channel[i]); else mlx5e_enable_tx_dma(priv->channel[i]); } } void mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) { int i; if (priv->channel == NULL) return; for (i = 0; i < priv->params.num_channels; i++) { if (!priv->channel[i]) continue; if (value) mlx5e_disable_rx_dma(priv->channel[i]); else mlx5e_enable_rx_dma(priv->channel[i]); } } static void mlx5e_add_hw_stats(struct mlx5e_priv *priv) { SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, sysctl_firmware, "A", "HCA firmware version"); SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, "Board ID"); } static int mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{ struct mlx5e_priv *priv = arg1; uint32_t tx_pfc; uint32_t value; int error; PRIV_LOCK(priv); tx_pfc = priv->params.tx_priority_flow_control; /* get current value */ value = (tx_pfc >> arg2) & 1; error = sysctl_handle_32(oidp, &value, 0, req); /* range check value */ if (value != 0) priv->params.tx_priority_flow_control |= (1 << arg2); else priv->params.tx_priority_flow_control &= ~(1 << arg2); /* check if update is required */ if (error == 0 && priv->gone == 0 && tx_pfc != priv->params.tx_priority_flow_control) { error = -mlx5e_set_port_pfc(priv); /* restore previous value */ if (error != 0) priv->params.tx_priority_flow_control = tx_pfc; } PRIV_UNLOCK(priv); return (error); } static int mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) { struct mlx5e_priv *priv = arg1; uint32_t rx_pfc; uint32_t value; int error; PRIV_LOCK(priv); rx_pfc = priv->params.rx_priority_flow_control; /* get current value */ value = (rx_pfc >> arg2) & 1; error = sysctl_handle_32(oidp, &value, 0, req); /* range check value */ if (value != 0) priv->params.rx_priority_flow_control |= (1 << arg2); else priv->params.rx_priority_flow_control &= ~(1 << arg2); /* check if update is required */ if (error == 0 && priv->gone == 0 && rx_pfc != priv->params.rx_priority_flow_control) { error = -mlx5e_set_port_pfc(priv); /* restore previous value */ if (error != 0) priv->params.rx_priority_flow_control = rx_pfc; } PRIV_UNLOCK(priv); return (error); } static void mlx5e_setup_pauseframes(struct mlx5e_priv *priv) { unsigned int x; char path[96]; int error; #if (__FreeBSD_version < 1100000) int value; /* scratch variable for the tunable fetches below */ #endif /* enable pauseframes by default */ priv->params.tx_pauseframe_control = 1; priv->params.rx_pauseframe_control = 1; /* disable ports flow control, PFC, by default */ priv->params.tx_priority_flow_control = 0; priv->params.rx_priority_flow_control = 0; #if (__FreeBSD_version < 1100000) /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", device_get_unit(priv->mdev->pdev->dev.bsddev)); /* try to fetch tunable, if any */ TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", device_get_unit(priv->mdev->pdev->dev.bsddev)); /* try to fetch tunable, if any */ TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); for (x = 0; x != 8; x++) { /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u", device_get_unit(priv->mdev->pdev->dev.bsddev), x); /* try to fetch tunable, if any */ if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0) priv->params.tx_priority_flow_control |= 1 << x; /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u", device_get_unit(priv->mdev->pdev->dev.bsddev), x); /* try to fetch tunable, if any */ if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0) priv->params.rx_priority_flow_control |= 1 << x; } #endif /* register pauseframe SYSCTLs */ SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, &priv->params.tx_pauseframe_control, 0, "Set to enable TX pause frames. Clear to disable."); SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, &priv->params.rx_pauseframe_control, 0, "Set to enable RX pause frames.
Clear to disable."); /* register priority_flow control, PFC, SYSCTLs */ for (x = 0; x != 8; x++) { snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x); SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU", "Set to enable TX ports flow control frames for given priority. Clear to disable."); snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x); SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU", "Set to enable RX ports flow control frames for given priority. Clear to disable."); } PRIV_LOCK(priv); /* range check */ priv->params.tx_pauseframe_control = priv->params.tx_pauseframe_control ? 1 : 0; priv->params.rx_pauseframe_control = priv->params.rx_pauseframe_control ? 1 : 0; /* update firmware */ error = mlx5e_set_port_pause_and_pfc(priv); if (error == -EINVAL) { if_printf(priv->ifp, "Global pauseframes must be disabled before enabling PFC.\n"); priv->params.rx_priority_flow_control = 0; priv->params.tx_priority_flow_control = 0; /* update firmware */ (void) mlx5e_set_port_pause_and_pfc(priv); } PRIV_UNLOCK(priv); } static void * mlx5e_create_ifp(struct mlx5_core_dev *mdev) { struct ifnet *ifp; struct mlx5e_priv *priv; u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); struct sysctl_oid_list *child; int ncv = mdev->priv.eq_table.num_comp_vectors; char unit[16]; int err; int i; u32 eth_proto_cap; if (mlx5e_check_required_hca_cap(mdev)) { mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); return (NULL); } priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO); mlx5e_priv_mtx_init(priv); ifp = priv->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { mlx5_core_err(mdev, "if_alloc() failed\n"); goto err_free_priv; } ifp->if_softc = priv; if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); ifp->if_mtu = ETHERMTU; ifp->if_init = mlx5e_open; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = mlx5e_ioctl; ifp->if_transmit = mlx5e_xmit; ifp->if_qflush = if_qflush; #if (__FreeBSD_version >= 1100000) ifp->if_get_counter = mlx5e_get_counter; #endif ifp->if_snd.ifq_maxlen = ifqmaxlen; /* * Set driver features */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_LRO; ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP; #ifdef RATELIMIT ifp->if_capabilities |= IFCAP_TXRTLMT; ifp->if_snd_tag_alloc = mlx5e_rl_snd_tag_alloc; ifp->if_snd_tag_free = mlx5e_rl_snd_tag_free; ifp->if_snd_tag_modify = mlx5e_rl_snd_tag_modify; ifp->if_snd_tag_query = mlx5e_rl_snd_tag_query; #endif /* set TSO limits so that we don't have to drop TX packets */ ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 
ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); sysctl_ctx_init(&priv->sysctl_ctx_channel_debug); /* ifnet sysctl tree */ sysctl_ctx_init(&priv->sysctl_ctx); priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); if (priv->sysctl_ifnet == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } snprintf(unit, sizeof(unit), "%d", ifp->if_dunit); priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); if (priv->sysctl_ifnet == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } /* HW sysctl tree */ child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); if (priv->sysctl_hw == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } err = mlx5e_build_ifp_priv(mdev, priv, ncv); if (err) { mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); goto err_free_sysctl; } snprintf(unit, sizeof(unit), "mce%u_wq", device_get_unit(mdev->pdev->dev.bsddev)); priv->wq = alloc_workqueue(unit, 0, 1); if (priv->wq == NULL) { if_printf(ifp, "%s: alloc_workqueue failed\n", __func__); goto err_free_sysctl; } err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); if (err) { if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n", __func__, err); goto err_free_wq; } err = mlx5_core_alloc_pd(mdev, &priv->pdn); if (err) { if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n", __func__, err); goto err_unmap_free_uar; } err = mlx5_alloc_transport_domain(mdev, &priv->tdn); if (err) { if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n", __func__, err); goto err_dealloc_pd; } err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); if (err) { if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n", __func__, err); goto err_dealloc_transport_domain; } mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); /* check if we should generate a random MAC address */ if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && is_zero_ether_addr(dev_addr)) { random_ether_addr(dev_addr); if_printf(ifp, "Assigned random MAC address\n"); } #ifdef RATELIMIT err = mlx5e_rl_init(priv); if (err) { if_printf(ifp, "%s: mlx5e_rl_init failed, %d\n", __func__, err); goto err_create_mkey; } #endif /* set default MTU */ mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); /* Set default media status */ priv->media_status_last = IFM_AVALID; priv->media_active_last = IFM_ETHER | IFM_AUTO | IFM_ETH_RXPAUSE | IFM_FDX; /* setup default pauseframes configuration */ mlx5e_setup_pauseframes(priv); err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); if (err) { eth_proto_cap = 0; if_printf(ifp, "%s: Query port media capability failed, %d\n", __func__, err); } /* Setup supported media */ ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, mlx5e_media_change, mlx5e_media_status); for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { if (mlx5e_mode_table[i].baudrate == 0) continue; if (MLX5E_PROT_MASK(i) & eth_proto_cap) { ifmedia_add(&priv->media, mlx5e_mode_table[i].subtype | IFM_ETHER, 0, NULL); ifmedia_add(&priv->media, mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); } } + + /* Additional supported media */ + ifmedia_add(&priv->media, IFM_10G_LR | IFM_ETHER, 0, NULL); +
ifmedia_add(&priv->media, IFM_10G_LR | + IFM_ETHER | IFM_FDX | + IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); + + ifmedia_add(&priv->media, IFM_40G_ER4 | IFM_ETHER, 0, NULL); + ifmedia_add(&priv->media, IFM_40G_ER4 | + IFM_ETHER | IFM_FDX | + IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); /* Set autoselect by default */ ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); ether_ifattach(ifp, dev_addr); /* Register for VLAN events */ priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); /* Link is down by default */ if_link_state_change(ifp, LINK_STATE_DOWN); mlx5e_enable_async_events(priv); mlx5e_add_hw_stats(priv); mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, priv->stats.vport.arg); mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, priv->stats.pport.arg); mlx5e_create_ethtool(priv); mtx_lock(&priv->async_events_mtx); mlx5e_update_stats(priv); mtx_unlock(&priv->async_events_mtx); SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "rx_clbr_done", CTLFLAG_RD, &priv->clbr_done, 0, "RX timestamps calibration state"); callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT); mlx5e_reset_calibration_callout(priv); return (priv); #ifdef RATELIMIT err_create_mkey: mlx5_core_destroy_mkey(priv->mdev, &priv->mr); #endif err_dealloc_transport_domain: mlx5_dealloc_transport_domain(mdev, priv->tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, priv->pdn); err_unmap_free_uar: mlx5_unmap_free_uar(mdev, &priv->cq_uar); err_free_wq: destroy_workqueue(priv->wq); err_free_sysctl: sysctl_ctx_free(&priv->sysctl_ctx); sysctl_ctx_free(&priv->sysctl_ctx_channel_debug); if_free(ifp); err_free_priv: mlx5e_priv_mtx_destroy(priv); free(priv, M_MLX5EN); return (NULL); } static void mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) { struct mlx5e_priv *priv = vpriv; struct ifnet *ifp = priv->ifp; /* don't allow more IOCTLs */ priv->gone = 1; /* XXX wait a bit to allow IOCTL handlers to complete */ pause("W", hz); #ifdef RATELIMIT /* * The kernel can have reference(s) via the m_snd_tag's into * the ratelimit channels, and these must go away before * detaching: */ while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) { if_printf(priv->ifp, "Waiting for all ratelimit connections " "to terminate\n"); pause("W", hz); } #endif /* stop watchdog timer */ callout_drain(&priv->watchdog); callout_drain(&priv->tstmp_clbr); if (priv->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); if (priv->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); /* make sure device gets closed */ PRIV_LOCK(priv); mlx5e_close_locked(ifp); PRIV_UNLOCK(priv); /* unregister device */ ifmedia_removeall(&priv->media); ether_ifdetach(ifp); if_free(ifp); #ifdef RATELIMIT mlx5e_rl_cleanup(priv); #endif /* destroy all remaining sysctl nodes */ if (priv->sysctl_debug) { sysctl_ctx_free(&priv->sysctl_ctx_channel_debug); sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); } sysctl_ctx_free(&priv->stats.vport.ctx); 
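/*
 * Editor's annotation, not part of this revision: every supported media
 * word registered in mlx5e_create_ifp() above is added twice, once bare
 * and once with IFM_FDX plus both pause directions.  A hypothetical
 * helper capturing that pattern could look like this (the helper name is
 * an assumption, not driver API):
 *
 *	static void
 *	mlx5e_ifmedia_add_pair(struct mlx5e_priv *priv, int subtype)
 *	{
 *		ifmedia_add(&priv->media, subtype | IFM_ETHER, 0, NULL);
 *		ifmedia_add(&priv->media, subtype | IFM_ETHER | IFM_FDX |
 *		    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
 *	}
 *
 * The IFM_10G_LR and IFM_40G_ER4 entries added by this revision follow
 * the same two-entry pattern.
 */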
sysctl_ctx_free(&priv->stats.pport.ctx); sysctl_ctx_free(&priv->sysctl_ctx); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); mlx5e_disable_async_events(priv); destroy_workqueue(priv->wq); mlx5e_priv_mtx_destroy(priv); free(priv, M_MLX5EN); } static void * mlx5e_get_ifp(void *vpriv) { struct mlx5e_priv *priv = vpriv; return (priv->ifp); } static struct mlx5_interface mlx5e_interface = { .add = mlx5e_create_ifp, .remove = mlx5e_destroy_ifp, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_ifp, }; void mlx5e_init(void) { mlx5_register_interface(&mlx5e_interface); } void mlx5e_cleanup(void) { mlx5_unregister_interface(&mlx5e_interface); } static void mlx5e_show_version(void __unused *arg) { printf("%s", mlx5e_version); } SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL); module_init_order(mlx5e_init, SI_ORDER_THIRD); module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); #if (__FreeBSD_version >= 1100000) MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); #endif MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); MODULE_VERSION(mlx5en, 1); Index: head/sys/dev/mlx5/mlx5_ifc.h =================================================================== --- head/sys/dev/mlx5/mlx5_ifc.h (revision 341580) +++ head/sys/dev/mlx5/mlx5_ifc.h (revision 341581) @@ -1,9797 +1,9906 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef MLX5_IFC_H #define MLX5_IFC_H #include enum { MLX5_EVENT_TYPE_COMP = 0x0, MLX5_EVENT_TYPE_PATH_MIG = 0x1, MLX5_EVENT_TYPE_COMM_EST = 0x2, MLX5_EVENT_TYPE_SQ_DRAINED = 0x3, MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_CQ_ERROR = 0x4, MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x5, MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x7, MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x8, MLX5_EVENT_TYPE_PORT_CHANGE = 0x9, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_CODING_TEMP_WARNING_EVENT = 0x17, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT = 0x1e, MLX5_EVENT_TYPE_CODING_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT = 0x22, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, MLX5_EVENT_TYPE_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CMD = 0xa, MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, }; enum { MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3, MLX5_MODIFY_TIR_BITMASK_SELF_LB_EN = 0x4 }; enum { MLX5_MODIFY_RQT_BITMASK_RQN_LIST = 0x1, }; enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, }; enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, MLX5_CMD_OP_INIT_HCA = 0x102, MLX5_CMD_OP_TEARDOWN_HCA = 0x103, MLX5_CMD_OP_ENABLE_HCA = 0x104, MLX5_CMD_OP_DISABLE_HCA = 0x105, MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e, MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP = 0x10f, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, MLX5_CMD_OP_GEN_EQE = 0x304, MLX5_CMD_OP_CREATE_CQ = 0x400, MLX5_CMD_OP_DESTROY_CQ = 0x401, MLX5_CMD_OP_QUERY_CQ = 0x402, MLX5_CMD_OP_MODIFY_CQ = 0x403, MLX5_CMD_OP_CREATE_QP = 0x500, MLX5_CMD_OP_DESTROY_QP = 0x501, MLX5_CMD_OP_RST2INIT_QP = 0x502, MLX5_CMD_OP_INIT2RTR_QP = 0x503, MLX5_CMD_OP_RTR2RTS_QP = 0x504, MLX5_CMD_OP_RTS2RTS_QP = 0x505, MLX5_CMD_OP_SQERR2RTS_QP = 0x506, MLX5_CMD_OP_2ERR_QP = 0x507, MLX5_CMD_OP_2RST_QP = 0x50a, MLX5_CMD_OP_QUERY_QP = 0x50b, MLX5_CMD_OP_SQD_RTS_QP = 0x50c, MLX5_CMD_OP_INIT2INIT_QP = 0x50e, MLX5_CMD_OP_CREATE_PSV = 0x600, MLX5_CMD_OP_DESTROY_PSV = 0x601, MLX5_CMD_OP_CREATE_SRQ = 0x700, MLX5_CMD_OP_DESTROY_SRQ = 0x701, MLX5_CMD_OP_QUERY_SRQ = 0x702, MLX5_CMD_OP_ARM_RQ = 0x703, MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, MLX5_CMD_OP_CREATE_DCT = 0x710, MLX5_CMD_OP_DESTROY_DCT = 0x711, MLX5_CMD_OP_DRAIN_DCT = 0x712, MLX5_CMD_OP_QUERY_DCT = 0x713, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, MLX5_CMD_OP_SET_DC_CNAK_TRACE = 0x715, MLX5_CMD_OP_QUERY_DC_CNAK_TRACE 
= 0x716, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, MLX5_CMD_OP_DEALLOC_UAR = 0x803, MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, MLX5_CMD_OP_NOP = 0x80d, MLX5_CMD_OP_ALLOC_XRCD = 0x80e, MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, MLX5_CMD_OP_SET_BURST_SIZE = 0x812, MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813, MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, MLX5_CMD_OP_SET_DIAGNOSTICS = 0x820, MLX5_CMD_OP_QUERY_DIAGNOSTICS = 0x821, MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_SET_WOL_ROL = 0x830, MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_LAG = 0x840, MLX5_CMD_OP_MODIFY_LAG = 0x841, MLX5_CMD_OP_QUERY_LAG = 0x842, MLX5_CMD_OP_DESTROY_LAG = 0x843, MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844, MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, MLX5_CMD_OP_QUERY_TIR = 0x903, MLX5_CMD_OP_CREATE_SQ = 0x904, MLX5_CMD_OP_MODIFY_SQ = 0x905, MLX5_CMD_OP_DESTROY_SQ = 0x906, MLX5_CMD_OP_QUERY_SQ = 0x907, MLX5_CMD_OP_CREATE_RQ = 0x908, MLX5_CMD_OP_MODIFY_RQ = 0x909, MLX5_CMD_OP_DESTROY_RQ = 0x90a, MLX5_CMD_OP_QUERY_RQ = 0x90b, MLX5_CMD_OP_CREATE_RMP = 0x90c, MLX5_CMD_OP_MODIFY_RMP = 0x90d, MLX5_CMD_OP_DESTROY_RMP = 0x90e, MLX5_CMD_OP_QUERY_RMP = 0x90f, MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911, MLX5_CMD_OP_CREATE_TIS = 0x912, MLX5_CMD_OP_MODIFY_TIS = 0x913, MLX5_CMD_OP_DESTROY_TIS = 0x914, MLX5_CMD_OP_QUERY_TIS = 0x915, MLX5_CMD_OP_CREATE_RQT = 0x916, MLX5_CMD_OP_MODIFY_RQT = 0x917, MLX5_CMD_OP_DESTROY_RQT = 0x918, MLX5_CMD_OP_QUERY_RQT = 0x919, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, 
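/*
 * Editor's annotation: the command opcode list above is grouped by
 * functional area rather than sorted numerically; for example,
 * MLX5_CMD_OP_MAD_IFC (0x50d) appears among the 0x80x management
 * opcodes.
 */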
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, }; enum { MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_QUERY_FW_INFO = 0x8007, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_CAPABILITY = 0x8400, MLX5_ICMD_CMDS_OPCODE_ICMD_ACCESS_REGISTER = 0x9001, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_VIRTUAL_MAC = 0x9003, MLX5_ICMD_CMDS_OPCODE_ICMD_SET_VIRTUAL_MAC = 0x9004, MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_WOL_ROL = 0x9005, MLX5_ICMD_CMDS_OPCODE_ICMD_SET_WOL_ROL = 0x9006, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_INIT = 0x9007, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_HEADER_STATUS = 0x9008, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_ETOC_STATUS = 0x9009, MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_SET_EVENT = 0x900a, MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_INIT_OCSD = 0xf004 }; struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; u8 outer_ether_type[0x1]; u8 reserved_0[0x1]; u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; u8 reserved_1[0x1]; u8 outer_second_prio[0x1]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; u8 outer_ipv6_flow_label[0x1]; u8 outer_sip[0x1]; u8 outer_dip[0x1]; u8 outer_frag[0x1]; u8 outer_ip_protocol[0x1]; u8 outer_ip_ecn[0x1]; u8 outer_ip_dscp[0x1]; u8 outer_udp_sport[0x1]; u8 outer_udp_dport[0x1]; u8 outer_tcp_sport[0x1]; u8 outer_tcp_dport[0x1]; u8 outer_tcp_flags[0x1]; u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; u8 outer_geneve_vni[0x1]; u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; u8 reserved_2[0x1]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; u8 inner_smac[0x1]; u8 inner_ether_type[0x1]; u8 reserved_3[0x1]; u8 inner_first_prio[0x1]; u8 inner_first_cfi[0x1]; u8 inner_first_vid[0x1]; u8 reserved_4[0x1]; u8 inner_second_prio[0x1]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0x1]; u8 inner_ipv6_flow_label[0x1]; u8 inner_sip[0x1]; u8 inner_dip[0x1]; u8 inner_frag[0x1]; u8 inner_ip_protocol[0x1]; u8 inner_ip_ecn[0x1]; u8 inner_ip_dscp[0x1]; u8 inner_udp_sport[0x1]; u8 inner_udp_dport[0x1]; u8 inner_tcp_sport[0x1]; u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; u8 reserved_5[0x9]; u8 reserved_6[0x1a]; u8 bth_dst_qp[0x1]; u8 reserved_7[0x4]; u8 source_sqn[0x1]; u8 reserved_8[0x20]; }; struct mlx5_ifc_eth_discard_cntrs_grp_bits { u8 ingress_general_high[0x20]; u8 ingress_general_low[0x20]; u8 ingress_policy_engine_high[0x20]; u8 ingress_policy_engine_low[0x20]; u8 ingress_vlan_membership_high[0x20]; u8 ingress_vlan_membership_low[0x20]; u8 ingress_tag_frame_type_high[0x20]; u8 ingress_tag_frame_type_low[0x20]; u8 egress_vlan_membership_high[0x20]; u8 egress_vlan_membership_low[0x20]; u8 loopback_filter_high[0x20]; u8 loopback_filter_low[0x20]; u8 egress_general_high[0x20]; u8 egress_general_low[0x20]; u8 reserved_at_1c0[0x40]; u8 egress_hoq_high[0x20]; u8 egress_hoq_low[0x20]; u8 port_isolation_high[0x20]; u8 port_isolation_low[0x20]; u8 egress_policy_engine_high[0x20]; u8 
egress_policy_engine_low[0x20]; u8 ingress_tx_link_down_high[0x20]; u8 ingress_tx_link_down_low[0x20]; u8 egress_stp_filter_high[0x20]; u8 egress_stp_filter_low[0x20]; u8 egress_hoq_stall_high[0x20]; u8 egress_hoq_stall_low[0x20]; u8 reserved_at_340[0x440]; }; struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 flow_tag[0x1]; u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table[0x1]; u8 flow_table_modify[0x1]; u8 encap[0x1]; u8 decap[0x1]; u8 reset_root_to_default[0x1]; u8 reserved_at_a[0x16]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 reserved_at_28[0x10]; u8 max_ft_level[0x8]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8]; u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; u8 reserved_at_c0[0x40]; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; }; struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 send[0x1]; u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; u8 atomic[0x1]; u8 srq_receive[0x1]; u8 reserved_0[0x1a]; }; struct mlx5_ifc_flow_counter_list_bits { u8 reserved_0[0x10]; u8 flow_counter_id[0x10]; u8 reserved_1[0x20]; }; enum { MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0, MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 0x2, MLX5_FLOW_CONTEXT_DEST_TYPE_QP = 0x3, }; struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; u8 reserved_0[0x20]; }; struct mlx5_ifc_ipv4_layout_bits { u8 reserved_at_0[0x60]; u8 ipv4[0x20]; }; struct mlx5_ifc_ipv6_layout_bits { u8 ipv6[16][0x8]; }; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { struct mlx5_ifc_ipv6_layout_bits ipv6_layout; struct mlx5_ifc_ipv4_layout_bits ipv4_layout; u8 reserved_at_0[0x80]; }; struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; u8 smac_15_0[0x10]; u8 ethertype[0x10]; u8 dmac_47_16[0x20]; u8 dmac_15_0[0x10]; u8 first_prio[0x3]; u8 first_cfi[0x1]; u8 first_vid[0xc]; u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; u8 cvlan_tag[0x1]; u8 svlan_tag[0x1]; u8 frag[0x1]; u8 reserved_1[0x4]; u8 tcp_flags[0x9]; u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; u8 reserved_2[0x20]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_0[0x8]; u8 source_sqn[0x18]; u8 reserved_1[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0xc]; u8 inner_second_prio[0x3]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; u8 outer_second_vlan_tag[0x1]; u8 inner_second_vlan_tag[0x1]; u8 reserved_2[0xe]; u8 gre_protocol[0x10]; u8 gre_key_h[0x18]; u8 gre_key_l[0x8]; u8 vxlan_vni[0x18]; u8 reserved_3[0x8]; u8 geneve_vni[0x18]; u8 reserved4[0x7]; u8 geneve_oam[0x1]; u8 reserved_5[0xc]; u8 outer_ipv6_flow_label[0x14]; u8 reserved_6[0xc]; u8 inner_ipv6_flow_label[0x14]; u8 reserved_7[0xa]; u8 geneve_opt_len[0x6]; u8 geneve_protocol_type[0x10]; u8 reserved_8[0x8]; u8 bth_dst_qp[0x18]; u8 reserved_9[0xa0]; }; struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; u8 pa_l[0x14]; u8 reserved_0[0xc]; }; struct mlx5_ifc_uint64_bits { u8 hi[0x20]; u8 lo[0x20]; }; struct mlx5_ifc_application_prio_entry_bits { u8 reserved_0[0x8]; u8 priority[0x3]; u8 reserved_1[0x2]; u8 sel[0x3]; u8 protocol_id[0x10]; }; 
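/*
 * Editor's sketch, not part of this revision: the *_bits structures in
 * this header are never instantiated directly; they only describe
 * big-endian field offsets consumed by the MLX5_SET()/MLX5_GET()/
 * MLX5_ADDR_OF() accessors.  Assuming that standard accessor pattern,
 * matching TCP traffic in an fte_match_param could look roughly as
 * follows (function and variable names are illustrative):
 */
#if 0	/* illustrative sketch only */
static void
example_match_tcp(u32 *match_criteria, u32 *match_value)
{
	void *hdrs_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
	    outer_headers);
	void *hdrs_v = MLX5_ADDR_OF(fte_match_param, match_value,
	    outer_headers);

	/* Mask the ip_protocol field, then match IPPROTO_TCP (6). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_c, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ip_protocol, 6);
}
#endif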
struct mlx5_ifc_nodnic_ring_doorbell_bits { u8 reserved_0[0x8]; u8 ring_pi[0x10]; u8 reserved_1[0x8]; }; enum { MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, MLX5_ADS_STAT_RATE_10GBPS = 0x8, MLX5_ADS_STAT_RATE_30GBPS = 0x9, MLX5_ADS_STAT_RATE_5GBPS = 0xa, MLX5_ADS_STAT_RATE_20GBPS = 0xb, MLX5_ADS_STAT_RATE_40GBPS = 0xc, MLX5_ADS_STAT_RATE_60GBPS = 0xd, MLX5_ADS_STAT_RATE_80GBPS = 0xe, MLX5_ADS_STAT_RATE_120GBPS = 0xf, }; struct mlx5_ifc_ads_bits { u8 fl[0x1]; u8 free_ar[0x1]; u8 reserved_0[0xe]; u8 pkey_index[0x10]; u8 reserved_1[0x8]; u8 grh[0x1]; u8 mlid[0x7]; u8 rlid[0x10]; u8 ack_timeout[0x5]; u8 reserved_2[0x3]; u8 src_addr_index[0x8]; u8 log_rtm[0x4]; u8 stat_rate[0x4]; u8 hop_limit[0x8]; u8 reserved_3[0x4]; u8 tclass[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; u8 reserved_4[0x4]; u8 f_dscp[0x1]; u8 f_ecn[0x1]; u8 reserved_5[0x1]; u8 f_eth_prio[0x1]; u8 ecn[0x2]; u8 dscp[0x6]; u8 udp_sport[0x10]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 sl[0x4]; u8 port[0x8]; u8 rmac_47_32[0x10]; u8 rmac_31_0[0x20]; }; struct mlx5_ifc_diagnostic_counter_cap_bits { u8 sync[0x1]; u8 reserved_0[0xf]; u8 counter_id[0x10]; }; struct mlx5_ifc_debug_cap_bits { u8 reserved_0[0x18]; u8 log_max_samples[0x8]; u8 single[0x1]; u8 repetitive[0x1]; u8 health_mon_rx_activity[0x1]; u8 reserved_1[0x15]; u8 log_min_sample_period[0x8]; u8 reserved_2[0x1c0]; struct mlx5_ifc_diagnostic_counter_cap_bits diagnostic_counter[0x1f0]; }; struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; u8 esw_scheduling[0x1]; u8 esw_bw_share[0x1]; u8 esw_rate_limit[0x1]; u8 hll[0x1]; u8 packet_pacing_burst_bound[0x1]; u8 reserved_at_6[0x1a]; u8 reserved_at_20[0x20]; u8 packet_pacing_max_rate[0x20]; u8 packet_pacing_min_rate[0x20]; u8 reserved_at_80[0x10]; u8 packet_pacing_rate_table_size[0x10]; u8 esw_element_type[0x10]; u8 esw_tsar_type[0x10]; u8 reserved_at_c0[0x10]; u8 max_qos_para_vport[0x10]; u8 max_tsar_bw_share[0x20]; u8 reserved_at_100[0x700]; }; struct mlx5_ifc_snapshot_cap_bits { u8 reserved_0[0x1d]; u8 suspend_qp_uc[0x1]; u8 suspend_qp_ud[0x1]; u8 suspend_qp_rc[0x1]; u8 reserved_1[0x1c]; u8 restore_pd[0x1]; u8 restore_uar[0x1]; u8 restore_mkey[0x1]; u8 restore_qp[0x1]; u8 reserved_2[0x1e]; u8 named_mkey[0x1]; u8 named_qp[0x1]; u8 reserved_3[0x7a0]; }; struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; u8 reserved_0[0x19]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 reserved_1[0x7e0]; }; struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_0[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; u8 reserved_1[0x7800]; }; struct mlx5_ifc_flow_table_nic_cap_bits { u8 nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; u8 reserved_at_3[0x1fd]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma; struct 
mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; u8 reserved_1[0x7200]; }; +enum { + MLX5_ACCESS_REG_SUMMARY_CTRL_ID_PDDR = 0x5031, +}; + +struct mlx5_ifc_pddr_module_info_bits { + u8 cable_technology[0x8]; + u8 cable_breakout[0x8]; + u8 ext_ethernet_compliance_code[0x8]; + u8 ethernet_compliance_code[0x8]; + + u8 cable_type[0x4]; + u8 cable_vendor[0x4]; + u8 cable_length[0x8]; + u8 cable_identifier[0x8]; + u8 cable_power_class[0x8]; + + u8 reserved_at_40[0x8]; + u8 cable_rx_amp[0x8]; + u8 cable_rx_emphasis[0x8]; + u8 cable_tx_equalization[0x8]; + + u8 reserved_at_60[0x8]; + u8 cable_attenuation_12g[0x8]; + u8 cable_attenuation_7g[0x8]; + u8 cable_attenuation_5g[0x8]; + + u8 reserved_at_80[0x8]; + u8 rx_cdr_cap[0x4]; + u8 tx_cdr_cap[0x4]; + u8 reserved_at_90[0x4]; + u8 rx_cdr_state[0x4]; + u8 reserved_at_98[0x4]; + u8 tx_cdr_state[0x4]; + + u8 vendor_name[16][0x8]; + + u8 vendor_pn[16][0x8]; + + u8 vendor_rev[0x20]; + + u8 fw_version[0x20]; + + u8 vendor_sn[16][0x8]; + + u8 temperature[0x10]; + u8 voltage[0x10]; + + u8 rx_power_lane0[0x10]; + u8 rx_power_lane1[0x10]; + + u8 rx_power_lane2[0x10]; + u8 rx_power_lane3[0x10]; + + u8 reserved_at_2c0[0x40]; + + u8 tx_power_lane0[0x10]; + u8 tx_power_lane1[0x10]; + + u8 tx_power_lane2[0x10]; + u8 tx_power_lane3[0x10]; + + u8 reserved_at_340[0x40]; + + u8 tx_bias_lane0[0x10]; + u8 tx_bias_lane1[0x10]; + + u8 tx_bias_lane2[0x10]; + u8 tx_bias_lane3[0x10]; + + u8 reserved_at_3c0[0x40]; + + u8 temperature_high_th[0x10]; + u8 temperature_low_th[0x10]; + + u8 voltage_high_th[0x10]; + u8 voltage_low_th[0x10]; + + u8 rx_power_high_th[0x10]; + u8 rx_power_low_th[0x10]; + + u8 tx_power_high_th[0x10]; + u8 tx_power_low_th[0x10]; + + u8 tx_bias_high_th[0x10]; + u8 tx_bias_low_th[0x10]; + + u8 reserved_at_4a0[0x10]; + u8 wavelength[0x10]; + + u8 reserved_at_4c0[0x300]; +}; + +union mlx5_ifc_pddr_operation_info_page_pddr_phy_info_page_pddr_troubleshooting_page_pddr_module_info_auto_bits { + struct mlx5_ifc_pddr_module_info_bits pddr_module_info; + u8 reserved_at_0[0x7c0]; +}; + +struct mlx5_ifc_pddr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_at_12[0xe]; + + u8 reserved_at_20[0x18]; + u8 page_select[0x8]; + + union mlx5_ifc_pddr_operation_info_page_pddr_phy_info_page_pddr_troubleshooting_page_pddr_module_info_auto_bits page_data; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; u8 lro_max_msg_sz_mode[0x2]; u8 wqe_vlan_insert[0x1]; u8 self_lb_en_modifiable[0x1]; u8 self_lb_mc[0x1]; u8 self_lb_uc[0x1]; u8 max_lso_cap[0x5]; u8 multi_pkt_send_wqe[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 scatter_fcs[0x1]; u8 reserved_1[0x2]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 tunnel_lro_gre[0x1]; u8 tunnel_lro_vxlan[0x1]; u8 tunnel_statless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 reserved_2[0x1b]; u8 max_geneve_opt_len[0x1]; u8 tunnel_stateless_geneve_rx[0x1]; u8 reserved_3[0x10]; u8 lro_min_mss_size[0x10]; u8 reserved_4[0x120]; u8 lro_timer_supported_periods[4][0x20]; u8 reserved_5[0x600]; }; enum { MLX5_ROCE_CAP_L3_TYPE_GRH = 0x1, MLX5_ROCE_CAP_L3_TYPE_IPV4 = 0x2, MLX5_ROCE_CAP_L3_TYPE_IPV6 = 0x4, }; struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; u8 rts2rts_primary_eth_prio[0x1]; u8 roce_rx_allow_untagged[0x1]; u8 rts2rts_src_addr_index_for_vlan_valid_vlan_id[0x1]; u8 reserved_0[0x1c]; u8 reserved_1[0x60]; u8 
reserved_2[0xc]; u8 l3_type[0x4]; u8 reserved_3[0x8]; u8 roce_version[0x8]; u8 reserved_4[0x10]; u8 r_roce_dest_udp_port[0x10]; u8 r_roce_max_src_udp_port[0x10]; u8 r_roce_min_src_udp_port[0x10]; u8 reserved_5[0x10]; u8 roce_address_table_size[0x10]; u8 reserved_6[0x700]; }; enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x1, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, }; enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, }; struct mlx5_ifc_atomic_caps_bits { u8 reserved_0[0x40]; u8 atomic_req_8B_endianess_mode[0x2]; u8 reserved_1[0x4]; u8 supported_atomic_req_8B_endianess_mode_1[0x1]; u8 reserved_2[0x19]; u8 reserved_3[0x20]; u8 reserved_4[0x10]; u8 atomic_operations[0x10]; u8 reserved_5[0x10]; u8 atomic_size_qp[0x10]; u8 reserved_6[0x10]; u8 atomic_size_dc[0x10]; u8 reserved_7[0x720]; }; struct mlx5_ifc_odp_cap_bits { u8 reserved_0[0x40]; u8 sig[0x1]; u8 reserved_1[0x1f]; u8 reserved_2[0x20]; struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; u8 reserved_3[0x6e0]; }; enum { MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, }; enum { MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, }; enum { MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, }; enum { MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, }; struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_0[0x80]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; u8 reserved_1[0xb]; u8 log_max_qp[0x5]; u8 reserved_2[0xb]; u8 log_max_srq[0x5]; u8 reserved_3[0x10]; u8 reserved_4[0x8]; u8 log_max_cq_sz[0x8]; u8 reserved_5[0xb]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; u8 relaxed_ordering_write[1]; u8 reserved_6[0x1]; u8 log_max_mkey[0x6]; u8 reserved_7[0xc]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; u8 reserved_8[0x1]; u8 log_max_mrw_sz[0x7]; u8 force_teardown[0x1]; u8 reserved_9[0x1]; u8 log_max_bsf_list_size[0x6]; u8 reserved_10[0x2]; u8 log_max_klm_list_size[0x6]; u8 reserved_11[0xa]; u8 log_max_ra_req_dc[0x6]; u8 reserved_12[0xa]; u8 log_max_ra_res_dc[0x6]; 
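/*
 * Editor's sketch, not part of this revision: the PDDR register layout
 * added above (register id 0x5031, per
 * MLX5_ACCESS_REG_SUMMARY_CTRL_ID_PDDR) would presumably be read through
 * mlx5_core_access_reg(), selecting the wanted page first, e.g.:
 *
 *	u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(pddr_reg)] = {0};
 *	int err;
 *
 *	MLX5_SET(pddr_reg, in, local_port, 1);
 *	MLX5_SET(pddr_reg, in, page_select, module_info_page);
 *	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
 *	    0x5031, 0, 0);
 *	if (err == 0)
 *		cable_type = MLX5_GET(pddr_reg, out,
 *		    page_data.pddr_module_info.cable_type);
 *
 * The page_select value for the module info page and the nested
 * MLX5_GET() path are assumptions taken from the layout above, not
 * confirmed driver code.
 */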
u8 reserved_13[0xa]; u8 log_max_ra_req_qp[0x6]; u8 reserved_14[0xa]; u8 log_max_ra_res_qp[0x6]; u8 pad_cap[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; u8 reserved_at_165[0xa]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; u8 debug[0x1]; u8 modify_rq_counters_set_id[0x1]; u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; u8 vport_group_manager[0x1]; u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; u8 reserved_17[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; u8 reserved_18[0x3]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; u8 reserved_19[0x5]; u8 port_type[0x2]; u8 num_ports[0x8]; u8 snapshot[0x1]; u8 reserved_20[0x2]; u8 log_max_msg[0x5]; u8 reserved_21[0x4]; u8 max_tc[0x4]; u8 temp_warn_event[0x1]; u8 dcbx[0x1]; u8 general_notification_event[0x1]; u8 reserved_at_1d3[0x2]; u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_23[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; u8 wol_b[0x1]; u8 wol_m[0x1]; u8 wol_u[0x1]; u8 wol_p[0x1]; u8 stat_rate_support[0x10]; u8 reserved_24[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; u8 reserved_25[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_ipoib_offloads[0x1]; u8 reserved_26[0x8]; u8 dc_connect_qp[0x1]; u8 dc_cnak_trace[0x1]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; u8 reserved_27[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; u8 reserved_28[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; u8 qos[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; u8 reserved_30[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; u8 cq_period_mode_modify[0x1]; u8 cq_invalidate[0x1]; u8 reserved_at_225[0x1]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; u8 exponential_backoff[0x1]; u8 scqe_break_moderation[0x1]; u8 cq_period_start_from_cqe[0x1]; u8 cd[0x1]; u8 atm[0x1]; u8 apm[0x1]; u8 imaicl[0x1]; u8 reserved_32[0x6]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; u8 reserved_33[0x3]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; u8 reserved_34[0xa]; u8 uar_sz[0x6]; u8 reserved_35[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; u8 reserved_36[0x8]; u8 log_bf_reg_size[0x5]; u8 reserved_37[0x10]; u8 num_of_diagnostic_counters[0x10]; u8 max_wqe_sz_sq[0x10]; u8 reserved_38[0x10]; u8 max_wqe_sz_rq[0x10]; u8 reserved_39[0x10]; u8 max_wqe_sz_sq_dc[0x10]; u8 reserved_40[0x7]; u8 max_qp_mcg[0x19]; u8 reserved_41[0x18]; u8 log_max_mcg[0x8]; u8 reserved_42[0x3]; u8 log_max_transport_domain[0x5]; u8 reserved_43[0x3]; u8 log_max_pd[0x5]; u8 reserved_44[0xb]; u8 log_max_xrcd[0x5]; u8 reserved_45[0x10]; u8 max_flow_counter[0x10]; u8 reserved_46[0x3]; u8 log_max_rq[0x5]; u8 reserved_47[0x3]; u8 log_max_sq[0x5]; u8 reserved_48[0x3]; u8 log_max_tir[0x5]; u8 reserved_49[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_50[0x2]; u8 log_max_rmp[0x5]; u8 reserved_51[0x3]; u8 log_max_rqt[0x5]; u8 reserved_52[0x3]; u8 log_max_rqt_size[0x5]; u8 reserved_53[0x3]; u8 log_max_tis_per_sq[0x5]; u8 reserved_54[0x3]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_55[0x3]; u8 log_min_stride_sz_rq[0x5]; u8 reserved_56[0x3]; u8 log_max_stride_sz_sq[0x5]; u8 reserved_57[0x3]; u8 log_min_stride_sz_sq[0x5]; u8 reserved_58[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb[0x1]; u8 reserved_59[0x9]; u8 
log_max_vlan_list[0x5]; u8 reserved_60[0x3]; u8 log_max_current_mc_list[0x5]; u8 reserved_61[0x3]; u8 log_max_current_uc_list[0x5]; u8 reserved_62[0x80]; u8 reserved_63[0x3]; u8 log_max_l2_table[0x5]; u8 reserved_64[0x8]; u8 log_uar_page_sz[0x10]; u8 reserved_65[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; u8 reserved_66[0x80]; u8 log_max_atomic_size_qp[0x8]; u8 reserved_67[0x10]; u8 log_max_atomic_size_dc[0x8]; u8 reserved_68[0x1f]; u8 cqe_compression[0x1]; u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; u8 reserved_69[0x220]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, }; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { struct mlx5_ifc_dest_format_struct_bits dest_format_struct; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; u8 reserved_0[0x40]; }; struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; u8 reserved_0[0xa00]; }; enum { MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, }; struct mlx5_ifc_rx_hash_field_select_bits { u8 l3_prot_type[0x1]; u8 l4_prot_type[0x1]; u8 selected_fields[0x1e]; }; enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, MLX5_WQ_TYPE_STRQ_LINKED_LIST = 0x2, MLX5_WQ_TYPE_STRQ_CYCLIC = 0x3, }; enum rq_type { RQ_TYPE_NONE, RQ_TYPE_STRIDE, }; enum { MLX5_WQ_END_PAD_MODE_NONE = 0x0, MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, }; struct mlx5_ifc_wq_bits { u8 wq_type[0x4]; u8 wq_signature[0x1]; u8 end_padding_mode[0x2]; u8 cd_slave[0x1]; u8 reserved_0[0x18]; u8 hds_skip_first_sge[0x1]; u8 log2_hds_buf_size[0x3]; u8 reserved_1[0x7]; u8 page_offset[0x5]; u8 lwm[0x10]; u8 reserved_2[0x8]; u8 pd[0x18]; u8 reserved_3[0x8]; u8 uar_page[0x18]; u8 dbr_addr[0x40]; u8 hw_counter[0x20]; u8 sw_counter[0x20]; u8 reserved_4[0xc]; u8 log_wq_stride[0x4]; u8 reserved_5[0x3]; u8 log_wq_pg_sz[0x5]; u8 reserved_6[0x3]; u8 log_wq_sz[0x5]; u8 reserved_7[0x15]; u8 single_wqe_log_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_8[0x4]; u8 single_stride_log_num_of_bytes[0x3]; u8 reserved_9[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; struct mlx5_ifc_rq_num_bits { u8 reserved_0[0x8]; u8 rq_num[0x18]; }; struct mlx5_ifc_mac_address_layout_bits { u8 reserved_0[0x10]; u8 mac_addr_47_32[0x10]; u8 mac_addr_31_0[0x20]; }; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_0[0xa0]; u8 min_time_between_cnps[0x20]; u8 reserved_1[0x12]; u8 cnp_dscp[0x6]; u8 reserved_2[0x4]; u8 cnp_prio_mode[0x1]; u8 cnp_802p_prio[0x3]; u8 reserved_3[0x720]; }; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { u8 reserved_0[0x60]; u8 reserved_1[0x4]; u8 clamp_tgt_rate[0x1]; u8 reserved_2[0x3]; u8 clamp_tgt_rate_after_time_inc[0x1]; u8 reserved_3[0x17]; u8 reserved_4[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_5[0xe0]; u8 rate_to_set_on_first_cnp[0x20]; u8 dce_tcp_g[0x20]; u8 dce_tcp_rtt[0x20]; u8 rate_reduce_monitor_period[0x20]; u8 
reserved_6[0x20]; u8 initial_alpha_value[0x20]; u8 reserved_7[0x4a0]; }; struct mlx5_ifc_cong_control_802_1qau_rp_bits { u8 reserved_0[0x80]; u8 rppp_max_rps[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_1[0x640]; }; enum { MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, }; struct mlx5_ifc_resize_field_select_bits { u8 resize_field_select[0x20]; }; enum { MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD_MODE = 0x10, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_STATUS = 0x20, }; struct mlx5_ifc_modify_field_select_bits { u8 modify_field_select[0x20]; }; struct mlx5_ifc_field_select_r_roce_np_bits { u8 field_select_r_roce_np[0x20]; }; enum { MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE = 0x2, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE_AFTER_TIME_INC = 0x4, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_TIME_RESET = 0x8, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_BYTE_RESET = 0x10, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_THRESHOLD = 0x20, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MAX_RATE = 0x40, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_AI_RATE = 0x80, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_HAI_RATE = 0x100, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_DEC_FAC = 0x200, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_RATE = 0x400, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_TO_SET_ON_FIRST_CNP = 0x800, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_G = 0x1000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_RTT = 0x2000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_REDUCE_MONITOR_PERIOD = 0x4000, MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_INITIAL_ALPHA_VALUE = 0x8000, }; struct mlx5_ifc_field_select_r_roce_rp_bits { u8 field_select_r_roce_rp[0x20]; }; enum { MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, }; struct mlx5_ifc_field_select_802_1qau_rp_bits { u8 field_select_8021qaurp[0x20]; }; struct mlx5_ifc_pptb_reg_bits { u8 reserved_0[0x2]; u8 mm[0x2]; u8 reserved_1[0x4]; u8 local_port[0x8]; u8 reserved_2[0x6]; u8 cm[0x1]; u8 um[0x1]; u8 pm[0x8]; u8 prio7buff[0x4]; u8 prio6buff[0x4]; u8 prio5buff[0x4]; u8 prio4buff[0x4]; u8 prio3buff[0x4]; u8 
prio2buff[0x4]; u8 prio1buff[0x4]; u8 prio0buff[0x4]; u8 pm_msb[0x8]; u8 reserved_3[0x10]; u8 ctrl_buff[0x4]; u8 untagged_buff[0x4]; }; struct mlx5_ifc_dcbx_app_reg_bits { u8 reserved_0[0x8]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1a]; u8 num_app_prio[0x6]; u8 reserved_3[0x40]; struct mlx5_ifc_application_prio_entry_bits app_prio[0]; }; struct mlx5_ifc_dcbx_param_reg_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 dcbx_standby_cap[0x1]; u8 reserved_0[0x5]; u8 port_number[0x8]; u8 reserved_1[0xa]; u8 max_application_table_size[0x6]; u8 reserved_2[0x15]; u8 version_oper[0x3]; u8 reserved_3[0x5]; u8 version_admin[0x3]; u8 willing_admin[0x1]; u8 reserved_4[0x3]; u8 pfc_cap_oper[0x4]; u8 reserved_5[0x4]; u8 pfc_cap_admin[0x4]; u8 reserved_6[0x4]; u8 num_of_tc_oper[0x4]; u8 reserved_7[0x4]; u8 num_of_tc_admin[0x4]; u8 remote_willing[0x1]; u8 reserved_8[0x3]; u8 remote_pfc_cap[0x4]; u8 reserved_9[0x14]; u8 remote_num_of_tc[0x4]; u8 reserved_10[0x18]; u8 error[0x8]; u8 reserved_11[0x160]; }; struct mlx5_ifc_qhll_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x1b]; u8 hll_time[0x5]; u8 stall_en[0x1]; u8 reserved_at_41[0x1c]; u8 stall_cnt[0x3]; }; struct mlx5_ifc_qetcr_reg_bits { u8 operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x20]; u8 tc[8][0x40]; u8 global_configuration[0x40]; }; struct mlx5_ifc_nodnic_ring_config_reg_bits { u8 queue_address_63_32[0x20]; u8 queue_address_31_12[0x14]; u8 reserved_0[0x6]; u8 log_size[0x6]; struct mlx5_ifc_nodnic_ring_doorbell_bits doorbell; u8 reserved_1[0x8]; u8 queue_number[0x18]; u8 q_key[0x20]; u8 reserved_2[0x10]; u8 pkey_index[0x10]; u8 reserved_3[0x40]; }; struct mlx5_ifc_nodnic_cq_arming_word_bits { u8 reserved_0[0x8]; u8 cq_ci[0x10]; u8 reserved_1[0x8]; }; enum { MLX5_NODNIC_EVENT_WORD_LINK_TYPE_INFINIBAND = 0x0, MLX5_NODNIC_EVENT_WORD_LINK_TYPE_ETHERNET = 0x1, }; enum { MLX5_NODNIC_EVENT_WORD_PORT_STATE_DOWN = 0x0, MLX5_NODNIC_EVENT_WORD_PORT_STATE_INITIALIZE = 0x1, MLX5_NODNIC_EVENT_WORD_PORT_STATE_ARMED = 0x2, MLX5_NODNIC_EVENT_WORD_PORT_STATE_ACTIVE = 0x3, }; struct mlx5_ifc_nodnic_event_word_bits { u8 driver_reset_needed[0x1]; u8 port_management_change_event[0x1]; u8 reserved_0[0x19]; u8 link_type[0x1]; u8 port_state[0x4]; }; struct mlx5_ifc_nic_vport_change_event_bits { u8 reserved_0[0x10]; u8 vport_num[0x10]; u8 reserved_1[0xc0]; }; struct mlx5_ifc_pages_req_event_bits { u8 reserved_0[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; u8 reserved_1[0xa0]; }; struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; u8 reserved_0[0xc0]; }; struct mlx5_ifc_stall_vl_event_bits { u8 reserved_0[0x18]; u8 port_num[0x1]; u8 reserved_1[0x3]; u8 vl[0x4]; u8 reserved_2[0xa0]; }; struct mlx5_ifc_db_bf_congestion_event_bits { u8 event_subtype[0x8]; u8 reserved_0[0x8]; u8 congestion_level[0x8]; u8 reserved_1[0x8]; u8 reserved_2[0xa0]; }; struct mlx5_ifc_gpio_event_bits { u8 reserved_0[0x60]; u8 gpio_event_hi[0x20]; u8 gpio_event_lo[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_port_state_change_event_bits { u8 reserved_0[0x40]; u8 port_num[0x4]; u8 reserved_1[0x1c]; u8 reserved_2[0x80]; }; struct mlx5_ifc_dropped_packet_logged_bits { u8 reserved_0[0xe0]; }; enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, }; struct mlx5_ifc_cq_error_bits { u8 reserved_0[0x8]; u8 cqn[0x18]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; 
u8 syndrome[0x8]; u8 reserved_3[0x80]; }; struct mlx5_ifc_rdma_page_fault_event_bits { u8 bytes_commited[0x20]; u8 r_key[0x20]; u8 reserved_0[0x10]; u8 packet_len[0x10]; u8 rdma_op_len[0x20]; u8 rdma_va[0x40]; u8 reserved_1[0x5]; u8 rdma[0x1]; u8 write[0x1]; u8 requestor[0x1]; u8 qp_number[0x18]; }; struct mlx5_ifc_wqe_associated_page_fault_event_bits { u8 bytes_committed[0x20]; u8 reserved_0[0x10]; u8 wqe_index[0x10]; u8 reserved_1[0x10]; u8 len[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x5]; u8 rdma[0x1]; u8 write_read[0x1]; u8 requestor[0x1]; u8 qpn[0x18]; }; enum { MLX5_QP_EVENTS_TYPE_QP = 0x0, MLX5_QP_EVENTS_TYPE_RQ = 0x1, MLX5_QP_EVENTS_TYPE_SQ = 0x2, }; struct mlx5_ifc_qp_events_bits { u8 reserved_0[0xa0]; u8 type[0x8]; u8 reserved_1[0x18]; u8 reserved_2[0x8]; u8 qpn_rqn_sqn[0x18]; }; struct mlx5_ifc_dct_events_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 dct_number[0x18]; }; struct mlx5_ifc_comp_event_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 cq_number[0x18]; }; struct mlx5_ifc_fw_version_bits { u8 major[0x10]; u8 reserved_0[0x10]; u8 minor[0x10]; u8 subminor[0x10]; u8 second[0x8]; u8 minute[0x8]; u8 hour[0x8]; u8 reserved_1[0x8]; u8 year[0x10]; u8 month[0x8]; u8 day[0x8]; }; enum { MLX5_QPC_STATE_RST = 0x0, MLX5_QPC_STATE_INIT = 0x1, MLX5_QPC_STATE_RTR = 0x2, MLX5_QPC_STATE_RTS = 0x3, MLX5_QPC_STATE_SQER = 0x4, MLX5_QPC_STATE_SQD = 0x5, MLX5_QPC_STATE_ERR = 0x6, MLX5_QPC_STATE_SUSPENDED = 0x9, }; enum { MLX5_QPC_ST_RC = 0x0, MLX5_QPC_ST_UC = 0x1, MLX5_QPC_ST_UD = 0x2, MLX5_QPC_ST_XRC = 0x3, MLX5_QPC_ST_DCI = 0x5, MLX5_QPC_ST_QP0 = 0x7, MLX5_QPC_ST_QP1 = 0x8, MLX5_QPC_ST_RAW_DATAGRAM = 0x9, MLX5_QPC_ST_REG_UMR = 0xc, }; enum { MLX5_QP_PM_ARMED = 0x0, MLX5_QP_PM_REARM = 0x1, MLX5_QPC_PM_STATE_RESERVED = 0x2, MLX5_QP_PM_MIGRATED = 0x3, }; enum { MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, }; enum { MLX5_QPC_MTU_256_BYTES = 0x1, MLX5_QPC_MTU_512_BYTES = 0x2, MLX5_QPC_MTU_1K_BYTES = 0x3, MLX5_QPC_MTU_2K_BYTES = 0x4, MLX5_QPC_MTU_4K_BYTES = 0x5, MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, }; enum { MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, }; enum { MLX5_QPC_CS_REQ_DISABLE = 0x0, MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, }; enum { MLX5_QPC_CS_RES_DISABLE = 0x0, MLX5_QPC_CS_RES_UP_TO_32B = 0x1, MLX5_QPC_CS_RES_UP_TO_64B = 0x2, }; struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; u8 reserved_1[0x3]; u8 pm_state[0x2]; u8 reserved_2[0x7]; u8 end_padding_mode[0x2]; u8 reserved_3[0x2]; u8 wq_signature[0x1]; u8 block_lb_mc[0x1]; u8 atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; u8 reserved_4[0x1]; u8 drain_sigerr[0x1]; u8 reserved_5[0x2]; u8 pd[0x18]; u8 mtu[0x3]; u8 log_msg_max[0x5]; u8 reserved_6[0x1]; u8 log_rq_size[0x4]; u8 log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; u8 reserved_7[0x6]; u8 rlky[0x1]; u8 ulp_stateless_offload_mode[0x4]; u8 counter_set_id[0x8]; u8 uar_page[0x18]; u8 reserved_8[0x8]; u8 user_index[0x18]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 remote_qpn[0x18]; struct mlx5_ifc_ads_bits primary_address_path; struct mlx5_ifc_ads_bits secondary_address_path; u8 log_ack_req_freq[0x4]; u8 reserved_10[0x4]; u8 log_sra_max[0x3]; u8 reserved_11[0x2]; u8 retry_count[0x3]; u8 
rnr_retry[0x3]; u8 reserved_12[0x1]; u8 fre[0x1]; u8 cur_rnr_retry[0x3]; u8 cur_retry_count[0x3]; u8 reserved_13[0x5]; u8 reserved_14[0x20]; u8 reserved_15[0x8]; u8 next_send_psn[0x18]; u8 reserved_16[0x8]; u8 cqn_snd[0x18]; u8 reserved_at_400[0x8]; u8 deth_sqpn[0x18]; u8 reserved_17[0x20]; u8 reserved_18[0x8]; u8 last_acked_psn[0x18]; u8 reserved_19[0x8]; u8 ssn[0x18]; u8 reserved_20[0x8]; u8 log_rra_max[0x3]; u8 reserved_21[0x1]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; u8 reserved_22[0x1]; u8 page_offset[0x6]; u8 reserved_23[0x3]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; u8 reserved_24[0x3]; u8 min_rnr_nak[0x5]; u8 next_rcv_psn[0x18]; u8 reserved_25[0x8]; u8 xrcd[0x18]; u8 reserved_26[0x8]; u8 cqn_rcv[0x18]; u8 dbr_addr[0x40]; u8 q_key[0x20]; u8 reserved_27[0x5]; u8 rq_type[0x3]; u8 srqn_rmpn[0x18]; u8 reserved_28[0x8]; u8 rmsn[0x18]; u8 hw_sq_wqebb_counter[0x10]; u8 sw_sq_wqebb_counter[0x10]; u8 hw_rq_counter[0x20]; u8 sw_rq_counter[0x20]; u8 reserved_29[0x20]; u8 reserved_30[0xf]; u8 cgs[0x1]; u8 cs_req[0x8]; u8 cs_res[0x8]; u8 dc_access_key[0x40]; u8 rdma_active[0x1]; u8 comm_est[0x1]; u8 suspended[0x1]; u8 reserved_31[0x5]; u8 send_msg_psn[0x18]; u8 reserved_32[0x8]; u8 rcv_msg_psn[0x18]; u8 rdma_va[0x40]; u8 rdma_key[0x20]; u8 reserved_33[0x20]; }; struct mlx5_ifc_roce_addr_layout_bits { u8 source_l3_address[16][0x8]; u8 reserved_0[0x3]; u8 vlan_valid[0x1]; u8 vlan_id[0xc]; u8 source_mac_47_32[0x10]; u8 source_mac_31_0[0x20]; u8 reserved_1[0x14]; u8 roce_l3_type[0x4]; u8 roce_version[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_rdbc_bits { u8 reserved_0[0x1c]; u8 type[0x4]; u8 reserved_1[0x20]; u8 reserved_2[0x8]; u8 psn[0x18]; u8 rkey[0x20]; u8 address[0x40]; u8 byte_count[0x20]; u8 reserved_3[0x20]; u8 atomic_resp[32][0x8]; }; enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, }; struct mlx5_ifc_flow_context_bits { u8 reserved_0[0x20]; u8 group_id[0x20]; u8 reserved_1[0x8]; u8 flow_tag[0x18]; u8 reserved_2[0x10]; u8 action[0x10]; u8 reserved_3[0x8]; u8 destination_list_size[0x18]; u8 reserved_4[0x8]; u8 flow_counter_list_size[0x18]; u8 reserved_5[0x140]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_6[0x600]; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { MLX5_XRC_SRQC_STATE_GOOD = 0x0, MLX5_XRC_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_xrc_srqc_bits { u8 state[0x4]; u8 log_xrc_srq_size[0x4]; u8 reserved_0[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_1[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_2[0x2]; u8 cqn[0x18]; u8 reserved_3[0x20]; u8 reserved_4[0x2]; u8 log_page_size[0x6]; u8 user_index[0x18]; u8 reserved_5[0x20]; u8 reserved_6[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_7[0x40]; u8 db_record_addr_h[0x20]; u8 db_record_addr_l[0x1e]; u8 reserved_8[0x2]; u8 reserved_9[0x80]; }; struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; u8 octets[0x40]; }; struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 reserved_at_1[0x3]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; u8 prio[0x4]; u8 reserved_1[0x10]; u8 reserved_2[0x100]; u8 reserved_3[0x8]; u8 transport_domain[0x18]; u8 reserved_4[0x8]; u8 underlay_qpn[0x18]; u8 reserved_5[0x3a0]; }; enum { MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, }; enum { 
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, }; enum { MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, }; enum { MLX5_TIRC_SELF_LB_EN_ENABLE_UNICAST = 0x1, MLX5_TIRC_SELF_LB_EN_ENABLE_MULTICAST = 0x2, }; struct mlx5_ifc_tirc_bits { u8 reserved_0[0x20]; u8 disp_type[0x4]; u8 reserved_1[0x1c]; u8 reserved_2[0x40]; u8 reserved_3[0x4]; u8 lro_timeout_period_usecs[0x10]; u8 lro_enable_mask[0x4]; u8 lro_max_msg_sz[0x8]; u8 reserved_4[0x40]; u8 reserved_5[0x8]; u8 inline_rqn[0x18]; u8 rx_hash_symmetric[0x1]; u8 reserved_6[0x1]; u8 tunneled_offload_en[0x1]; u8 reserved_7[0x5]; u8 indirect_table[0x18]; u8 rx_hash_fn[0x4]; u8 reserved_8[0x2]; u8 self_lb_en[0x2]; u8 transport_domain[0x18]; u8 rx_hash_toeplitz_key[10][0x20]; struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; u8 reserved_9[0x4c0]; }; enum { MLX5_SRQC_STATE_GOOD = 0x0, MLX5_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_srqc_bits { u8 state[0x4]; u8 log_srq_size[0x4]; u8 reserved_0[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_1[0x1]; u8 rlky[0x1]; u8 reserved_2[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_3[0x2]; u8 cqn[0x18]; u8 reserved_4[0x20]; u8 reserved_5[0x2]; u8 log_page_size[0x6]; u8 reserved_6[0x18]; u8 reserved_7[0x20]; u8 reserved_8[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_9[0x40]; u8 dbr_addr[0x40]; u8 reserved_10[0x80]; }; enum { MLX5_SQC_STATE_RST = 0x0, MLX5_SQC_STATE_RDY = 0x1, MLX5_SQC_STATE_ERR = 0x3, }; struct mlx5_ifc_sqc_bits { u8 rlkey[0x1]; u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; u8 allow_multi_pkt_send_wqe[0x1]; u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; u8 reserved_0[0x12]; u8 reserved_1[0x8]; u8 user_index[0x18]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x80]; u8 qos_para_vport_number[0x10]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 reserved_4[0x10]; u8 reserved_5[0x40]; u8 reserved_6[0x8]; u8 tis_num_0[0x18]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_TSAR_TYPE_DWRR = 0, MLX5_TSAR_TYPE_ROUND_ROUBIN = 1, MLX5_TSAR_TYPE_ETS = 2 }; struct mlx5_ifc_tsar_element_attributes_bits { u8 reserved_0[0x8]; u8 tsar_type[0x8]; u8 reserved_1[0x10]; }; struct mlx5_ifc_vport_element_attributes_bits { u8 reserved_0[0x10]; u8 vport_number[0x10]; }; struct mlx5_ifc_vport_tc_element_attributes_bits { u8 traffic_class[0x10]; u8 vport_number[0x10]; }; struct mlx5_ifc_para_vport_tc_element_attributes_bits { u8 reserved_0[0x0C]; u8 traffic_class[0x04]; u8 qos_para_vport_number[0x10]; }; enum { MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; u8 element_attributes[0x20]; u8 parent_element_id[0x20]; u8 reserved_at_60[0x40]; u8 bw_share[0x20]; u8 max_average_bw[0x20]; u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_rqtc_bits { u8 reserved_0[0xa0]; u8 reserved_1[0x10]; u8 rqt_max_size[0x10]; u8 reserved_2[0x10]; u8 rqt_actual_size[0x10]; u8 reserved_3[0x6a0]; struct mlx5_ifc_rq_num_bits rq_num[0]; }; enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_RQ_TYPE_MEMORY_RQ_RMP = 0x1, }; enum { MLX5_RQC_STATE_RST = 0x0, MLX5_RQC_STATE_RDY 
= 0x1, MLX5_RQC_STATE_ERR = 0x3, }; enum { MLX5_RQC_DROPLESS_MODE_DISABLE = 0x0, MLX5_RQC_DROPLESS_MODE_ENABLE = 0x1, }; struct mlx5_ifc_rqc_bits { u8 rlkey[0x1]; u8 delay_drop_en[0x1]; u8 scatter_fcs[0x1]; u8 vlan_strip_disable[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; u8 reserved_1[0x1]; u8 flush_in_error_en[0x1]; u8 reserved_2[0x12]; u8 reserved_3[0x8]; u8 user_index[0x18]; u8 reserved_4[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; u8 reserved_5[0x18]; u8 reserved_6[0x8]; u8 rmpn[0x18]; u8 reserved_7[0xe0]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_RMPC_STATE_RDY = 0x1, MLX5_RMPC_STATE_ERR = 0x3, }; struct mlx5_ifc_rmpc_bits { u8 reserved_0[0x8]; u8 state[0x4]; u8 reserved_1[0x14]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x140]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_MC_MAC_ADDRESS = 0x1, MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST = 0x2, }; struct mlx5_ifc_nic_vport_context_bits { u8 reserved_0[0x5]; u8 min_wqe_inline_mode[0x3]; u8 reserved_1[0x15]; u8 disable_mc_local_lb[0x1]; u8 disable_uc_local_lb[0x1]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; u8 reserved_2[0x1a]; u8 event_on_mtu[0x1]; u8 event_on_promisc_change[0x1]; u8 event_on_vlan_change[0x1]; u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; u8 reserved_3[0xe0]; u8 reserved_4[0x10]; u8 mtu[0x10]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 reserved_5[0x140]; u8 qkey_violation_counter[0x10]; u8 reserved_6[0x10]; u8 reserved_7[0x420]; u8 promisc_uc[0x1]; u8 promisc_mc[0x1]; u8 promisc_all[0x1]; u8 reserved_8[0x2]; u8 allowed_list_type[0x3]; u8 reserved_9[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; u8 reserved_10[0x20]; u8 current_uc_mac_address[0][0x40]; }; enum { MLX5_ACCESS_MODE_PA = 0x0, MLX5_ACCESS_MODE_MTT = 0x1, MLX5_ACCESS_MODE_KLM = 0x2, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; u8 reserved_at_2[0x1]; u8 access_mode_4_2[0x3]; u8 reserved_at_6[0x7]; u8 relaxed_ordering_write[0x1]; u8 reserved_at_e[0x1]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; u8 rw[0x1]; u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; u8 access_mode[0x2]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 mkey_7_0[0x8]; u8 reserved_3[0x20]; u8 length64[0x1]; u8 bsf_en[0x1]; u8 sync_umr[0x1]; u8 reserved_4[0x2]; u8 expected_sigerr_count[0x1]; u8 reserved_5[0x1]; u8 en_rinval[0x1]; u8 pd[0x18]; u8 start_addr[0x40]; u8 len[0x40]; u8 bsf_octword_size[0x20]; u8 reserved_6[0x80]; u8 translations_octword_size[0x20]; u8 reserved_7[0x1b]; u8 log_page_size[0x5]; u8 reserved_8[0x20]; }; struct mlx5_ifc_pkey_bits { u8 reserved_0[0x10]; u8 pkey[0x10]; }; struct mlx5_ifc_array128_auto_bits { u8 array128_auto[16][0x8]; }; enum { MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_PORT_GUID = 0x0, MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_NODE_GUID = 0x1, MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_VPORT_STATE_POLICY = 0x2, }; enum { MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_SLEEP = 0x1, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_POLLING = 0x2, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_DISABLED = 0x3, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PORTCONFIGURATIONTRAINING = 0x4, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKUP = 0x5, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKERRORRECOVERY = 0x6, MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PHYTEST = 0x7, }; enum { MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_DOWN = 0x0, 
MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_UP = 0x1, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_FOLLOW = 0x2, }; enum { MLX5_HCA_VPORT_CONTEXT_PORT_STATE_DOWN = 0x1, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_INIT = 0x2, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ARM = 0x3, MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ACTIVE = 0x4, }; enum { MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_DOWN = 0x1, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_INIT = 0x2, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ARM = 0x3, MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ACTIVE = 0x4, }; struct mlx5_ifc_hca_vport_context_bits { u8 field_select[0x20]; u8 reserved_0[0xe0]; u8 sm_virt_aware[0x1]; u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; u8 reserved_1[0x1]; u8 min_wqe_inline_mode[0x3]; u8 reserved_2[0x8]; u8 port_physical_state[0x4]; u8 vport_state_policy[0x4]; u8 port_state[0x4]; u8 vport_state[0x4]; u8 reserved_3[0x20]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 cap_mask1[0x20]; u8 cap_mask1_field_select[0x20]; u8 cap_mask2[0x20]; u8 cap_mask2_field_select[0x20]; u8 reserved_4[0x80]; u8 lid[0x10]; u8 reserved_5[0x4]; u8 init_type_reply[0x4]; u8 lmc[0x3]; u8 subnet_timeout[0x5]; u8 sm_lid[0x10]; u8 sm_sl[0x4]; u8 reserved_6[0xc]; u8 qkey_violation_counter[0x10]; u8 pkey_violation_counter[0x10]; u8 reserved_7[0xca0]; }; union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct mlx5_ifc_roce_cap_bits roce_cap; struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_snapshot_cap_bits snapshot_cap; struct mlx5_ifc_debug_cap_bits diagnostic_counters_cap; struct mlx5_ifc_qos_cap_bits qos_cap; u8 reserved_0[0x8000]; }; enum { MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_DEFAULT = 0x0, MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_IDENTIFIED = 0x1, }; struct mlx5_ifc_flow_table_context_bits { u8 encap_en[0x1]; u8 decap_en[0x1]; u8 reserved_at_2[0x2]; u8 table_miss_action[0x4]; u8 level[0x8]; u8 reserved_at_10[0x8]; u8 log_size[0x8]; u8 reserved_at_20[0x8]; u8 table_miss_id[0x18]; u8 reserved_at_40[0x8]; u8 lag_master_next_table_id[0x18]; u8 reserved_at_60[0xe0]; }; struct mlx5_ifc_esw_vport_context_bits { u8 reserved_0[0x3]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; u8 reserved_1[0x18]; u8 reserved_2[0x20]; u8 svlan_cfi[0x1]; u8 svlan_pcp[0x3]; u8 svlan_id[0xc]; u8 cvlan_cfi[0x1]; u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; u8 reserved_3[0x7a0]; }; enum { MLX5_EQC_STATUS_OK = 0x0, MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, }; enum { MLX5_EQ_STATE_ARMED = 0x9, MLX5_EQ_STATE_FIRED = 0xa, }; struct mlx5_ifc_eqc_bits { u8 status[0x4]; u8 reserved_0[0x9]; u8 ec[0x1]; u8 oi[0x1]; u8 reserved_1[0x5]; u8 st[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x20]; u8 reserved_4[0x14]; u8 page_offset[0x6]; u8 reserved_5[0x6]; u8 reserved_6[0x3]; u8 log_eq_size[0x5]; u8 uar_page[0x18]; u8 reserved_7[0x20]; u8 reserved_8[0x18]; u8 intr[0x8]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 reserved_10[0x18]; u8 reserved_11[0x60]; u8 reserved_12[0x8]; u8 consumer_counter[0x18]; u8 reserved_13[0x8]; u8 producer_counter[0x18]; u8 reserved_14[0x80]; }; enum { MLX5_DCTC_STATE_ACTIVE = 0x0, MLX5_DCTC_STATE_DRAINING = 0x1, MLX5_DCTC_STATE_DRAINED = 0x2, }; enum { MLX5_DCTC_CS_RES_DISABLE 
= 0x0, MLX5_DCTC_CS_RES_NA = 0x1, MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, }; enum { MLX5_DCTC_MTU_256_BYTES = 0x1, MLX5_DCTC_MTU_512_BYTES = 0x2, MLX5_DCTC_MTU_1K_BYTES = 0x3, MLX5_DCTC_MTU_2K_BYTES = 0x4, MLX5_DCTC_MTU_4K_BYTES = 0x5, }; struct mlx5_ifc_dctc_bits { u8 reserved_0[0x4]; u8 state[0x4]; u8 reserved_1[0x18]; u8 reserved_2[0x8]; u8 user_index[0x18]; u8 reserved_3[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; u8 atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; u8 rlky[0x1]; u8 reserved_4[0xe]; u8 reserved_5[0x8]; u8 cs_res[0x8]; u8 reserved_6[0x3]; u8 min_rnr_nak[0x5]; u8 reserved_7[0x8]; u8 reserved_8[0x8]; u8 srqn[0x18]; u8 reserved_9[0x8]; u8 pd[0x18]; u8 tclass[0x8]; u8 reserved_10[0x4]; u8 flow_label[0x14]; u8 dc_access_key[0x40]; u8 reserved_11[0x5]; u8 mtu[0x3]; u8 port[0x8]; u8 pkey_index[0x10]; u8 reserved_12[0x8]; u8 my_addr_index[0x8]; u8 reserved_13[0x8]; u8 hop_limit[0x8]; u8 dc_access_key_violation_count[0x20]; u8 reserved_14[0x14]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 ecn[0x2]; u8 dscp[0x6]; u8 reserved_15[0x40]; }; enum { MLX5_CQC_STATUS_OK = 0x0, MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, }; enum { CQE_SIZE_64 = 0x0, CQE_SIZE_128 = 0x1, }; enum { MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, }; enum { MLX5_CQ_STATE_SOLICITED_ARMED = 0x6, MLX5_CQ_STATE_ARMED = 0x9, MLX5_CQ_STATE_FIRED = 0xa, }; struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_0[0x4]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_1[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; u8 cq_period_mode[0x2]; u8 cqe_compression_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x20]; u8 reserved_4[0x14]; u8 page_offset[0x6]; u8 reserved_5[0x6]; u8 reserved_6[0x3]; u8 log_cq_size[0x5]; u8 uar_page[0x18]; u8 reserved_7[0x4]; u8 cq_period[0xc]; u8 cq_max_count[0x10]; u8 reserved_8[0x18]; u8 c_eqn[0x8]; u8 reserved_9[0x3]; u8 log_page_size[0x5]; u8 reserved_10[0x18]; u8 reserved_11[0x20]; u8 reserved_12[0x8]; u8 last_notified_index[0x18]; u8 reserved_13[0x8]; u8 last_solicit_index[0x18]; u8 reserved_14[0x8]; u8 consumer_counter[0x18]; u8 reserved_15[0x8]; u8 producer_counter[0x18]; u8 reserved_16[0x40]; u8 dbr_addr[0x40]; }; union mlx5_ifc_cong_control_roce_ecn_auto_bits { struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; u8 reserved_0[0x800]; }; struct mlx5_ifc_query_adapter_param_block_bits { u8 reserved_0[0xc0]; u8 reserved_1[0x8]; u8 ieee_vendor_id[0x18]; u8 reserved_2[0x10]; u8 vsd_vendor_id[0x10]; u8 vsd[208][0x8]; u8 vsd_contd_psid[16][0x8]; }; union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { struct mlx5_ifc_modify_field_select_bits modify_field_select; struct mlx5_ifc_resize_field_select_bits resize_field_select; u8 reserved_0[0x20]; }; union mlx5_ifc_field_select_802_1_r_roce_auto_bits { struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; u8 reserved_0[0x20]; }; struct mlx5_ifc_bufferx_reg_bits { u8 reserved_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; u8 reserved_1[0xc]; u8 size[0xc]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; }; struct mlx5_ifc_config_item_bits { u8 valid[0x2]; u8 reserved_0[0x2]; 
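/*
 * The mlx5_ifc_cqc_bits layout above carries the completion event
 * moderation knobs: cq_period_mode selects whether the moderation
 * timer restarts from the last EQE or the last CQE, cq_period bounds
 * the delay and cq_max_count the number of CQEs coalesced per event.
 * A minimal sketch of programming them while building a CREATE_CQ
 * command (assuming the input layout names its context section
 * cq_context, and that cq_period is in microseconds):
 *
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(cqc, cqc, cq_period_mode,
 *	    MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
 *	MLX5_SET(cqc, cqc, cq_period, 32);
 *	MLX5_SET(cqc, cqc, cq_max_count, 16);
 */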
u8 header_type[0x2]; u8 reserved_1[0x2]; u8 default_location[0x1]; u8 reserved_2[0x7]; u8 version[0x4]; u8 reserved_3[0x3]; u8 length[0x9]; u8 type[0x20]; u8 reserved_4[0x10]; u8 crc16[0x10]; }; struct mlx5_ifc_nodnic_port_config_reg_bits { struct mlx5_ifc_nodnic_event_word_bits event; u8 network_en[0x1]; u8 dma_en[0x1]; u8 promisc_en[0x1]; u8 promisc_multicast_en[0x1]; u8 reserved_0[0x17]; u8 receive_filter_en[0x5]; u8 reserved_1[0x10]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; u8 receive_filters_mgid_mac[64][0x8]; u8 gid[16][0x8]; u8 reserved_2[0x10]; u8 lid[0x10]; u8 reserved_3[0xc]; u8 sm_sl[0x4]; u8 sm_lid[0x10]; u8 completion_address_63_32[0x20]; u8 completion_address_31_12[0x14]; u8 reserved_4[0x6]; u8 log_cq_size[0x6]; u8 working_buffer_address_63_32[0x20]; u8 working_buffer_address_31_12[0x14]; u8 reserved_5[0xc]; struct mlx5_ifc_nodnic_cq_arming_word_bits arm_cq; u8 pkey_index[0x10]; u8 pkey[0x10]; struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring0; struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring1; struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring0; struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring1; u8 reserved_6[0x400]; }; union mlx5_ifc_event_auto_bits { struct mlx5_ifc_comp_event_bits comp_event; struct mlx5_ifc_dct_events_bits dct_events; struct mlx5_ifc_qp_events_bits qp_events; struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; struct mlx5_ifc_cq_error_bits cq_error; struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; struct mlx5_ifc_port_state_change_event_bits port_state_change_event; struct mlx5_ifc_gpio_event_bits gpio_event; struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; struct mlx5_ifc_stall_vl_event_bits stall_vl_event; struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; struct mlx5_ifc_pages_req_event_bits pages_req_event; struct mlx5_ifc_nic_vport_change_event_bits nic_vport_change_event; u8 reserved_0[0xe0]; }; struct mlx5_ifc_health_buffer_bits { u8 reserved_0[0x100]; u8 assert_existptr[0x20]; u8 assert_callra[0x20]; u8 reserved_1[0x40]; u8 fw_version[0x20]; u8 hw_id[0x20]; u8 reserved_2[0x20]; u8 irisc_index[0x8]; u8 synd[0x8]; u8 ext_synd[0x10]; }; struct mlx5_ifc_register_loopback_control_bits { u8 no_lb[0x1]; u8 reserved_0[0x7]; u8 port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x60]; }; struct mlx5_ifc_lrh_bits { u8 vl[4]; u8 lver[4]; u8 sl[4]; u8 reserved2[2]; u8 lnh[2]; u8 dlid[16]; u8 reserved5[5]; u8 pkt_len[11]; u8 slid[16]; }; struct mlx5_ifc_icmd_set_wol_rol_out_bits { u8 reserved_0[0x40]; u8 reserved_1[0x10]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; }; struct mlx5_ifc_icmd_set_wol_rol_in_bits { u8 reserved_0[0x40]; u8 rol_mode_valid[0x1]; u8 wol_mode_valid[0x1]; u8 reserved_1[0xe]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_2[0x7a0]; }; struct mlx5_ifc_icmd_set_virtual_mac_in_bits { u8 virtual_mac_en[0x1]; u8 mac_aux_v[0x1]; u8 reserved_0[0x1e]; u8 reserved_1[0x40]; struct mlx5_ifc_mac_address_layout_bits virtual_mac; u8 reserved_2[0x760]; }; struct mlx5_ifc_icmd_query_virtual_mac_out_bits { u8 virtual_mac_en[0x1]; u8 mac_aux_v[0x1]; u8 reserved_0[0x1e]; struct mlx5_ifc_mac_address_layout_bits permanent_mac; struct mlx5_ifc_mac_address_layout_bits virtual_mac; u8 reserved_1[0x760]; }; struct mlx5_ifc_icmd_query_fw_info_out_bits { struct mlx5_ifc_fw_version_bits fw_version; u8 reserved_0[0x10]; u8 hash_signature[0x10]; u8 psid[16][0x8]; u8 reserved_1[0x6e0]; }; struct 
mlx5_ifc_icmd_query_cap_in_bits { u8 reserved_0[0x10]; u8 capability_group[0x10]; }; struct mlx5_ifc_icmd_query_cap_general_bits { u8 nv_access[0x1]; u8 fw_info_psid[0x1]; u8 reserved_0[0x1e]; u8 reserved_1[0x16]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_2[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; u8 wol_b[0x1]; u8 wol_m[0x1]; u8 wol_u[0x1]; u8 wol_p[0x1]; }; struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 reserved_1[0x7e0]; }; struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 reserved_1[0x7e0]; }; struct mlx5_ifc_icmd_ocbb_init_in_bits { u8 address_hi[0x20]; u8 address_lo[0x20]; u8 reserved_0[0x7c0]; }; struct mlx5_ifc_icmd_init_ocsd_in_bits { u8 reserved_0[0x20]; u8 address_hi[0x20]; u8 address_lo[0x20]; u8 reserved_1[0x7a0]; }; struct mlx5_ifc_icmd_access_reg_out_bits { u8 reserved_0[0x11]; u8 status[0x7]; u8 reserved_1[0x8]; u8 register_id[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x40]; u8 reserved_4[0x5]; u8 len[0xb]; u8 reserved_5[0x10]; u8 register_data[0][0x20]; }; enum { MLX5_ICMD_ACCESS_REG_IN_METHOD_QUERY = 0x1, MLX5_ICMD_ACCESS_REG_IN_METHOD_WRITE = 0x2, }; struct mlx5_ifc_icmd_access_reg_in_bits { u8 constant_1[0x5]; u8 constant_2[0xb]; u8 reserved_0[0x10]; u8 register_id[0x10]; u8 reserved_1[0x1]; u8 method[0x7]; u8 constant_3[0x8]; u8 reserved_2[0x40]; u8 constant_4[0x5]; u8 len[0xb]; u8 reserved_3[0x10]; u8 register_data[0][0x20]; }; enum { MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, }; struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x3f]; u8 force_state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, }; struct mlx5_ifc_teardown_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 profile[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_delay_drop_params_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_delay_drop_params_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 delay_drop_timeout[0x10]; }; struct mlx5_ifc_query_delay_drop_params_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 delay_drop_timeout[0x10]; }; struct mlx5_ifc_query_delay_drop_params_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_suspend_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_suspend_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_sqerr2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_sqerr2rts_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_sqd2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_sqd2rts_qp_in_bits { u8 
opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_set_wol_rol_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_wol_rol_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rol_mode_valid[0x1]; u8 wol_mode_valid[0x1]; u8 reserved_2[0xe]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_roce_address_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_roce_address_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x20]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_set_rdb_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_rdb_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x18]; u8 rdb_list_size[0x8]; struct mlx5_ifc_rdbc_bits rdb_context[0]; }; struct mlx5_ifc_set_mad_demux_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, }; struct mlx5_ifc_set_mad_demux_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x6]; u8 demux_mode[0x2]; u8 reserved_4[0x18]; }; struct mlx5_ifc_set_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x20]; u8 reserved_5[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; u8 reserved_6[0xc0]; }; struct mlx5_ifc_set_issi_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_issi_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 current_issi[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_set_hca_cap_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_hca_cap_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 }; struct mlx5_ifc_set_flow_table_root_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_flow_table_root_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x8]; u8 underlay_qpn[0x18]; u8 reserved_7[0x120]; }; struct mlx5_ifc_set_fte_out_bits { u8 status[0x8]; u8 
reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x18]; u8 modify_enable_mask[0x8]; u8 reserved_7[0x20]; u8 flow_index[0x20]; u8 reserved_8[0xe0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_set_driver_version_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_driver_version_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; u8 driver_version[64][0x8]; }; struct mlx5_ifc_set_dc_cnak_trace_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_dc_cnak_trace_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 enable[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x160]; struct mlx5_ifc_cmd_pas_bits pas; }; struct mlx5_ifc_set_burst_size_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_set_burst_size_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x9]; u8 device_burst_size[0x17]; }; struct mlx5_ifc_rts2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rts2rts_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_rtr2rts_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_rst2init_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rst2init_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_resume_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_resume_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_wol_rol_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_query_wol_rol_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 
reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; enum { MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, }; struct mlx5_ifc_query_vport_state_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; u8 admin_state[0x4]; u8 state[0x4]; }; enum { MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_query_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_traffic_counter_bits received_errors; struct mlx5_ifc_traffic_counter_bits transmit_errors; struct mlx5_ifc_traffic_counter_bits received_ib_unicast; struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; struct mlx5_ifc_traffic_counter_bits received_ib_multicast; struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; struct mlx5_ifc_traffic_counter_bits received_eth_unicast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; struct mlx5_ifc_traffic_counter_bits received_eth_multicast; struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; u8 reserved_2[0xa00]; }; enum { MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, }; struct mlx5_ifc_query_vport_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x60]; u8 clear[0x1]; u8 reserved_4[0x1f]; u8 reserved_5[0x20]; }; struct mlx5_ifc_query_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_tisc_bits tis_context; }; struct mlx5_ifc_query_tis_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_query_tir_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_sqc_bits sq_context; }; struct mlx5_ifc_query_sq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 sqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_special_contexts_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; }; struct mlx5_ifc_query_special_contexts_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; 
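/*
 * A minimal sketch of driving the QUERY_VPORT_COUNTER layouts above:
 * the clear bit makes the query destructive (read-and-reset), each
 * mlx5_ifc_traffic_counter_bits entry is assumed to carry a 64-bit
 * packets/octets pair, and MLX5_CMD_OP_QUERY_VPORT_COUNTER is assumed
 * to be the opcode name:
 *
 *	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(query_vport_counter_out)] = {0};
 *	u64 rx_ucast_octets = 0;
 *
 *	MLX5_SET(query_vport_counter_in, in, opcode,
 *	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
 *	MLX5_SET(query_vport_counter_in, in, clear, 1);
 *	if (mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)) == 0)
 *		rx_ucast_octets = MLX5_GET64(query_vport_counter_out,
 *		    out, received_eth_unicast.octets);
 */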
u8 reserved_2[0x40]; }; struct mlx5_ifc_query_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; enum { MLX5_SCHEDULING_ELEMENT_IN_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_query_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_query_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rqc_bits rq_context; }; struct mlx5_ifc_query_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_roce_address_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_query_roce_address_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; u8 reserved_2[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xc0]; struct mlx5_ifc_rmpc_bits rmp_context; }; struct mlx5_ifc_query_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rmpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_rdb_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x18]; u8 rdb_list_size[0x8]; struct mlx5_ifc_rdbc_bits rdb_context[0]; }; struct mlx5_ifc_query_rdb_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 opt_param_mask[0x20]; u8 reserved_2[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_3[0x80]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 rx_write_requests[0x20]; u8 reserved_2[0x20]; u8 rx_read_requests[0x20]; u8 reserved_3[0x20]; u8 rx_atomic_requests[0x20]; u8 reserved_4[0x20]; u8 rx_dct_connect[0x20]; u8 reserved_5[0x20]; u8 out_of_buffer[0x20]; u8 reserved_7[0x20]; u8 out_of_sequence[0x20]; u8 reserved_8[0x20]; u8 duplicate_request[0x20]; u8 reserved_9[0x20]; u8 rnr_nak_retry_err[0x20]; u8 reserved_10[0x20]; u8 packet_seq_err[0x20]; u8 reserved_11[0x20]; u8 implied_nak_seq_err[0x20]; u8 reserved_12[0x20]; u8 local_ack_timeout_err[0x20]; u8 reserved_13[0x20]; u8 resp_rnr_nak[0x20]; u8 reserved_14[0x20]; u8 req_rnr_retries_exceeded[0x20]; u8 reserved_15[0x460]; }; struct mlx5_ifc_query_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 
op_mod[0x10]; u8 reserved_2[0x80]; u8 clear[0x1]; u8 reserved_3[0x1f]; u8 reserved_4[0x18]; u8 counter_set_id[0x8]; }; struct mlx5_ifc_query_pages_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; }; enum { MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, }; struct mlx5_ifc_query_pages_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_nic_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_query_nic_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x5]; u8 allowed_list_type[0x3]; u8 reserved_4[0x18]; }; struct mlx5_ifc_query_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; u8 reserved_2[0x600]; u8 bsf0_klm0_pas_mtt0_1[16][0x8]; u8 bsf1_klm1_pas_mtt2_3[16][0x8]; }; struct mlx5_ifc_query_mkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 mkey_index[0x18]; u8 pg_access[0x1]; u8 reserved_3[0x1f]; }; struct mlx5_ifc_query_mad_demux_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 mad_dumux_parameters_block[0x20]; }; struct mlx5_ifc_query_mad_demux_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xa0]; u8 reserved_2[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; u8 reserved_3[0xc0]; }; struct mlx5_ifc_query_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x140]; }; struct mlx5_ifc_query_issi_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 current_issi[0x10]; u8 reserved_2[0xa0]; u8 supported_issi_reserved[76][0x8]; u8 supported_issi_dw0[0x20]; }; struct mlx5_ifc_query_issi_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_pkey_bits pkey[0]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; u8 pkey_index[0x10]; }; struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 gids_num[0x10]; u8 reserved_2[0x10]; struct mlx5_ifc_array128_auto_bits gid[0]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; u8 gid_index[0x10]; }; struct 
mlx5_ifc_query_hca_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_query_hca_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_hca_cap_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; struct mlx5_ifc_query_hca_cap_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_flow_table_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x80]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_query_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x140]; }; struct mlx5_ifc_query_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x1c0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_query_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x40]; u8 flow_index[0x20]; u8 reserved_7[0xe0]; }; enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, }; struct mlx5_ifc_query_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0xa0]; u8 start_flow_index[0x20]; u8 reserved_2[0x20]; u8 end_flow_index[0x20]; u8 reserved_3[0xa0]; u8 reserved_4[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; u8 reserved_5[0xe00]; }; struct mlx5_ifc_query_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 group_id[0x20]; u8 reserved_6[0x120]; }; struct mlx5_ifc_query_flow_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; }; struct mlx5_ifc_query_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x80]; u8 clear[0x1]; u8 reserved_at_c1[0xf]; u8 num_of_counters[0x10]; u8 reserved_at_e0[0x10]; u8 flow_counter_id[0x10]; }; struct mlx5_ifc_query_esw_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_esw_vport_context_bits esw_vport_context; }; struct mlx5_ifc_query_esw_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_eq_out_bits { u8 status[0x8]; u8 
reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; u8 reserved_2[0x40]; u8 event_bitmask[0x40]; u8 reserved_3[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; u8 reserved_2[0x180]; }; struct mlx5_ifc_query_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_dc_cnak_trace_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 enable[0x1]; u8 reserved_1[0x1f]; u8 reserved_2[0x160]; struct mlx5_ifc_cmd_pas_bits pas; }; struct mlx5_ifc_query_dc_cnak_trace_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_2[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_cq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_status_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 enable[0x1]; u8 tag_enable[0x1]; u8 reserved_2[0x1e]; }; struct mlx5_ifc_query_cong_status_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_statistics_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 rp_cur_flows[0x20]; u8 sum_flows[0x20]; u8 rp_cnp_ignored_high[0x20]; u8 rp_cnp_ignored_low[0x20]; u8 rp_cnp_handled_high[0x20]; u8 rp_cnp_handled_low[0x20]; u8 reserved_2[0x100]; u8 time_stamp_high[0x20]; u8 time_stamp_low[0x20]; u8 accumulators_period[0x20]; u8 np_ecn_marked_roce_packets_high[0x20]; u8 np_ecn_marked_roce_packets_low[0x20]; u8 np_cnp_sent_high[0x20]; u8 np_cnp_sent_low[0x20]; u8 reserved_3[0x560]; }; struct mlx5_ifc_query_cong_statistics_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 clear[0x1]; u8 reserved_2[0x1f]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_cong_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_query_cong_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x1c]; u8 cong_protocol[0x4]; u8 reserved_3[0x20]; }; struct mlx5_ifc_query_burst_size_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; u8 reserved_2[0x9]; u8 device_burst_size[0x17]; }; struct mlx5_ifc_query_burst_size_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_query_adapter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; }; struct mlx5_ifc_query_adapter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 
reserved_2[0x40]; }; struct mlx5_ifc_qp_2rst_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_qp_2rst_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_qp_2err_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_qp_2err_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_para_vport_element_bits { u8 reserved_at_0[0xc]; u8 traffic_class[0x4]; u8 qos_para_vport_number[0x10]; }; struct mlx5_ifc_page_fault_resume_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_page_fault_resume_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 error[0x1]; u8 reserved_2[0x4]; u8 rdma[0x1]; u8 read_write[0x1]; u8 req_res[0x1]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_nop_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_nop_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_modify_vport_state_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_NIC_VPORT = 0x0, MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2, }; enum { MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_DOWN = 0x0, MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_UP = 0x1, MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_FOLLOW = 0x2, }; struct mlx5_ifc_modify_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x18]; u8 admin_state[0x4]; u8 reserved_4[0x4]; }; struct mlx5_ifc_modify_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_tis_bitmask_bits { u8 reserved_at_0[0x20]; u8 reserved_at_20[0x1d]; u8 lag_tx_port_affinity[0x1]; u8 strict_lag_tx_port_affinity[0x1]; u8 prio[0x1]; }; struct mlx5_ifc_modify_tis_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; struct mlx5_ifc_modify_tis_bitmask_bits bitmask; u8 reserved_4[0x40]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_modify_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_SQ_BITMASK_PACKET_PACING_RATE_LIMIT_INDEX = 0x1 << 0, MLX5_MODIFY_SQ_BITMASK_QOS_PARA_VPORT_NUMBER = 0x1 << 1 }; struct mlx5_ifc_modify_tir_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_modify_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_sq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 sq_state[0x4]; u8 reserved_2[0x4]; u8 sqn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_modify_scheduling_element_out_bits { u8 status[0x8]; u8 
reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; enum { MLX5_MODIFY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; enum { MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_BW_SHARE = 0x1, MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_MAX_AVERAGE_BW = 0x2, }; struct mlx5_ifc_modify_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x20]; u8 modify_bitmask[0x20]; u8 reserved_at_c0[0x40]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_rqtc_bits ctx; }; struct mlx5_ifc_modify_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, }; struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rq_state[0x4]; u8 reserved_2[0x4]; u8 rqn[0x18]; u8 reserved_3[0x20]; u8 modify_bitmask[0x40]; u8 reserved_4[0x40]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_modify_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_rmp_bitmask_bits { u8 reserved[0x20]; u8 reserved1[0x1f]; u8 lwm[0x1]; }; struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 rmp_state[0x4]; u8 reserved_2[0x4]; u8 rmpn[0x18]; u8 reserved_3[0x20]; struct mlx5_ifc_rmp_bitmask_bits bitmask; u8 reserved_4[0x40]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_modify_nic_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_nic_vport_field_select_bits { u8 reserved_0[0x14]; u8 disable_uc_local_lb[0x1]; u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; u8 port_guid[0x1]; u8 min_wqe_inline_mode[0x1]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; u8 permanent_address[0x1]; u8 addresses_list[0x1]; u8 roce_en[0x1]; u8 reserved_1[0x1]; }; struct mlx5_ifc_modify_nic_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; u8 reserved_3[0x780]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_modify_hca_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_grh_bits { u8 ip_version[4]; u8 traffic_class[8]; u8 flow_label[20]; u8 payload_length[16]; u8 next_header[8]; u8 hop_limit[8]; u8 sgid[128]; u8 dgid[128]; }; struct mlx5_ifc_bth_bits { u8 opcode[8]; u8 se[1]; u8 migreq[1]; u8 pad_count[2]; u8 tver[4]; u8 p_key[16]; u8 reserved8[8]; u8 dest_qp[24]; u8 ack_req[1]; u8 reserved7[7]; u8 psn[24]; }; struct mlx5_ifc_aeth_bits { u8 syndrome[8]; u8 msn[24]; }; struct mlx5_ifc_dceth_bits { u8 reserved0[8]; u8 session_id[24]; u8 reserved1[8]; u8 dci_dct[24]; }; struct mlx5_ifc_modify_hca_vport_context_in_bits { 
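/*
 * Nearly every command input below opens with the same 16-bit
 * opcode/op_mod header, and every output opens with an 8-bit status
 * followed by a 32-bit syndrome; a nonzero status means the command
 * failed and the syndrome identifies why.  A minimal sketch of the
 * resulting calling convention for the layout that follows
 * (MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT is assumed to be the opcode
 * name, and mlx5_cmd_exec() is expected to fold a bad status into its
 * error return, as its callers rely on):
 *
 *	u32 in[MLX5_ST_SZ_DW(modify_hca_vport_context_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(modify_hca_vport_context_out)] = {0};
 *
 *	MLX5_SET(modify_hca_vport_context_in, in, opcode,
 *	    MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
 *	MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
 *	MLX5_SET(modify_hca_vport_context_in, in, vport_number, vport);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */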
u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_modify_flow_table_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; enum { MLX5_MODIFY_FLOW_TABLE_SELECT_MISS_ACTION_AND_ID = 0x1, MLX5_MODIFY_FLOW_TABLE_SELECT_LAG_MASTER_NEXT_TABLE_ID = 0x8000, }; struct mlx5_ifc_modify_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_modify_esw_vport_context_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_esw_vport_context_fields_select_bits { u8 reserved[0x1c]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_strip[0x1]; }; struct mlx5_ifc_modify_esw_vport_context_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; struct mlx5_ifc_esw_vport_context_bits esw_vport_context; }; struct mlx5_ifc_modify_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, }; struct mlx5_ifc_modify_cq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_3[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_cong_status_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; u8 enable[0x1]; u8 tag_enable[0x1]; u8 reserved_3[0x1e]; }; struct mlx5_ifc_modify_cong_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_modify_cong_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x1c]; u8 cong_protocol[0x4]; union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; u8 reserved_3[0x80]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_manage_pages_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 output_num_entries[0x20]; u8 reserved_1[0x20]; u8 pas[0][0x40]; }; enum { MLX5_PAGES_CANT_GIVE = 0x0, MLX5_PAGES_GIVE = 0x1, MLX5_PAGES_TAKE = 0x2, }; struct mlx5_ifc_manage_pages_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 input_num_entries[0x20]; u8 pas[0][0x40]; }; struct mlx5_ifc_mad_ifc_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 response_mad_packet[256][0x8]; }; struct mlx5_ifc_mad_ifc_in_bits { u8 
opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 remote_lid[0x10]; u8 reserved_2[0x8]; u8 port[0x8]; u8 reserved_3[0x20]; u8 mad[256][0x8]; }; struct mlx5_ifc_init_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_INIT_HCA_IN_OP_MOD_INIT = 0x0, MLX5_INIT_HCA_IN_OP_MOD_PRE_INIT = 0x1, }; struct mlx5_ifc_init_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_init2rtr_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_init2rtr_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_init2init_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_init2init_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; }; struct mlx5_ifc_get_dropped_packet_log_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 packet_headers_log[128][0x8]; u8 packet_syndrome[64][0x8]; }; struct mlx5_ifc_get_dropped_packet_log_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_gen_eqe_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; u8 eqe[64][0x8]; }; struct mlx5_ifc_gen_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_enable_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; }; struct mlx5_ifc_enable_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_drain_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_drain_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_disable_hca_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x20]; }; struct mlx5_ifc_disable_hca_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 function_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_detach_from_mcg_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_detach_from_mcg_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_destroy_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_xrc_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 
reserved_1[0x40]; }; struct mlx5_ifc_destroy_tis_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tisn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_tir_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 tirn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_sq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 sqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; enum { MLX5_DESTROY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_destroy_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 scheduling_element_id[0x20]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqtn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 rmpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_qos_para_vport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x1c0]; }; struct mlx5_ifc_destroy_qos_para_vport_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 qos_para_vport_number[0x10]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_destroy_psv_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_psv_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 psvn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_mkey_in_bits { u8 
opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 mkey_index[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_flow_table_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x140]; }; struct mlx5_ifc_destroy_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 group_id[0x20]; u8 reserved_6[0x120]; }; struct mlx5_ifc_destroy_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 eq_number[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_destroy_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_destroy_cq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 cqn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_delete_l2_table_entry_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x8]; u8 table_index[0x18]; u8 reserved_4[0x140]; }; struct mlx5_ifc_delete_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_delete_fte_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x40]; u8 flow_index[0x20]; u8 reserved_7[0xe0]; }; struct mlx5_ifc_dealloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrcd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_uar_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 
op_mod[0x10]; u8 reserved_2[0x8]; u8 uar[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 transport_domain[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_counter_id_bits { u8 reserved[0x10]; u8 counter_id[0x10]; }; struct mlx5_ifc_diagnostic_params_context_bits { u8 num_of_counters[0x10]; u8 reserved_2[0x8]; u8 log_num_of_samples[0x8]; u8 single[0x1]; u8 repetitive[0x1]; u8 sync[0x1]; u8 clear[0x1]; u8 on_demand[0x1]; u8 enable[0x1]; u8 reserved_3[0x12]; u8 log_sample_period[0x8]; u8 reserved_4[0x80]; struct mlx5_ifc_counter_id_bits counter_id[0]; }; struct mlx5_ifc_set_diagnostic_params_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; struct mlx5_ifc_diagnostic_params_context_bits diagnostic_params_ctx; }; struct mlx5_ifc_set_diagnostic_params_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_query_diagnostic_counters_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 num_of_samples[0x10]; u8 sample_index[0x10]; u8 reserved_2[0x20]; }; struct mlx5_ifc_diagnostic_counter_bits { u8 counter_id[0x10]; u8 sample_id[0x10]; u8 time_stamp_31_0[0x20]; u8 counter_value_h[0x20]; u8 counter_value_l[0x20]; }; struct mlx5_ifc_query_diagnostic_counters_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; struct mlx5_ifc_diagnostic_counter_bits diag_counter[0]; }; struct mlx5_ifc_dealloc_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x18]; u8 counter_set_id[0x8]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_pd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 pd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_dealloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 flow_counter_id[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_deactivate_tracer_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_deactivate_tracer_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 mkey[0x20]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 xrc_srqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; u8 reserved_3[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_tis_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 tisn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 
op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_create_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 tirn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_create_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 srqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; u8 reserved_3[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_sq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 sqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_create_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 scheduling_element_id[0x20]; u8 reserved_at_a0[0x160]; }; enum { MLX5_CREATE_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2, }; struct mlx5_ifc_create_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 reserved_at_60[0xa0]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rqtn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_create_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_create_rmp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 rmpn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0xc0]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_create_qp_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 qpn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 input_qpn[0x18]; u8 reserved_3[0x20]; u8 opt_param_mask[0x20]; u8 reserved_4[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_5[0x80]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_qos_para_vport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 qos_para_vport_number[0x10]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_create_qos_para_vport_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x1c0]; }; struct mlx5_ifc_create_psv_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 
syndrome[0x20]; u8 reserved_1[0x40]; u8 reserved_2[0x8]; u8 psv0_index[0x18]; u8 reserved_3[0x8]; u8 psv1_index[0x18]; u8 reserved_4[0x8]; u8 psv2_index[0x18]; u8 reserved_5[0x8]; u8 psv3_index[0x18]; }; struct mlx5_ifc_create_psv_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 num_psv[0x4]; u8 reserved_2[0x4]; u8 pd[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_create_mkey_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 mkey_index[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 pg_access[0x1]; u8 reserved_3[0x1f]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; u8 reserved_4[0x80]; u8 translations_octword_actual_size[0x20]; u8 reserved_5[0x560]; u8 klm_pas_mtt[0][0x20]; }; struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 table_id[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x20]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_create_flow_group_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 group_id[0x18]; u8 reserved_2[0x20]; }; enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, }; struct mlx5_ifc_create_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_2[0xf]; u8 vport_number[0x10]; u8 reserved_3[0x20]; u8 table_type[0x8]; u8 reserved_4[0x18]; u8 reserved_5[0x8]; u8 table_id[0x18]; u8 reserved_6[0x20]; u8 start_flow_index[0x20]; u8 reserved_7[0x20]; u8 end_flow_index[0x20]; u8 reserved_8[0xa0]; u8 reserved_9[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; u8 reserved_10[0xe00]; }; struct mlx5_ifc_create_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x18]; u8 eq_number[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_eq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; u8 reserved_3[0x40]; u8 event_bitmask[0x40]; u8 reserved_4[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 dctn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; u8 reserved_3[0x180]; }; struct mlx5_ifc_create_cq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 cqn[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_3[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 
syndrome[0x20]; u8 reserved_1[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_2[0x20]; }; enum { MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_config_int_moderation_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_3[0x20]; }; struct mlx5_ifc_attach_to_mcg_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 qpn[0x18]; u8 reserved_3[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, }; struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 xrc_srqn[0x18]; u8 reserved_3[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_rq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; enum { MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, }; struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 srq_number[0x18]; u8 reserved_3[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_dct_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_arm_dct_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x8]; u8 dctn[0x18]; u8 reserved_3[0x20]; }; struct mlx5_ifc_alloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 xrcd[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_uar_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 uar[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 transport_domain[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x18]; u8 counter_set_id[0x8]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_pd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x8]; u8 pd[0x18]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct mlx5_ifc_alloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x10]; u8 flow_counter_id[0x10]; u8 reserved_2[0x20]; }; struct mlx5_ifc_alloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x40]; }; struct 
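/*
 * Editorial note: the add_vxlan_udp_dport command that follows is the
 * inverse of the delete_vxlan_udp_dport command earlier in this file; in
 * both layouts the 16-bit vxlan_udp_port field at the end of the input is
 * the only meaningful payload.
 */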
mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x20]; u8 reserved_3[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_activate_tracer_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; }; struct mlx5_ifc_activate_tracer_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 mkey[0x20]; u8 reserved_2[0x20]; }; struct mlx5_ifc_set_rate_limit_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_rate_limit_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 rate_limit_index[0x10]; u8 reserved_at_60[0x20]; u8 rate_limit[0x20]; u8 burst_upper_bound[0x20]; }; struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 reserved_1[0x40]; u8 register_data[0][0x20]; }; enum { MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_access_register_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 reserved_2[0x10]; u8 register_id[0x10]; u8 argument[0x20]; u8 register_data[0][0x20]; }; struct mlx5_ifc_sltp_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 reserved_2[0x20]; u8 reserved_3[0x7]; u8 polarity[0x1]; u8 ob_tap0[0x8]; u8 ob_tap1[0x8]; u8 ob_tap2[0x8]; u8 reserved_4[0xc]; u8 ob_preemp_mode[0x4]; u8 ob_reg[0x8]; u8 ob_bias[0x8]; u8 reserved_5[0x20]; }; struct mlx5_ifc_slrp_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 ib_sel[0x2]; u8 reserved_2[0x11]; u8 dp_sel[0x1]; u8 dp90sel[0x4]; u8 mix90phase[0x8]; u8 ffe_tap0[0x8]; u8 ffe_tap1[0x8]; u8 ffe_tap2[0x8]; u8 ffe_tap3[0x8]; u8 ffe_tap4[0x8]; u8 ffe_tap5[0x8]; u8 ffe_tap6[0x8]; u8 ffe_tap7[0x8]; u8 ffe_tap8[0x8]; u8 mixerbias_tap_amp[0x8]; u8 reserved_3[0x7]; u8 ffe_tap_en[0x9]; u8 ffe_tap_offset0[0x8]; u8 ffe_tap_offset1[0x8]; u8 slicer_offset0[0x10]; u8 mixer_offset0[0x10]; u8 mixer_offset1[0x10]; u8 mixerbgn_inp[0x8]; u8 mixerbgn_inn[0x8]; u8 mixerbgn_refp[0x8]; u8 mixerbgn_refn[0x8]; u8 sel_slicer_lctrl_h[0x1]; u8 sel_slicer_lctrl_l[0x1]; u8 reserved_4[0x1]; u8 ref_mixer_vreg[0x5]; u8 slicer_gctrl[0x8]; u8 lctrl_input[0x8]; u8 mixer_offset_cm1[0x8]; u8 common_mode[0x6]; u8 reserved_5[0x1]; u8 mixer_offset_cm0[0x9]; u8 reserved_6[0x7]; u8 slicer_offset_cm[0x9]; }; struct mlx5_ifc_slrg_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x2]; u8 lane[0x4]; u8 reserved_1[0x8]; u8 time_to_link_up[0x10]; u8 reserved_2[0xc]; u8 grade_lane_speed[0x4]; u8 grade_version[0x8]; u8 grade[0x18]; u8 reserved_3[0x4]; u8 height_grade_type[0x4]; u8 height_grade[0x18]; u8 height_dz[0x10]; u8 height_dv[0x10]; u8 reserved_4[0x10]; u8 height_sigma[0x10]; u8 reserved_5[0x20]; u8 reserved_6[0x4]; u8 phase_grade_type[0x4]; u8 phase_grade[0x18]; u8 reserved_7[0x8]; u8 phase_eo_pos[0x8]; u8 reserved_8[0x8]; u8 phase_eo_neg[0x8]; u8 ffe_set_tested[0x10]; u8 test_errors_per_lane[0x10]; }; struct mlx5_ifc_pvlc_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1c]; u8 vl_hw_cap[0x4]; u8 
reserved_3[0x1c]; u8 vl_admin[0x4]; u8 reserved_4[0x1c]; u8 vl_operational[0x4]; }; struct mlx5_ifc_pude_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_0[0x4]; u8 admin_status[0x4]; u8 reserved_1[0x4]; u8 oper_status[0x4]; u8 reserved_2[0x60]; }; enum { MLX5_PTYS_REG_PROTO_MASK_INFINIBAND = 0x1, MLX5_PTYS_REG_PROTO_MASK_ETHERNET = 0x4, }; struct mlx5_ifc_ptys_reg_bits { u8 reserved_0[0x1]; u8 an_disable_admin[0x1]; u8 an_disable_cap[0x1]; u8 reserved_1[0x4]; u8 force_tx_aba_param[0x1]; u8 local_port[0x8]; u8 reserved_2[0xd]; u8 proto_mask[0x3]; u8 an_status[0x4]; u8 reserved_3[0xc]; u8 data_rate_oper[0x10]; u8 fc_proto_capability[0x20]; u8 eth_proto_capability[0x20]; u8 ib_link_width_capability[0x10]; u8 ib_proto_capability[0x10]; u8 fc_proto_admin[0x20]; u8 eth_proto_admin[0x20]; u8 ib_link_width_admin[0x10]; u8 ib_proto_admin[0x10]; u8 fc_proto_oper[0x20]; u8 eth_proto_oper[0x20]; u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; u8 reserved_4[0x20]; u8 eth_proto_lp_advertise[0x20]; u8 reserved_5[0x60]; }; struct mlx5_ifc_ptas_reg_bits { u8 reserved_0[0x20]; u8 algorithm_options[0x10]; u8 reserved_1[0x4]; u8 repetitions_mode[0x4]; u8 num_of_repetitions[0x8]; u8 grade_version[0x8]; u8 height_grade_type[0x4]; u8 phase_grade_type[0x4]; u8 height_grade_weight[0x8]; u8 phase_grade_weight[0x8]; u8 gisim_measure_bits[0x10]; u8 adaptive_tap_measure_bits[0x10]; u8 ber_bath_high_error_threshold[0x10]; u8 ber_bath_mid_error_threshold[0x10]; u8 ber_bath_low_error_threshold[0x10]; u8 one_ratio_high_threshold[0x10]; u8 one_ratio_high_mid_threshold[0x10]; u8 one_ratio_low_mid_threshold[0x10]; u8 one_ratio_low_threshold[0x10]; u8 ndeo_error_threshold[0x10]; u8 mixer_offset_step_size[0x10]; u8 reserved_2[0x8]; u8 mix90_phase_for_voltage_bath[0x8]; u8 mixer_offset_start[0x10]; u8 mixer_offset_end[0x10]; u8 reserved_3[0x15]; u8 ber_test_time[0xb]; }; struct mlx5_ifc_pspa_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 sub_port[0x8]; u8 reserved_0[0x8]; u8 reserved_1[0x20]; }; struct mlx5_ifc_ppsc_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x60]; u8 reserved_3[0x1c]; u8 wrps_admin[0x4]; u8 reserved_4[0x1c]; u8 wrps_status[0x4]; u8 up_th_vld[0x1]; u8 down_th_vld[0x1]; u8 reserved_5[0x6]; u8 up_threshold[0x8]; u8 reserved_6[0x8]; u8 down_threshold[0x8]; u8 reserved_7[0x20]; u8 reserved_8[0x1c]; u8 srps_admin[0x4]; u8 reserved_9[0x60]; }; struct mlx5_ifc_pplr_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x8]; u8 lb_cap[0x8]; u8 reserved_3[0x8]; u8 lb_en[0x8]; }; struct mlx5_ifc_pplm_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x20]; u8 port_profile_mode[0x8]; u8 static_port_profile[0x8]; u8 active_port_profile[0x8]; u8 reserved_3[0x8]; u8 retransmission_active[0x8]; u8 fec_mode_active[0x18]; u8 reserved_4[0x10]; u8 v_100g_fec_override_cap[0x4]; u8 v_50g_fec_override_cap[0x4]; u8 v_25g_fec_override_cap[0x4]; u8 v_10g_40g_fec_override_cap[0x4]; u8 reserved_5[0x10]; u8 v_100g_fec_override_admin[0x4]; u8 v_50g_fec_override_admin[0x4]; u8 v_25g_fec_override_admin[0x4]; u8 v_10g_40g_fec_override_admin[0x4]; }; struct mlx5_ifc_ppll_reg_bits { u8 num_pll_groups[0x8]; u8 pll_group[0x8]; u8 reserved_0[0x4]; u8 num_plls[0x4]; u8 reserved_1[0x8]; u8 reserved_2[0x1f]; u8 ae[0x1]; u8 pll_status[4][0x40]; }; struct mlx5_ifc_ppad_reg_bits { u8 reserved_0[0x3]; u8 single_mac[0x1]; u8 reserved_1[0x4]; u8 local_port[0x8]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; u8 reserved_2[0x40]; }; struct 
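/*
 * Editorial note: in the PMTU register below, max_mtu is the read-only
 * device limit, admin_mtu is the requested value, and oper_mtu is what the
 * port actually runs with; these fields back the mlx5_set_port_mtu(),
 * mlx5_query_port_max_mtu() and mlx5_query_port_oper_mtu() prototypes in
 * the port.h hunk further down.
 */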
mlx5_ifc_pmtu_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 max_mtu[0x10]; u8 reserved_2[0x10]; u8 admin_mtu[0x10]; u8 reserved_3[0x10]; u8 oper_mtu[0x10]; u8 reserved_4[0x10]; }; struct mlx5_ifc_pmpr_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x18]; u8 attenuation_5g[0x8]; u8 reserved_3[0x18]; u8 attenuation_7g[0x8]; u8 reserved_4[0x18]; u8 attenuation_12g[0x8]; }; struct mlx5_ifc_pmpe_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0xc]; u8 module_status[0x4]; u8 reserved_2[0x14]; u8 error_type[0x4]; u8 reserved_3[0x8]; u8 reserved_4[0x40]; }; struct mlx5_ifc_pmpc_reg_bits { u8 module_state_updated[32][0x8]; }; struct mlx5_ifc_pmlpn_reg_bits { u8 reserved_0[0x4]; u8 mlpn_status[0x4]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 e[0x1]; u8 reserved_2[0x1f]; }; struct mlx5_ifc_pmlp_reg_bits { u8 rxtx[0x1]; u8 reserved_0[0x7]; u8 local_port[0x8]; u8 reserved_1[0x8]; u8 width[0x8]; u8 lane0_module_mapping[0x20]; u8 lane1_module_mapping[0x20]; u8 lane2_module_mapping[0x20]; u8 lane3_module_mapping[0x20]; u8 reserved_2[0x160]; }; struct mlx5_ifc_pmaos_reg_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0x4]; u8 admin_status[0x4]; u8 reserved_2[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_3[0x12]; u8 error_type[0x4]; u8 reserved_4[0x6]; u8 e[0x2]; u8 reserved_5[0x40]; }; struct mlx5_ifc_plpc_reg_bits { u8 reserved_0[0x4]; u8 profile_id[0xc]; u8 reserved_1[0x4]; u8 proto_mask[0x4]; u8 reserved_2[0x8]; u8 reserved_3[0x10]; u8 lane_speed[0x10]; u8 reserved_4[0x17]; u8 lpbf[0x1]; u8 fec_mode_policy[0x8]; u8 retransmission_capability[0x8]; u8 fec_mode_capability[0x18]; u8 retransmission_support_admin[0x8]; u8 fec_mode_support_admin[0x18]; u8 retransmission_request_admin[0x8]; u8 fec_mode_request_admin[0x18]; u8 reserved_5[0x80]; }; struct mlx5_ifc_pll_status_data_bits { u8 reserved_0[0x1]; u8 lock_cal[0x1]; u8 lock_status[0x2]; u8 reserved_1[0x2]; u8 algo_f_ctrl[0xa]; u8 analog_algo_num_var[0x6]; u8 f_ctrl_measure[0xa]; u8 reserved_2[0x2]; u8 analog_var[0x6]; u8 reserved_3[0x2]; u8 high_var[0x6]; u8 reserved_4[0x2]; u8 low_var[0x6]; u8 reserved_5[0x2]; u8 mid_val[0x6]; }; struct mlx5_ifc_plib_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x8]; u8 ib_port[0x8]; u8 reserved_2[0x60]; }; struct mlx5_ifc_plbf_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0xd]; u8 lbf_mode[0x3]; u8 reserved_2[0x20]; }; struct mlx5_ifc_pipg_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 dic[0x1]; u8 reserved_2[0x19]; u8 ipg[0x4]; u8 reserved_3[0x2]; }; struct mlx5_ifc_pifr_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0xe0]; u8 port_filter[8][0x20]; u8 port_filter_update_en[8][0x20]; }; struct mlx5_ifc_phys_layer_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 symbol_errors_high[0x20]; u8 symbol_errors_low[0x20]; u8 sync_headers_errors_high[0x20]; u8 sync_headers_errors_low[0x20]; u8 edpl_bip_errors_lane0_high[0x20]; u8 edpl_bip_errors_lane0_low[0x20]; u8 edpl_bip_errors_lane1_high[0x20]; u8 edpl_bip_errors_lane1_low[0x20]; u8 edpl_bip_errors_lane2_high[0x20]; u8 edpl_bip_errors_lane2_low[0x20]; u8 edpl_bip_errors_lane3_high[0x20]; u8 edpl_bip_errors_lane3_low[0x20]; u8 fc_fec_corrected_blocks_lane0_high[0x20]; u8 fc_fec_corrected_blocks_lane0_low[0x20]; u8 fc_fec_corrected_blocks_lane1_high[0x20]; u8 fc_fec_corrected_blocks_lane1_low[0x20]; u8 
fc_fec_corrected_blocks_lane2_high[0x20]; u8 fc_fec_corrected_blocks_lane2_low[0x20]; u8 fc_fec_corrected_blocks_lane3_high[0x20]; u8 fc_fec_corrected_blocks_lane3_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; u8 rs_fec_corrected_blocks_high[0x20]; u8 rs_fec_corrected_blocks_low[0x20]; u8 rs_fec_uncorrectable_blocks_high[0x20]; u8 rs_fec_uncorrectable_blocks_low[0x20]; u8 rs_fec_no_errors_blocks_high[0x20]; u8 rs_fec_no_errors_blocks_low[0x20]; u8 rs_fec_single_error_blocks_high[0x20]; u8 rs_fec_single_error_blocks_low[0x20]; u8 rs_fec_corrected_symbols_total_high[0x20]; u8 rs_fec_corrected_symbols_total_low[0x20]; u8 rs_fec_corrected_symbols_lane0_high[0x20]; u8 rs_fec_corrected_symbols_lane0_low[0x20]; u8 rs_fec_corrected_symbols_lane1_high[0x20]; u8 rs_fec_corrected_symbols_lane1_low[0x20]; u8 rs_fec_corrected_symbols_lane2_high[0x20]; u8 rs_fec_corrected_symbols_lane2_low[0x20]; u8 rs_fec_corrected_symbols_lane3_high[0x20]; u8 rs_fec_corrected_symbols_lane3_low[0x20]; u8 link_down_events[0x20]; u8 successful_recovery_events[0x20]; u8 reserved_0[0x180]; }; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_at_70[0x8]; u8 link_overrun_errors[0x8]; u8 reserved_at_80[0x10]; u8 vl_15_dropped[0x10]; u8 reserved_at_a0[0xa0]; }; struct mlx5_ifc_phys_layer_statistical_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 phy_received_bits_high[0x20]; u8 phy_received_bits_low[0x20]; u8 phy_symbol_errors_high[0x20]; u8 phy_symbol_errors_low[0x20]; u8 phy_corrected_bits_high[0x20]; u8 phy_corrected_bits_low[0x20]; u8 phy_corrected_bits_lane0_high[0x20]; u8 phy_corrected_bits_lane0_low[0x20]; u8 phy_corrected_bits_lane1_high[0x20]; u8 phy_corrected_bits_lane1_low[0x20]; u8 phy_corrected_bits_lane2_high[0x20]; u8 phy_corrected_bits_lane2_low[0x20]; u8 phy_corrected_bits_lane3_high[0x20]; u8 phy_corrected_bits_lane3_low[0x20]; u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_infiniband_port_cntrs_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_0[0x8]; u8 local_link_integrity_errors[0x4]; u8 excessive_buffer_overrun_errors[0x4]; u8 reserved_1[0x10]; u8 vl_15_dropped[0x10]; u8 port_xmit_data[0x20]; u8 port_rcv_data[0x20]; u8 port_xmit_pkts[0x20]; u8 port_rcv_pkts[0x20]; u8 port_xmit_wait[0x20]; u8 reserved_2[0x680]; }; struct mlx5_ifc_phrr_reg_bits { u8 clr[0x1]; u8 reserved_0[0x7]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 hist_group[0x8]; u8 reserved_2[0x10]; u8 hist_id[0x8]; u8 reserved_3[0x40]; u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 bin[10][0x20]; }; struct mlx5_ifc_phbr_for_prio_reg_bits { u8 
reserved_0[0x18]; u8 prio[0x8]; }; struct mlx5_ifc_phbr_for_port_tclass_reg_bits { u8 reserved_0[0x18]; u8 tclass[0x8]; }; struct mlx5_ifc_phbr_binding_reg_bits { u8 opcode[0x4]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_1[0xe]; u8 hist_group[0x8]; u8 reserved_2[0x10]; u8 hist_id[0x8]; u8 reserved_3[0x10]; u8 hist_type[0x10]; u8 hist_parameters[0x20]; u8 hist_min_value[0x20]; u8 hist_max_value[0x20]; u8 sample_time[0x20]; }; enum { MLX5_PFCC_REG_PPAN_DISABLED = 0x0, MLX5_PFCC_REG_PPAN_ENABLED = 0x1, }; struct mlx5_ifc_pfcc_reg_bits { u8 dcbx_operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_1[0xc]; u8 shl_cap[0x1]; u8 shl_opr[0x1]; u8 ppan[0x4]; u8 reserved_2[0x4]; u8 prio_mask_tx[0x8]; u8 reserved_3[0x8]; u8 prio_mask_rx[0x8]; u8 pptx[0x1]; u8 aptx[0x1]; u8 reserved_4[0x6]; u8 pfctx[0x8]; u8 reserved_5[0x8]; u8 cbftx[0x8]; u8 pprx[0x1]; u8 aprx[0x1]; u8 reserved_6[0x6]; u8 pfcrx[0x8]; u8 reserved_7[0x8]; u8 cbfrx[0x8]; u8 device_stall_minor_watermark[0x10]; u8 device_stall_critical_watermark[0x10]; u8 reserved_8[0x60]; }; struct mlx5_ifc_pelc_reg_bits { u8 op[0x4]; u8 reserved_0[0x4]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 op_admin[0x8]; u8 op_capability[0x8]; u8 op_request[0x8]; u8 op_active[0x8]; u8 admin[0x40]; u8 capability[0x40]; u8 request[0x40]; u8 active[0x40]; u8 reserved_2[0x80]; }; struct mlx5_ifc_peir_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0xc]; u8 error_count[0x4]; u8 reserved_3[0x10]; u8 reserved_4[0xc]; u8 lane[0x4]; u8 reserved_5[0x8]; u8 error_type[0x8]; }; struct mlx5_ifc_qcam_access_reg_cap_mask { u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; u8 qpdpm[0x1]; u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; u8 qdpm[0x1]; u8 qpts[0x1]; u8 qcap[0x1]; u8 qcam_access_reg_cap_mask_0[0x1]; }; struct mlx5_ifc_qcam_qos_feature_cap_mask { u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; u8 qpts_trust_both[0x1]; }; struct mlx5_ifc_qcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; u8 reserved_at_0[0x80]; } qos_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; u8 reserved_at_0[0x80]; } qos_feature_cap_mask; u8 reserved_at_1c0[0x80]; }; struct mlx5_ifc_pcap_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 port_capability_mask[4][0x20]; }; struct mlx5_ifc_pbmc_reg_bits { u8 reserved_0[0x8]; u8 local_port[0x8]; u8 reserved_1[0x10]; u8 xoff_timer_value[0x10]; u8 xoff_refresh[0x10]; u8 reserved_2[0x10]; u8 port_buffer_size[0x10]; struct mlx5_ifc_bufferx_reg_bits buffer[10]; u8 reserved_3[0x40]; u8 port_shared_buffer[0x40]; }; struct mlx5_ifc_paos_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_0[0x4]; u8 admin_status[0x4]; u8 reserved_1[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_2[0x1c]; u8 e[0x2]; u8 reserved_3[0x40]; }; struct mlx5_ifc_pamp_reg_bits { u8 reserved_0[0x8]; u8 opamp_group[0x8]; u8 reserved_1[0xc]; u8 opamp_group_type[0x4]; u8 start_index[0x10]; u8 reserved_2[0x4]; u8 num_of_indices[0xc]; u8 index_data[18][0x10]; }; struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits { u8 llr_rx_cells_high[0x20]; u8 llr_rx_cells_low[0x20]; u8 llr_rx_error_high[0x20]; u8 llr_rx_error_low[0x20]; u8 llr_rx_crc_error_high[0x20]; u8 llr_rx_crc_error_low[0x20]; u8 llr_tx_cells_high[0x20]; 
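/*
 * Editorial note: as throughout these counter-group layouts, each 64-bit
 * counter is split into _high/_low 32-bit words; a consumer reads the two
 * halves separately (e.g. with MLX5_GET()) and combines them as
 * ((u64)high << 32) | low.
 */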
u8 llr_tx_cells_low[0x20]; u8 llr_tx_ret_cells_high[0x20]; u8 llr_tx_ret_cells_low[0x20]; u8 llr_tx_ret_events_high[0x20]; u8 llr_tx_ret_events_low[0x20]; u8 reserved_0[0x640]; }; struct mlx5_ifc_mtmp_reg_bits { u8 i[0x1]; u8 reserved_at_1[0x18]; u8 sensor_index[0x7]; u8 reserved_at_20[0x10]; u8 temperature[0x10]; u8 mte[0x1]; u8 mtr[0x1]; u8 reserved_at_42[0x0e]; u8 max_temperature[0x10]; u8 tee[0x2]; u8 reserved_at_62[0x0e]; u8 temperature_threshold_hi[0x10]; u8 reserved_at_80[0x10]; u8 temperature_threshold_lo[0x10]; u8 reserved_at_100[0x20]; u8 sensor_name[0x40]; }; struct mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_0[0x6]; u8 rx_lane[0x2]; u8 reserved_1[0x6]; u8 tx_lane[0x2]; u8 reserved_2[0x8]; u8 module[0x8]; }; struct mlx5_ifc_eth_per_traffic_class_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; u8 reserved_0[0x780]; }; struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits { u8 no_buffer_discard_uc_high[0x20]; u8 no_buffer_discard_uc_low[0x20]; u8 wred_discard_high[0x20]; u8 wred_discard_low[0x20]; u8 reserved_0[0x740]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_octets_high[0x20]; u8 rx_octets_low[0x20]; u8 reserved_0[0xc0]; u8 rx_frames_high[0x20]; u8 rx_frames_low[0x20]; u8 tx_octets_high[0x20]; u8 tx_octets_low[0x20]; u8 reserved_1[0xc0]; u8 tx_frames_high[0x20]; u8 tx_frames_low[0x20]; u8 rx_pause_high[0x20]; u8 rx_pause_low[0x20]; u8 rx_pause_duration_high[0x20]; u8 rx_pause_duration_low[0x20]; u8 tx_pause_high[0x20]; u8 tx_pause_low[0x20]; u8 tx_pause_duration_high[0x20]; u8 tx_pause_duration_low[0x20]; u8 rx_pause_transition_high[0x20]; u8 rx_pause_transition_low[0x20]; u8 rx_discards_high[0x20]; u8 rx_discards_low[0x20]; u8 device_stall_minor_watermark_cnt_high[0x20]; u8 device_stall_minor_watermark_cnt_low[0x20]; u8 device_stall_critical_watermark_cnt_high[0x20]; u8 device_stall_critical_watermark_cnt_low[0x20]; u8 reserved_2[0x340]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_high[0x20]; u8 port_transmit_wait_low[0x20]; u8 ecn_marked_high[0x20]; u8 ecn_marked_low[0x20]; u8 no_buffer_discard_mc_high[0x20]; u8 no_buffer_discard_mc_low[0x20]; u8 reserved_0[0x700]; }; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 a_frames_transmitted_ok_high[0x20]; u8 a_frames_transmitted_ok_low[0x20]; u8 a_frames_received_ok_high[0x20]; u8 a_frames_received_ok_low[0x20]; u8 a_frame_check_sequence_errors_high[0x20]; u8 a_frame_check_sequence_errors_low[0x20]; u8 a_alignment_errors_high[0x20]; u8 a_alignment_errors_low[0x20]; u8 a_octets_transmitted_ok_high[0x20]; u8 a_octets_transmitted_ok_low[0x20]; u8 a_octets_received_ok_high[0x20]; u8 a_octets_received_ok_low[0x20]; u8 a_multicast_frames_xmitted_ok_high[0x20]; u8 a_multicast_frames_xmitted_ok_low[0x20]; u8 a_broadcast_frames_xmitted_ok_high[0x20]; u8 a_broadcast_frames_xmitted_ok_low[0x20]; u8 a_multicast_frames_received_ok_high[0x20]; u8 a_multicast_frames_received_ok_low[0x20]; u8 a_broadcast_frames_recieved_ok_high[0x20]; u8 a_broadcast_frames_recieved_ok_low[0x20]; u8 a_in_range_length_errors_high[0x20]; u8 a_in_range_length_errors_low[0x20]; u8 a_out_of_range_length_field_high[0x20]; u8 a_out_of_range_length_field_low[0x20]; u8 a_frame_too_long_errors_high[0x20]; u8 a_frame_too_long_errors_low[0x20]; u8 a_symbol_error_during_carrier_high[0x20]; u8 a_symbol_error_during_carrier_low[0x20]; u8 a_mac_control_frames_transmitted_high[0x20]; u8 a_mac_control_frames_transmitted_low[0x20]; u8 a_mac_control_frames_received_high[0x20]; u8 
a_mac_control_frames_received_low[0x20]; u8 a_unsupported_opcodes_received_high[0x20]; u8 a_unsupported_opcodes_received_low[0x20]; u8 a_pause_mac_ctrl_frames_received_high[0x20]; u8 a_pause_mac_ctrl_frames_received_low[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; u8 reserved_0[0x300]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { u8 dot3stats_alignment_errors_high[0x20]; u8 dot3stats_alignment_errors_low[0x20]; u8 dot3stats_fcs_errors_high[0x20]; u8 dot3stats_fcs_errors_low[0x20]; u8 dot3stats_single_collision_frames_high[0x20]; u8 dot3stats_single_collision_frames_low[0x20]; u8 dot3stats_multiple_collision_frames_high[0x20]; u8 dot3stats_multiple_collision_frames_low[0x20]; u8 dot3stats_sqe_test_errors_high[0x20]; u8 dot3stats_sqe_test_errors_low[0x20]; u8 dot3stats_deferred_transmissions_high[0x20]; u8 dot3stats_deferred_transmissions_low[0x20]; u8 dot3stats_late_collisions_high[0x20]; u8 dot3stats_late_collisions_low[0x20]; u8 dot3stats_excessive_collisions_high[0x20]; u8 dot3stats_excessive_collisions_low[0x20]; u8 dot3stats_internal_mac_transmit_errors_high[0x20]; u8 dot3stats_internal_mac_transmit_errors_low[0x20]; u8 dot3stats_carrier_sense_errors_high[0x20]; u8 dot3stats_carrier_sense_errors_low[0x20]; u8 dot3stats_frame_too_longs_high[0x20]; u8 dot3stats_frame_too_longs_low[0x20]; u8 dot3stats_internal_mac_receive_errors_high[0x20]; u8 dot3stats_internal_mac_receive_errors_low[0x20]; u8 dot3stats_symbol_errors_high[0x20]; u8 dot3stats_symbol_errors_low[0x20]; u8 dot3control_in_unknown_opcodes_high[0x20]; u8 dot3control_in_unknown_opcodes_low[0x20]; u8 dot3in_pause_frames_high[0x20]; u8 dot3in_pause_frames_low[0x20]; u8 dot3out_pause_frames_high[0x20]; u8 dot3out_pause_frames_low[0x20]; u8 reserved_0[0x3c0]; }; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { u8 if_in_octets_high[0x20]; u8 if_in_octets_low[0x20]; u8 if_in_ucast_pkts_high[0x20]; u8 if_in_ucast_pkts_low[0x20]; u8 if_in_discards_high[0x20]; u8 if_in_discards_low[0x20]; u8 if_in_errors_high[0x20]; u8 if_in_errors_low[0x20]; u8 if_in_unknown_protos_high[0x20]; u8 if_in_unknown_protos_low[0x20]; u8 if_out_octets_high[0x20]; u8 if_out_octets_low[0x20]; u8 if_out_ucast_pkts_high[0x20]; u8 if_out_ucast_pkts_low[0x20]; u8 if_out_discards_high[0x20]; u8 if_out_discards_low[0x20]; u8 if_out_errors_high[0x20]; u8 if_out_errors_low[0x20]; u8 if_in_multicast_pkts_high[0x20]; u8 if_in_multicast_pkts_low[0x20]; u8 if_in_broadcast_pkts_high[0x20]; u8 if_in_broadcast_pkts_low[0x20]; u8 if_out_multicast_pkts_high[0x20]; u8 if_out_multicast_pkts_low[0x20]; u8 if_out_broadcast_pkts_high[0x20]; u8 if_out_broadcast_pkts_low[0x20]; u8 reserved_0[0x480]; }; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { u8 ether_stats_drop_events_high[0x20]; u8 ether_stats_drop_events_low[0x20]; u8 ether_stats_octets_high[0x20]; u8 ether_stats_octets_low[0x20]; u8 ether_stats_pkts_high[0x20]; u8 ether_stats_pkts_low[0x20]; u8 ether_stats_broadcast_pkts_high[0x20]; u8 ether_stats_broadcast_pkts_low[0x20]; u8 ether_stats_multicast_pkts_high[0x20]; u8 ether_stats_multicast_pkts_low[0x20]; u8 ether_stats_crc_align_errors_high[0x20]; u8 ether_stats_crc_align_errors_low[0x20]; u8 ether_stats_undersize_pkts_high[0x20]; u8 ether_stats_undersize_pkts_low[0x20]; u8 ether_stats_oversize_pkts_high[0x20]; u8 ether_stats_oversize_pkts_low[0x20]; u8 ether_stats_fragments_high[0x20]; u8 ether_stats_fragments_low[0x20]; u8 ether_stats_jabbers_high[0x20]; u8 ether_stats_jabbers_low[0x20]; 
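/*
 * Editorial note: this group mirrors the RFC 2819 (RMON) etherStatsTable
 * counters; the ether_stats_pktsNtoMoctets fields that follow are the RMON
 * packet-size histogram buckets, again carried as _high/_low word pairs.
 */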
u8 ether_stats_collisions_high[0x20]; u8 ether_stats_collisions_low[0x20]; u8 ether_stats_pkts64octets_high[0x20]; u8 ether_stats_pkts64octets_low[0x20]; u8 ether_stats_pkts65to127octets_high[0x20]; u8 ether_stats_pkts65to127octets_low[0x20]; u8 ether_stats_pkts128to255octets_high[0x20]; u8 ether_stats_pkts128to255octets_low[0x20]; u8 ether_stats_pkts256to511octets_high[0x20]; u8 ether_stats_pkts256to511octets_low[0x20]; u8 ether_stats_pkts512to1023octets_high[0x20]; u8 ether_stats_pkts512to1023octets_low[0x20]; u8 ether_stats_pkts1024to1518octets_high[0x20]; u8 ether_stats_pkts1024to1518octets_low[0x20]; u8 ether_stats_pkts1519to2047octets_high[0x20]; u8 ether_stats_pkts1519to2047octets_low[0x20]; u8 ether_stats_pkts2048to4095octets_high[0x20]; u8 ether_stats_pkts2048to4095octets_low[0x20]; u8 ether_stats_pkts4096to8191octets_high[0x20]; u8 ether_stats_pkts4096to8191octets_low[0x20]; u8 ether_stats_pkts8192to10239octets_high[0x20]; u8 ether_stats_pkts8192to10239octets_low[0x20]; u8 reserved_0[0x280]; }; struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_0[0x8]; u8 local_link_integrity_errors[0x4]; u8 excessive_buffer_overrun_errors[0x4]; u8 reserved_1[0x10]; u8 vl_15_dropped[0x10]; u8 port_xmit_data[0x20]; u8 port_rcv_data[0x20]; u8 port_xmit_pkts[0x20]; u8 port_rcv_pkts[0x20]; u8 port_xmit_wait[0x20]; u8 reserved_2[0x680]; }; struct mlx5_ifc_trc_tlb_reg_bits { u8 reserved_0[0x80]; u8 tlb_addr[0][0x40]; }; struct mlx5_ifc_trc_read_fifo_reg_bits { u8 reserved_0[0x10]; u8 requested_event_num[0x10]; u8 reserved_1[0x20]; u8 reserved_2[0x10]; u8 acual_event_num[0x10]; u8 reserved_3[0x20]; u8 event[0][0x40]; }; struct mlx5_ifc_trc_lock_reg_bits { u8 reserved_0[0x1f]; u8 lock[0x1]; u8 reserved_1[0x60]; }; struct mlx5_ifc_trc_filter_reg_bits { u8 status[0x1]; u8 reserved_0[0xf]; u8 filter_index[0x10]; u8 reserved_1[0x20]; u8 filter_val[0x20]; u8 reserved_2[0x1a0]; }; struct mlx5_ifc_trc_event_reg_bits { u8 status[0x1]; u8 reserved_0[0xf]; u8 event_index[0x10]; u8 reserved_1[0x20]; u8 event_id[0x20]; u8 event_selector_val[0x10]; u8 event_selector_size[0x10]; u8 reserved_2[0x180]; }; struct mlx5_ifc_trc_conf_reg_bits { u8 limit_en[0x1]; u8 reserved_0[0x3]; u8 dump_mode[0x4]; u8 reserved_1[0x15]; u8 state[0x3]; u8 reserved_2[0x20]; u8 limit_event_index[0x20]; u8 mkey[0x20]; u8 fifo_ready_ev_num[0x20]; u8 reserved_3[0x160]; }; struct mlx5_ifc_trc_cap_reg_bits { u8 reserved_0[0x18]; u8 dump_mode[0x8]; u8 reserved_1[0x20]; u8 num_of_events[0x10]; u8 num_of_filters[0x10]; u8 fifo_size[0x20]; u8 tlb_size[0x10]; u8 event_size[0x10]; u8 reserved_2[0x160]; }; struct mlx5_ifc_set_node_in_bits { u8 node_description[64][0x8]; }; struct mlx5_ifc_register_power_settings_bits { u8 reserved_0[0x18]; u8 power_settings_level[0x8]; u8 reserved_1[0x60]; }; struct mlx5_ifc_register_host_endianess_bits { u8 he[0x1]; u8 reserved_0[0x1f]; u8 reserved_1[0x60]; }; struct mlx5_ifc_register_diag_buffer_ctrl_bits { u8 physical_address[0x40]; }; struct mlx5_ifc_qtct_reg_bits { u8 operation_type[0x2]; u8 cap_local_admin[0x1]; u8 cap_remote_admin[0x1]; u8 reserved_0[0x4]; u8 port_number[0x8]; u8 reserved_1[0xd]; u8 prio[0x3]; u8 reserved_2[0x1d]; u8 tclass[0x3]; }; struct mlx5_ifc_qpdp_reg_bits { u8 
reserved_0[0x8]; u8 port_number[0x8]; u8 reserved_1[0x10]; u8 reserved_2[0x1d]; u8 pprio[0x3]; }; struct mlx5_ifc_port_info_ro_fields_param_bits { u8 reserved_0[0x8]; u8 port[0x8]; u8 max_gid[0x10]; u8 reserved_1[0x20]; u8 port_guid[0x40]; }; struct mlx5_ifc_nvqc_reg_bits { u8 type[0x20]; u8 reserved_0[0x18]; u8 version[0x4]; u8 reserved_1[0x2]; u8 support_wr[0x1]; u8 support_rd[0x1]; }; struct mlx5_ifc_nvia_reg_bits { u8 reserved_0[0x1d]; u8 target[0x3]; u8 reserved_1[0x20]; }; struct mlx5_ifc_nvdi_reg_bits { struct mlx5_ifc_config_item_bits configuration_item_header; }; struct mlx5_ifc_nvda_reg_bits { struct mlx5_ifc_config_item_bits configuration_item_header; u8 configuration_item_data[0x20]; }; struct mlx5_ifc_node_info_ro_fields_param_bits { u8 system_image_guid[0x40]; u8 reserved_0[0x40]; u8 node_guid[0x40]; u8 reserved_1[0x10]; u8 max_pkey[0x10]; u8 reserved_2[0x20]; }; struct mlx5_ifc_ets_tcn_config_reg_bits { u8 g[0x1]; u8 b[0x1]; u8 r[0x1]; u8 reserved_0[0x9]; u8 group[0x4]; u8 reserved_1[0x9]; u8 bw_allocation[0x7]; u8 reserved_2[0xc]; u8 max_bw_units[0x4]; u8 reserved_3[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_ets_global_config_reg_bits { u8 reserved_0[0x2]; u8 r[0x1]; u8 reserved_1[0x1d]; u8 reserved_2[0xc]; u8 max_bw_units[0x4]; u8 reserved_3[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_qetc_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; u8 reserved_at_10[0x30]; struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; struct mlx5_ifc_ets_global_config_reg_bits global_configuration; }; struct mlx5_ifc_nodnic_mac_filters_bits { struct mlx5_ifc_mac_address_layout_bits mac_filter0; struct mlx5_ifc_mac_address_layout_bits mac_filter1; struct mlx5_ifc_mac_address_layout_bits mac_filter2; struct mlx5_ifc_mac_address_layout_bits mac_filter3; struct mlx5_ifc_mac_address_layout_bits mac_filter4; u8 reserved_0[0xc0]; }; struct mlx5_ifc_nodnic_gid_filters_bits { u8 mgid_filter0[16][0x8]; u8 mgid_filter1[16][0x8]; u8 mgid_filter2[16][0x8]; u8 mgid_filter3[16][0x8]; }; enum { MLX5_NODNIC_CONFIG_REG_NUM_PORTS_SINGLE_PORT = 0x0, MLX5_NODNIC_CONFIG_REG_NUM_PORTS_DUAL_PORT = 0x1, }; enum { MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_LEGACY_CQE = 0x0, MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_NEW_CQE = 0x1, }; struct mlx5_ifc_nodnic_config_reg_bits { u8 no_dram_nic_revision[0x8]; u8 hardware_format[0x8]; u8 support_receive_filter[0x1]; u8 support_promisc_filter[0x1]; u8 support_promisc_multicast_filter[0x1]; u8 reserved_0[0x2]; u8 log_working_buffer_size[0x3]; u8 log_pkey_table_size[0x4]; u8 reserved_1[0x3]; u8 num_ports[0x1]; u8 reserved_2[0x2]; u8 log_max_ring_size[0x6]; u8 reserved_3[0x18]; u8 lkey[0x20]; u8 cqe_format[0x4]; u8 reserved_4[0x1c]; u8 node_guid[0x40]; u8 reserved_5[0x740]; struct mlx5_ifc_nodnic_port_config_reg_bits port1_settings; struct mlx5_ifc_nodnic_port_config_reg_bits port2_settings; }; struct mlx5_ifc_vlan_layout_bits { u8 reserved_0[0x14]; u8 vlan[0xc]; u8 reserved_1[0x20]; }; struct mlx5_ifc_umr_pointer_desc_argument_bits { u8 reserved_0[0x20]; u8 mkey[0x20]; u8 addressh_63_32[0x20]; u8 addressl_31_0[0x20]; }; struct mlx5_ifc_ud_adrs_vector_bits { u8 dc_key[0x40]; u8 ext[0x1]; u8 reserved_0[0x7]; u8 destination_qp_dct[0x18]; u8 static_rate[0x4]; u8 sl_eth_prio[0x4]; u8 fl[0x1]; u8 mlid[0x7]; u8 rlid_udp_sport[0x10]; u8 reserved_1[0x20]; u8 rmac_47_16[0x20]; u8 rmac_15_0[0x10]; u8 tclass[0x8]; u8 hop_limit[0x8]; u8 reserved_2[0x1]; u8 grh[0x1]; u8 reserved_3[0x2]; u8 src_addr_index[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; }; struct 
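/*
 * Editorial note: the port_module_event layout below is the event-queue
 * payload for module plug/unplug events; its module_status and error_type
 * fields use the same 4-bit encoding as mlx5_ifc_pmpe_reg_bits above and
 * are extracted with the PORT_MODULE_EVENT_*_MASK values defined in the
 * port.h hunk further down.
 */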
mlx5_ifc_port_module_event_bits { u8 reserved_0[0x8]; u8 module[0x8]; u8 reserved_1[0xc]; u8 module_status[0x4]; u8 reserved_2[0x14]; u8 error_type[0x4]; u8 reserved_3[0x8]; u8 reserved_4[0xa0]; }; struct mlx5_ifc_icmd_control_bits { u8 opcode[0x10]; u8 status[0x8]; u8 reserved_0[0x7]; u8 busy[0x1]; }; struct mlx5_ifc_eqe_bits { u8 reserved_0[0x8]; u8 event_type[0x8]; u8 reserved_1[0x8]; u8 event_sub_type[0x8]; u8 reserved_2[0xe0]; union mlx5_ifc_event_auto_bits event_data; u8 reserved_3[0x10]; u8 signature[0x8]; u8 reserved_4[0x7]; u8 owner[0x1]; }; enum { MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, }; struct mlx5_ifc_cmd_queue_entry_bits { u8 type[0x8]; u8 reserved_0[0x18]; u8 input_length[0x20]; u8 input_mailbox_pointer_63_32[0x20]; u8 input_mailbox_pointer_31_9[0x17]; u8 reserved_1[0x9]; u8 command_input_inline_data[16][0x8]; u8 command_output_inline_data[16][0x8]; u8 output_mailbox_pointer_63_32[0x20]; u8 output_mailbox_pointer_31_9[0x17]; u8 reserved_2[0x9]; u8 output_length[0x20]; u8 token[0x8]; u8 signature[0x8]; u8 reserved_3[0x8]; u8 status[0x7]; u8 ownership[0x1]; }; struct mlx5_ifc_cmd_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; u8 syndrome[0x20]; u8 command_output[0x20]; }; struct mlx5_ifc_cmd_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; u8 reserved_1[0x10]; u8 op_mod[0x10]; u8 command[0][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { u8 mailbox_data[512][0x8]; u8 reserved_0[0x180]; u8 next_pointer_63_32[0x20]; u8 next_pointer_31_10[0x16]; u8 reserved_1[0xa]; u8 block_number[0x20]; u8 reserved_2[0x8]; u8 token[0x8]; u8 ctrl_signature[0x8]; u8 signature[0x8]; }; struct mlx5_ifc_mtt_bits { u8 ptag_63_32[0x20]; u8 ptag_31_8[0x18]; u8 reserved_0[0x6]; u8 wr_en[0x1]; u8 rd_en[0x1]; }; /* Vendor Specific Capabilities, VSC */ enum { MLX5_VSC_DOMAIN_ICMD = 0x1, MLX5_VSC_DOMAIN_PROTECTED_CRSPACE = 0x6, MLX5_VSC_DOMAIN_SEMAPHORES = 0xA, }; struct mlx5_ifc_vendor_specific_cap_bits { u8 type[0x8]; u8 length[0x8]; u8 next_pointer[0x8]; u8 capability_id[0x8]; u8 status[0x3]; u8 reserved_0[0xd]; u8 space[0x10]; u8 counter[0x20]; u8 semaphore[0x20]; u8 flag[0x1]; u8 reserved_1[0x1]; u8 address[0x1e]; u8 data[0x20]; }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, }; enum { MLX5_HEALTH_SYNDR_FW_ERR = 0x1, MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, MLX5_HEALTH_SYNDR_EQ_INV = 0xe, MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10, }; struct mlx5_ifc_initial_seg_bits { u8 fw_rev_minor[0x10]; u8 fw_rev_major[0x10]; u8 cmd_interface_rev[0x10]; u8 fw_rev_subminor[0x10]; u8 reserved_0[0x40]; u8 cmdq_phy_addr_63_32[0x20]; u8 cmdq_phy_addr_31_12[0x14]; u8 reserved_1[0x2]; u8 nic_interface[0x2]; u8 log_cmdq_size[0x4]; u8 log_cmdq_stride[0x4]; u8 command_doorbell_vector[0x20]; u8 reserved_2[0xf00]; u8 initializing[0x1]; u8 reserved_3[0x4]; u8 nic_interface_supported[0x3]; u8 reserved_4[0x18]; struct mlx5_ifc_health_buffer_bits health_buffer; u8 no_dram_nic_offset[0x20]; u8 reserved_5[0x6de0]; u8 internal_timer_h[0x20]; u8 internal_timer_l[0x20]; u8 
reserved_6[0x20]; u8 reserved_7[0x1f]; u8 clear_int[0x1]; u8 health_syndrome[0x8]; u8 health_counter[0x18]; u8 reserved_8[0x17fc0]; }; union mlx5_ifc_icmd_interface_document_bits { struct mlx5_ifc_fw_version_bits fw_version; struct mlx5_ifc_icmd_access_reg_in_bits icmd_access_reg_in; struct mlx5_ifc_icmd_access_reg_out_bits icmd_access_reg_out; struct mlx5_ifc_icmd_init_ocsd_in_bits icmd_init_ocsd_in; struct mlx5_ifc_icmd_ocbb_init_in_bits icmd_ocbb_init_in; struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits icmd_ocbb_query_etoc_stats_out; struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits icmd_ocbb_query_header_stats_out; struct mlx5_ifc_icmd_query_cap_general_bits icmd_query_cap_general; struct mlx5_ifc_icmd_query_cap_in_bits icmd_query_cap_in; struct mlx5_ifc_icmd_query_fw_info_out_bits icmd_query_fw_info_out; struct mlx5_ifc_icmd_query_virtual_mac_out_bits icmd_query_virtual_mac_out; struct mlx5_ifc_icmd_set_virtual_mac_in_bits icmd_set_virtual_mac_in; struct mlx5_ifc_icmd_set_wol_rol_in_bits icmd_set_wol_rol_in; struct mlx5_ifc_icmd_set_wol_rol_out_bits icmd_set_wol_rol_out; u8 reserved_0[0x42c0]; }; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; struct mlx5_ifc_infiniband_port_cntrs_bits infiniband_port_cntrs; u8 reserved_0[0x7c0]; }; struct mlx5_ifc_ppcnt_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_0[0x8]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_1[0x1c]; u8 prio_tc[0x3]; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; struct mlx5_ifc_pcie_performance_counters_data_layout_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 rx_errors[0x20]; u8 tx_errors[0x20]; u8 l0_to_recovery_eieos[0x20]; u8 l0_to_recovery_ts[0x20]; u8 l0_to_recovery_framing[0x20]; u8 l0_to_recovery_retrain[0x20]; u8 crc_error_dllp[0x20]; u8 crc_error_tlp[0x20]; u8 reserved_0[0x680]; }; struct mlx5_ifc_pcie_timers_and_states_data_layout_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 time_to_boot_image_start[0x20]; u8 time_to_link_image[0x20]; u8 calibration_time[0x20]; u8 time_to_first_perst[0x20]; u8 time_to_detect_state[0x20]; u8 time_to_l0[0x20]; u8 time_to_crs_en[0x20]; u8 time_to_plastic_image_start[0x20]; u8 time_to_iron_image_start[0x20]; u8 perst_handler[0x20]; u8 times_in_l1[0x20]; u8 times_in_l23[0x20]; u8 dl_down[0x20]; u8 config_cycle1usec[0x20]; u8 config_cycle2to7usec[0x20]; u8 config_cycle8to15usec[0x20]; u8 config_cycle16to63usec[0x20]; u8 config_cycle64usec[0x20]; u8 correctable_err_msg_sent[0x20]; u8 non_fatal_err_msg_sent[0x20]; u8 fatal_err_msg_sent[0x20]; u8 reserved_0[0x4e0]; }; struct mlx5_ifc_pcie_lanes_counters_data_layout_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 error_counter_lane0[0x20]; u8 error_counter_lane1[0x20]; u8 error_counter_lane2[0x20]; u8 
error_counter_lane3[0x20]; u8 error_counter_lane4[0x20]; u8 error_counter_lane5[0x20]; u8 error_counter_lane6[0x20]; u8 error_counter_lane7[0x20]; u8 error_counter_lane8[0x20]; u8 error_counter_lane9[0x20]; u8 error_counter_lane10[0x20]; u8 error_counter_lane11[0x20]; u8 error_counter_lane12[0x20]; u8 error_counter_lane13[0x20]; u8 error_counter_lane14[0x20]; u8 error_counter_lane15[0x20]; u8 reserved_0[0x580]; }; union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits { struct mlx5_ifc_pcie_performance_counters_data_layout_bits pcie_performance_counters_data_layout; struct mlx5_ifc_pcie_timers_and_states_data_layout_bits pcie_timers_and_states_data_layout; struct mlx5_ifc_pcie_lanes_counters_data_layout_bits pcie_lanes_counters_data_layout; u8 reserved_0[0xf8]; }; struct mlx5_ifc_mpcnt_reg_bits { u8 reserved_0[0x8]; u8 pcie_index[0x8]; u8 reserved_1[0xa]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_2[0x1f]; union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits counter_set; }; union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits ib_portcntrs_attribute_grp_data; struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits eth_per_traffic_class_cong_layout; struct mlx5_ifc_eth_per_traffic_class_layout_bits eth_per_traffic_class_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits link_level_retrans_cntr_grp_date; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; struct mlx5_ifc_pbmc_reg_bits pbmc_reg; struct mlx5_ifc_pcap_reg_bits pcap_reg; struct mlx5_ifc_peir_reg_bits peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; struct mlx5_ifc_phbr_binding_reg_bits phbr_binding_reg; struct mlx5_ifc_phbr_for_port_tclass_reg_bits phbr_for_port_tclass_reg; struct mlx5_ifc_phbr_for_prio_reg_bits phbr_for_prio_reg; struct mlx5_ifc_phrr_reg_bits phrr_reg; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_pifr_reg_bits pifr_reg; struct mlx5_ifc_pipg_reg_bits pipg_reg; struct mlx5_ifc_plbf_reg_bits plbf_reg; struct mlx5_ifc_plib_reg_bits plib_reg; struct mlx5_ifc_pll_status_data_bits pll_status_data; struct mlx5_ifc_plpc_reg_bits plpc_reg; struct mlx5_ifc_pmaos_reg_bits pmaos_reg; struct mlx5_ifc_pmlp_reg_bits pmlp_reg; struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; struct mlx5_ifc_pmpc_reg_bits pmpc_reg; struct mlx5_ifc_pmpe_reg_bits pmpe_reg; struct mlx5_ifc_pmpr_reg_bits pmpr_reg; struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; struct mlx5_ifc_ppll_reg_bits ppll_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits ptys_reg; struct mlx5_ifc_pude_reg_bits 
pude_reg; struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_slrp_reg_bits slrp_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; u8 reserved_0[0x7880]; }; union mlx5_ifc_debug_enhancements_document_bits { struct mlx5_ifc_health_buffer_bits health_buffer; u8 reserved_0[0x200]; }; union mlx5_ifc_no_dram_nic_document_bits { struct mlx5_ifc_nodnic_config_reg_bits nodnic_config_reg; struct mlx5_ifc_nodnic_cq_arming_word_bits nodnic_cq_arming_word; struct mlx5_ifc_nodnic_event_word_bits nodnic_event_word; struct mlx5_ifc_nodnic_gid_filters_bits nodnic_gid_filters; struct mlx5_ifc_nodnic_mac_filters_bits nodnic_mac_filters; struct mlx5_ifc_nodnic_port_config_reg_bits nodnic_port_config_reg; struct mlx5_ifc_nodnic_ring_config_reg_bits nodnic_ring_config_reg; struct mlx5_ifc_nodnic_ring_doorbell_bits nodnic_ring_doorbell; u8 reserved_0[0x3160]; }; union mlx5_ifc_uplink_pci_interface_document_bits { struct mlx5_ifc_initial_seg_bits initial_seg; struct mlx5_ifc_vendor_specific_cap_bits vendor_specific_cap; u8 reserved_0[0x20120]; }; struct mlx5_ifc_qpdpm_dscp_reg_bits { u8 e[0x1]; u8 reserved_at_01[0x0b]; u8 prio[0x04]; }; struct mlx5_ifc_qpdpm_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; }; struct mlx5_ifc_qpts_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x2d]; u8 trust_state[0x3]; }; #endif /* MLX5_IFC_H */ Index: head/sys/dev/mlx5/port.h =================================================================== --- head/sys/dev/mlx5/port.h (revision 341580) +++ head/sys/dev/mlx5/port.h (revision 341581) @@ -1,177 +1,179 @@ /*- * Copyright (c) 2016-2018, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
#endif /* MLX5_IFC_H */
Index: head/sys/dev/mlx5/port.h
===================================================================
--- head/sys/dev/mlx5/port.h	(revision 341580)
+++ head/sys/dev/mlx5/port.h	(revision 341581)
@@ -1,177 +1,179 @@
/*-
 * Copyright (c) 2016-2018, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MLX5_PORT_H__
#define __MLX5_PORT_H__

#include <dev/mlx5/driver.h>

enum mlx5_beacon_duration {
        MLX5_BEACON_DURATION_OFF = 0x0,
        MLX5_BEACON_DURATION_INF = 0xffff,
};

enum mlx5_module_id {
        MLX5_MODULE_ID_SFP       = 0x3,
        MLX5_MODULE_ID_QSFP      = 0xC,
        MLX5_MODULE_ID_QSFP_PLUS = 0xD,
        MLX5_MODULE_ID_QSFP28    = 0x11,
};

enum mlx5_an_status {
        MLX5_AN_UNAVAILABLE = 0,
        MLX5_AN_COMPLETE    = 1,
        MLX5_AN_FAILED      = 2,
        MLX5_AN_LINK_UP     = 3,
        MLX5_AN_LINK_DOWN   = 4,
};

#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256

enum mlx5e_link_mode {
        MLX5E_1000BASE_CX_SGMII = 0,
        MLX5E_1000BASE_KX       = 1,
        MLX5E_10GBASE_CX4       = 2,
        MLX5E_10GBASE_KX4       = 3,
        MLX5E_10GBASE_KR        = 4,
        MLX5E_20GBASE_KR2       = 5,
        MLX5E_40GBASE_CR4       = 6,
        MLX5E_40GBASE_KR4       = 7,
        MLX5E_56GBASE_R4        = 8,
        MLX5E_10GBASE_CR        = 12,
        MLX5E_10GBASE_SR        = 13,
        MLX5E_10GBASE_ER        = 14,
        MLX5E_40GBASE_SR4       = 15,
        MLX5E_40GBASE_LR4       = 16,
        MLX5E_50GBASE_SR2       = 18,
        MLX5E_100GBASE_CR4      = 20,
        MLX5E_100GBASE_SR4      = 21,
        MLX5E_100GBASE_KR4      = 22,
        MLX5E_100GBASE_LR4      = 23,
        MLX5E_100BASE_TX        = 24,
        MLX5E_1000BASE_T        = 25,
        MLX5E_10GBASE_T         = 26,
        MLX5E_25GBASE_CR        = 27,
        MLX5E_25GBASE_KR        = 28,
        MLX5E_25GBASE_SR        = 29,
        MLX5E_50GBASE_CR2       = 30,
        MLX5E_50GBASE_KR2       = 31,
        MLX5E_LINK_MODES_NUMBER,
};

enum mlx5e_connector_type {
        MLX5E_PORT_UNKNOWN = 0,
        MLX5E_PORT_NONE    = 1,
        MLX5E_PORT_TP      = 2,
        MLX5E_PORT_AUI     = 3,
        MLX5E_PORT_BNC     = 4,
        MLX5E_PORT_MII     = 5,
        MLX5E_PORT_FIBRE   = 6,
        MLX5E_PORT_DA      = 7,
        MLX5E_PORT_OTHER   = 8,
        MLX5E_CONNECTOR_TYPE_NUMBER,
};

enum mlx5_qpts_trust_state {
        MLX5_QPTS_TRUST_PCP  = 1,
        MLX5_QPTS_TRUST_DSCP = 2,
        MLX5_QPTS_TRUST_BOTH = 3,
};

#define MLX5E_PROT_MASK(link_mode) (1 << (link_mode))

#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
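/*
 * Illustrative sketch, not part of this change: MLX5E_PROT_MASK() turns
 * a link-mode enumerator into the bit expected by the PTYS proto
 * fields.  Using the prototypes declared just below, a caller could
 * restrict the advertised speeds to 25G/50G copper like this; the
 * helper name is hypothetical.
 */
static inline int
mlx5_limit_proto_sketch(struct mlx5_core_dev *mdev)
{
        u32 proto = MLX5E_PROT_MASK(MLX5E_25GBASE_CR) |
            MLX5E_PROT_MASK(MLX5E_50GBASE_CR2);

        return mlx5_set_port_proto(mdev, proto, MLX5_PTYS_EN);
}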
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask);
int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, u8 *an_disable_cap, u8 *an_disable_status);
int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable, u32 eth_proto_admin, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, u32 *proto_oper, u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask);
int mlx5_set_port_status(struct mlx5_core_dev *dev, enum mlx5_port_status status);
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status);
int mlx5_set_port_pause_and_pfc(struct mlx5_core_dev *dev, u32 port, u8 rx_pause, u8 tx_pause, u8 pfc_en_rx, u8 pfc_en_tx);
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port, u32 *rx_pause, u32 *tx_pause);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num);
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num, int device_addr, int size, int module_num, u32 *data, int *size_read);
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_query_port_tc_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_units);
int mlx5_modify_port_tc_rate_limit(struct mlx5_core_dev *mdev, const u8 *max_bw_value, const u8 *max_bw_units);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, u8 prio, u8 *tc);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, int prio_index, const u8 prio_tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, const u8 *tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, const u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *bw_pct);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);

#define MLX5_MAX_SUPPORTED_DSCP 64
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, const u8 *dscp2prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+int mlx5_query_pddr_range_info(struct mlx5_core_dev *mdev, u8 local_port, u8 *is_er_type);
+
#endif /* __MLX5_PORT_H__ */
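/*
 * Illustrative sketch, not part of this change: a consumer of the newly
 * added mlx5_query_pddr_range_info() might use it to detect
 * extended-range (ER) modules on the first port.  The helper name is
 * hypothetical; a failed query is treated as "not ER".
 */
static inline bool
mlx5_port_is_er_sketch(struct mlx5_core_dev *mdev)
{
        u8 is_er_type = 0;

        if (mlx5_query_pddr_range_info(mdev, 1, &is_er_type) != 0)
                return false;
        return is_er_type != 0;
}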