Index: stable/11/sys/dev/mlx5/device.h
===================================================================
--- stable/11/sys/dev/mlx5/device.h	(revision 361413)
+++ stable/11/sys/dev/mlx5/device.h	(revision 361414)
@@ -1,1234 +1,1240 @@
/*-
 * Copyright (c) 2013-2018, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include
#include
#include

#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	120000
#define FW_INIT_WARN_MESSAGE_INTERVAL	20000

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
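/*
 * Illustrative sketch (hypothetical "demo" layout, not from this file; shown
 * here ahead of the accessor definitions below for exposition only).  The
 * helper macros above resolve, at compile time, which big-endian dword of a
 * command buffer a field occupies (__mlx5_dw_off), the shift inside that
 * dword (__mlx5_dw_bit_off) and the field mask (__mlx5_mask); the
 * MLX5_SET()/MLX5_GET() macros defined just below combine them into a
 * read-modify-write accessor.  Real layouts live in mlx5_ifc.h.
 */
struct mlx5_ifc_demo_bits {		/* each u8 array spans its size in bits */
	u8 reserved_0[0x10];		/* bits  0..15 */
	u8 opcode[0x10];		/* bits 16..31: dword 0, shift 0, mask 0xffff */
	u8 qpn[0x18];			/* bits 32..55: dword 1, shift 8, mask 0xffffff */
	u8 reserved_1[0x8];		/* bits 56..63 */
};

static inline void mlx5_demo_set_get(void *buf)
{
	MLX5_SET(demo, buf, opcode, 0x107);	/* read-modify-write of dword 0 */
	MLX5_SET(demo, buf, qpn, 0xabcdef);	/* read-modify-write of dword 1 */
	/* MLX5_GET() undoes the byte swap, shift and mask:
	 * MLX5_GET(demo, buf, opcode) == 0x107,
	 * MLX5_GET(demo, buf, qpn) == 0xabcdef */
}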
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
	type_t tmp; \
	switch (sizeof(tmp)) { \
	case sizeof(u8): \
		tmp = (__force type_t)MLX5_GET(typ, p, fld); \
		break; \
	case sizeof(u16): \
		tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u32): \
		tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u64): \
		tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
		break; \
	} \
	tmp; \
	})

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

/* insert a value to a struct */
#define MLX5_VSC_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__le32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_le32((le32_to_cpu(*((__le32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_VSC_GET(typ, p, fld) ((le32_to_cpu(*((__le32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_VSC_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_VSC_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

enum {
	MLX5_MAX_COMMANDS		= 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512, MLX5_CMD_MBOX_SIZE = 1024, MLX5_PCI_CMD_XPORT = 7, MLX5_MKEY_BSF_OCTO_SIZE = 4, MLX5_MAX_PSVS = 4, }; enum { MLX5_EXTENDED_UD_AV = 0x80000000, }; enum { MLX5_CQ_FLAGS_OI = 2, }; enum { MLX5_STAT_RATE_OFFSET = 5, }; enum { MLX5_INLINE_SEG = 0x80000000, }; enum { MLX5_HW_START_PADDING = MLX5_INLINE_SEG, }; enum { MLX5_MIN_PKEY_TABLE_SIZE = 128, MLX5_MAX_LOG_PKEY_TABLE = 5, }; enum { MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 }; enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, MLX5_PERM_REMOTE_READ = 1 << 4, MLX5_PERM_REMOTE_WRITE = 1 << 5, MLX5_PERM_ATOMIC = 1 << 6, MLX5_PERM_UMR_EN = 1 << 7, }; enum { MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, }; enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, MLX5_MKEY_LEN64 = 1 << 31, }; enum { MLX5_EN_RD = (u64)1, MLX5_EN_WR = (u64)2 }; enum { MLX5_BF_REGS_PER_PAGE = 4, MLX5_MAX_UAR_PAGES = 1 << 8, MLX5_NON_FP_BF_REGS_PER_PAGE = 2, MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, }; enum { MLX5_MKEY_MASK_LEN = 1ull << 0, MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, MLX5_MKEY_MASK_START_ADDR = 1ull << 6, MLX5_MKEY_MASK_PD = 1ull << 7, MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, MLX5_MKEY_MASK_BSF_EN = 1ull << 12, MLX5_MKEY_MASK_KEY = 1ull << 13, MLX5_MKEY_MASK_QPN = 1ull << 14, MLX5_MKEY_MASK_LR = 1ull << 17, MLX5_MKEY_MASK_LW = 1ull << 18, MLX5_MKEY_MASK_RR = 1ull << 19, MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, MLX5_MKEY_MASK_FREE = 1ull << 29, }; enum { MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), MLX5_UMR_CHECK_NOT_FREE = (1 << 5), MLX5_UMR_CHECK_FREE = (2 << 5), MLX5_UMR_INLINE = (1 << 7), }; #define MLX5_UMR_MTT_ALIGNMENT 0x40 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT enum { MLX5_EVENT_QUEUE_TYPE_QP = 0, MLX5_EVENT_QUEUE_TYPE_RQ = 1, MLX5_EVENT_QUEUE_TYPE_SQ = 2, }; enum { MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, MLX5_PORT_CHANGE_SUBTYPE_LID = 6, MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, }; enum { MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1, MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE, MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE, MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE, MLX5_MAX_INLINE_RECEIVE_SIZE = 64 }; enum { MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33, MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48, }; enum { MLX5_ROCE_VERSION_1 = 0, MLX5_ROCE_VERSION_1_5 = 1, MLX5_ROCE_VERSION_2 = 2, }; enum { MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1, MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5, MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2, }; enum { MLX5_ROCE_L3_TYPE_IPV4 = 0, MLX5_ROCE_L3_TYPE_IPV6 
= 1, }; enum { MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1, MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2, }; enum { MLX5_OPCODE_NOP = 0x00, MLX5_OPCODE_SEND_INVAL = 0x01, MLX5_OPCODE_RDMA_WRITE = 0x08, MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, MLX5_OPCODE_SEND = 0x0a, MLX5_OPCODE_SEND_IMM = 0x0b, MLX5_OPCODE_LSO = 0x0e, MLX5_OPCODE_RDMA_READ = 0x10, MLX5_OPCODE_ATOMIC_CS = 0x11, MLX5_OPCODE_ATOMIC_FA = 0x12, MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, MLX5_RECV_OPCODE_SEND_IMM = 0x02, MLX5_RECV_OPCODE_SEND_INVAL = 0x03, MLX5_CQE_OPCODE_ERROR = 0x1e, MLX5_CQE_OPCODE_RESIZE = 0x16, MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, MLX5_OPCODE_UMR = 0x25, MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15), }; enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, MLX5_SET_PORT_NODE_GUID = 17, MLX5_SET_PORT_SYS_GUID = 18, MLX5_SET_PORT_GID_TABLE = 19, MLX5_SET_PORT_PKEY_TABLE = 20, }; enum { MLX5_MAX_PAGE_SHIFT = 31 }; enum { MLX5_ADAPTER_PAGE_SHIFT = 12, MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, }; enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; enum { /* * Max wqe size for rdma read is 512 bytes, so this * limits our max_sge_rd as the wqe needs to fit: * - ctrl segment (16 bytes) * - rdma segment (16 bytes) * - scatter elements (16 bytes each) */ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 }; struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; __be32 inlen; __be64 in_ptr; __be32 in[4]; __be32 out[4]; __be64 out_ptr; __be32 outlen; u8 token; u8 sig; u8 rsvd1; u8 status_own; }; enum mlx5_fatal_assert_bit_offsets { MLX5_RFR_OFFSET = 31, }; struct mlx5_health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; __be32 assert_exit_ptr; __be32 assert_callra; __be32 rsvd1[2]; __be32 fw_ver; __be32 hw_id; __be32 rfr; u8 irisc_index; u8 synd; __be16 ext_synd; }; enum mlx5_initializing_bit_offsets { MLX5_FW_RESET_SUPPORTED_OFFSET = 30, }; enum mlx5_cmd_addr_l_sz_offset { MLX5_NIC_IFC_OFFSET = 8, }; struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; __be32 rsvd0[2]; __be32 cmdq_addr_h; __be32 cmdq_addr_l_sz; __be32 cmd_dbell; __be32 rsvd1[120]; __be32 initializing; struct mlx5_health_buffer health; __be32 rsvd2[880]; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; __be32 health_counter; __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; }; struct mlx5_eqe_comp { __be32 reserved[6]; __be32 cqn; }; struct mlx5_eqe_qp_srq { __be32 reserved[6]; __be32 qp_srq_n; }; struct mlx5_eqe_cq_err { __be32 cqn; u8 reserved1[7]; u8 syndrome; }; struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; }; struct mlx5_eqe_gpio { __be32 reserved0[2]; __be64 gpio_event; }; struct mlx5_eqe_congestion { u8 type; u8 rsvd0; u8 congestion_level; }; struct mlx5_eqe_stall_vl { u8 rsvd0[3]; u8 port_vl; }; struct mlx5_eqe_cmd { __be32 vector; __be32 rsvd[6]; }; struct mlx5_eqe_page_req { u8 rsvd0[2]; __be16 func_id; __be32 num_pages; __be32 rsvd1[5]; }; struct mlx5_eqe_vport_change { u8 rsvd0[2]; __be16 vport_num; __be32 rsvd1[6]; }; #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF enum { MLX5_MODULE_STATUS_PLUGGED_ENABLED = 0x1, MLX5_MODULE_STATUS_UNPLUGGED = 0x2, MLX5_MODULE_STATUS_ERROR = 0x3, MLX5_MODULE_STATUS_NUM , }; enum { MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0, 
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
	MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE = 0x5,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
	MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
	MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED = 0x8,
+	MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE = 0x9,
+	MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT = 0xa,
+	MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE = 0xb,
+	MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED = 0xc,
+	MLX5_MODULE_EVENT_ERROR_HIGH_POWER = 0xd,
+	MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT = 0xe,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_eqe_port_module_event {
	u8	rsvd0;
	u8	module;
	u8	rsvd1;
	u8	module_status;
	u8	rsvd2[2];
	u8	error_type;
};

struct mlx5_eqe_general_notification_event {
	u32	rq_user_index_delay_drop;
	u32	rsvd0[6];
};

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;
} __packed;

union ev_data {
	__be32					raw[7];
	struct mlx5_eqe_cmd			cmd;
	struct mlx5_eqe_comp			comp;
	struct mlx5_eqe_qp_srq			qp_srq;
	struct mlx5_eqe_cq_err			cq_err;
	struct mlx5_eqe_port_state		port;
	struct mlx5_eqe_gpio			gpio;
	struct mlx5_eqe_congestion		cong;
	struct mlx5_eqe_stall_vl		stall_vl;
	struct mlx5_eqe_page_req		req_pages;
	struct mlx5_eqe_port_module_event	port_module_event;
	struct mlx5_eqe_vport_change		vport_change;
	struct mlx5_eqe_general_notification_event general_notifications;
	struct mlx5_eqe_temp_warning		temp_warning;
} __packed;

struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

#define	MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
	(MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		tunneled_etc;
	u8		rsvd0[3];
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_hdr_type_etc;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32		imm_inval_pkey;
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be64		timestamp;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
}

static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
}

static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->vlan_info) & 0xfff;
}

static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
{
	memcpy(smac, &cqe->rss_hash_type, 4);
	memcpy(smac + 4, &cqe->slid, 2);
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_hdr_type_etc & 0x1;
}
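/*
 * Illustrative sketch (hypothetical consumer, not from this header): how an
 * RX completion path might use the mlx5_cqe64 accessors above.  Only the
 * helpers and CQE fields are real; the function itself is an example.
 */
static inline void mlx5_demo_parse_rx_cqe(struct mlx5_cqe64 *cqe)
{
	u8 l4_type = get_cqe_l4_hdr_type(cqe);		/* CQE_L4_HDR_TYPE_* */
	u32 byte_cnt = be32_to_cpu(cqe->byte_cnt);	/* received byte count */
	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;	/* bits [31:24] of srqn */
	u16 vid = cqe_has_vlan(cqe) ? get_cqe_vlan(cqe) : 0; /* low 12 bits of vlan_info */

	(void)l4_type; (void)byte_cnt; (void)lro_num_seg; (void)vid;
}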
static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tunneled_etc & 0x1;
}

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	/* source L3 hash types */
	CQE_RSS_SRC_HTYPE_IP	= 0x3 << 0,
	CQE_RSS_SRC_HTYPE_IPV4	= 0x1 << 0,
	CQE_RSS_SRC_HTYPE_IPV6	= 0x2 << 0,

	/* destination L3 hash types */
	CQE_RSS_DST_HTYPE_IP	= 0x3 << 2,
	CQE_RSS_DST_HTYPE_IPV4	= 0x1 << 2,
	CQE_RSS_DST_HTYPE_IPV6	= 0x2 << 2,

	/* source L4 hash types */
	CQE_RSS_SRC_HTYPE_L4	= 0x3 << 4,
	CQE_RSS_SRC_HTYPE_TCP	= 0x1 << 4,
	CQE_RSS_SRC_HTYPE_UDP	= 0x2 << 4,
	CQE_RSS_SRC_HTYPE_IPSEC	= 0x3 << 4,

	/* destination L4 hash types */
	CQE_RSS_DST_HTYPE_L4	= 0x3 << 6,
	CQE_RSS_DST_HTYPE_TCP	= 0x1 << 6,
	CQE_RSS_DST_HTYPE_UDP	= 0x2 << 6,
	CQE_RSS_DST_HTYPE_IPSEC	= 0x3 << 6,
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

struct mlx5_sig_err_cqe {
	u8	rsvd0[16];
	__be32	expected_trans_sig;
	__be32	actual_trans_sig;
	__be32	expected_reftag;
	__be32	actual_reftag;
	__be16	syndrome;
	u8	rsvd22[2];
	__be32	mkey;
	__be64	err_offset;
	u8	rsvd30[8];
	__be32	qpn;
	u8	rsvd38[2];
	u8	signature;
	u8	op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8	rsvd0[2];
	__be16	next_wqe_index;
	u8	signature;
	u8	rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
};

static inline int mlx5_host_is_le(void)
{
#if defined(__LITTLE_ENDIAN)
	return 1;
#elif defined(__BIG_ENDIAN)
	return 0;
#else
#error Host endianness not defined
#endif
}

#define MLX5_CMD_OP_MAX 0x939

enum {
	VPORT_STATE_DOWN	= 0x0,
	VPORT_STATE_UP		= 0x1,
	VPORT_STATE_FOLLOW	= 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4	= 0,
	MLX5_L3_PROT_TYPE_IPV6	= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP	= 0,
	MLX5_L4_PROT_TYPE_UDP	= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	 = 0,
	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL	 = 2,
	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	 = 4,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX	 = 5,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX	 = 6,
	MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE	      = 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE  = 2
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP  = 1 << 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP  = 1 << 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT =
1 << 3 }; enum { MLX5_UC_ADDR_CHANGE = (1 << 0), MLX5_MC_ADDR_CHANGE = (1 << 1), MLX5_VLAN_CHANGE = (1 << 2), MLX5_PROMISC_CHANGE = (1 << 3), MLX5_MTU_CHANGE = (1 << 4), }; enum mlx5_list_type { MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0, MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1, MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2, }; enum { MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, }; /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_MAX = 0, HCA_CAP_OPMOD_GET_CUR = 1, }; enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, MLX5_CAP_ODP, MLX5_CAP_ATOMIC, MLX5_CAP_ROCE, MLX5_CAP_IPOIB_OFFLOADS, MLX5_CAP_EOIB_OFFLOADS, MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, MLX5_CAP_SNAPSHOT, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, MLX5_CAP_DEBUG, /* NUM OF CAP Types */ MLX5_CAP_NUM }; enum mlx5_qcam_reg_groups { MLX5_QCAM_REGS_FIRST_128 = 0x0, }; enum mlx5_qcam_feature_groups { MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; enum mlx5_pcam_reg_groups { MLX5_PCAM_REGS_5000_TO_507F = 0x0, }; enum mlx5_pcam_feature_groups { MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; enum mlx5_mcam_reg_groups { MLX5_MCAM_REGS_FIRST_128 = 0x0, }; enum mlx5_mcam_feature_groups { MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) #define 
MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap) #define MLX5_CAP_SNAPSHOT(mdev, cap) \ MLX5_GET(snapshot_cap, \ mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap) #define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \ MLX5_GET(snapshot_cap, \ mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap) #define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap) #define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap) #define MLX5_CAP_DEBUG(mdev, cap) \ MLX5_GET(debug_cap, \ mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap) #define MLX5_CAP_DEBUG_MAX(mdev, cap) \ MLX5_GET(debug_cap, \ mdev->hca_caps_max[MLX5_CAP_DEBUG], cap) #define MLX5_CAP_QOS(mdev, cap) \ MLX5_GET(qos_cap,\ mdev->hca_caps_cur[MLX5_CAP_QOS], cap) #define MLX5_CAP_QOS_MAX(mdev, cap) \ MLX5_GET(qos_cap,\ mdev->hca_caps_max[MLX5_CAP_QOS], cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) #define MLX5_CAP_PCAM_REG(mdev, reg) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg) #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) #define MLX5_CAP_MCAM_REG(mdev, reg) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg) #define MLX5_CAP_QCAM_REG(mdev, fld) \ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld) #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld) #define MLX5_CAP_FPGA(mdev, cap) \ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP64_FPGA(mdev, cap) \ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, MLX5_CMD_STAT_BAD_OP_ERR = 0x2, MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, MLX5_CMD_STAT_BAD_RES_ERR = 0x5, MLX5_CMD_STAT_RES_BUSY = 0x6, MLX5_CMD_STAT_LIM_ERR = 0x8, MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, MLX5_CMD_STAT_IX_ERR = 0xa, MLX5_CMD_STAT_NO_RES_ERR = 0xf, MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, }; enum { MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, MLX5_RFC_2863_COUNTERS_GROUP = 0x1, MLX5_RFC_2819_COUNTERS_GROUP = 0x2, MLX5_RFC_3635_COUNTERS_GROUP = 0x3, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; enum { MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2, }; enum { MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE, MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE, }; enum { NUM_DRIVER_UARS = 4, NUM_LOW_LAT_UUARS = 4, }; enum { MLX5_CAP_PORT_TYPE_IB = 0x0, MLX5_CAP_PORT_TYPE_ETH = 0x1, }; enum { MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0, 
MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1, MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2 }; enum mlx5_inline_modes { MLX5_INLINE_MODE_NONE, MLX5_INLINE_MODE_L2, MLX5_INLINE_MODE_IP, MLX5_INLINE_MODE_TCP_UDP, }; enum { MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2, }; static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) return 0; return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } struct mlx5_ifc_mcia_reg_bits { u8 l[0x1]; u8 reserved_0[0x7]; u8 module[0x8]; u8 reserved_1[0x8]; u8 status[0x8]; u8 i2c_device_address[0x8]; u8 page_number[0x8]; u8 device_address[0x10]; u8 reserved_2[0x10]; u8 size[0x10]; u8 reserved_3[0x20]; u8 dword_0[0x20]; u8 dword_1[0x20]; u8 dword_2[0x20]; u8 dword_3[0x20]; u8 dword_4[0x20]; u8 dword_5[0x20]; u8 dword_6[0x20]; u8 dword_7[0x20]; u8 dword_8[0x20]; u8 dword_9[0x20]; u8 dword_10[0x20]; u8 dword_11[0x20]; }; #define MLX5_CMD_OP_QUERY_EEPROM 0x93c struct mlx5_mini_cqe8 { union { __be32 rx_hash_result; __be16 checksum; __be16 rsvd; struct { __be16 wqe_counter; u8 s_wqe_opcode; u8 reserved; } s_wqe_info; }; __be32 byte_cnt; }; enum { MLX5_NO_INLINE_DATA, MLX5_INLINE_DATA32_SEG, MLX5_INLINE_DATA64_SEG, MLX5_COMPRESSED, }; enum mlx5_exp_cqe_zip_recv_type { MLX5_CQE_FORMAT_HASH, MLX5_CQE_FORMAT_CSUM, }; #define MLX5E_CQE_FORMAT_MASK 0xc static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe) { return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2; } enum { MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, }; enum { MLX5_FRL_LEVEL3 = 0x8, MLX5_FRL_LEVEL6 = 0x40, }; /* 8 regular priorities + 1 for multicast */ #define MLX5_NUM_BYPASS_FTS 9 #endif /* MLX5_DEVICE_H */ Index: stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c =================================================================== --- stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c (revision 361413) +++ stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c (revision 361414) @@ -1,773 +1,785 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include
#include
#include
#include
#include
#include "mlx5_core.h"
#include "eswitch.h"

#include "opt_rss.h"

#ifdef RSS
#include
#include
#endif

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)	    | \
			       (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

/*Function prototype*/
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
						 struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
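/*
 * Illustrative sketch (not part of the change): the ownership test in
 * next_eqe_sw() above.  nent is a power of two, so (cons_index & nent) is the
 * driver's pass parity over the ring, while hardware toggles each EQE's owner
 * bit once per pass.  An entry belongs to software exactly when the two
 * parities match, which the XOR evaluates without any extra state.
 */
static inline bool mlx5_demo_eqe_is_sw_owned(u8 owner, u32 cons_index, u32 nent)
{
	/* pass 0: valid entries have owner == 0 (the ring is initialized to
	 * MLX5_EQE_OWNER_INIT_VAL == 1); pass 1: hardware writes owner == 1
	 * again, and so on. */
	return (((owner & 1) ^ !!(cons_index & nent)) == 0);
}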
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
		return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
	case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
		return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
	switch (subtype) {
	case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
		return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
		return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
	}
	return -1;
}

static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ?
0 : 2); u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); __raw_writel((__force u32) cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { mlx5_core_warn(dev, "High temperature on sensors with bit set %#jx %#jx\n", (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb), (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb)); } static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { struct mlx5_eqe *eqe; int eqes_found = 0; int set_ci = 0; u32 cqn; u32 rsn; u8 port; while ((eqe = next_eqe_sw(eq))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. */ rmb(); mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); switch (eqe->type) { case MLX5_EVENT_TYPE_COMP: cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; mlx5_cq_completion(dev, cqn); break; case MLX5_EVENT_TYPE_PATH_MIG: case MLX5_EVENT_TYPE_COMM_EST: case MLX5_EVENT_TYPE_SQ_DRAINED: case MLX5_EVENT_TYPE_SRQ_LAST_WQE: case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: case MLX5_EVENT_TYPE_PATH_MIG_FAILED: case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", eqe_type_str(eqe->type), eqe->type, rsn); mlx5_rsc_event(dev, rsn, eqe->type); break; case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", eqe_type_str(eqe->type), eqe->type, rsn); mlx5_srq_event(dev, rsn, eqe->type); break; case MLX5_EVENT_TYPE_CMD: if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), MLX5_CMD_MODE_EVENTS); } break; case MLX5_EVENT_TYPE_PORT_CHANGE: port = (eqe->data.port.port >> 4) & 0xf; switch (eqe->sub_type) { case MLX5_PORT_CHANGE_SUBTYPE_DOWN: case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: case MLX5_PORT_CHANGE_SUBTYPE_LID: case MLX5_PORT_CHANGE_SUBTYPE_PKEY: case MLX5_PORT_CHANGE_SUBTYPE_GUID: case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: if (dev->event) dev->event(dev, port_subtype_event(eqe->sub_type), (unsigned long)port); break; default: mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", port, eqe->sub_type); } break; case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT: port = (eqe->data.port.port >> 4) & 0xf; switch (eqe->sub_type) { case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX: case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE: case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE: case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE: if (dev->event) dev->event(dev, dcbx_subevent(eqe->sub_type), 0); break; default: mlx5_core_warn(dev, "dcbx event with unrecognized subtype: port %d, sub_type %d\n", port, eqe->sub_type); } break; case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT: mlx5_port_general_notification_event(dev, eqe); break; case MLX5_EVENT_TYPE_CQ_ERROR: cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", cqn, eqe->data.cq_err.syndrome); mlx5_cq_event(dev, cqn, eqe->type); break; case MLX5_EVENT_TYPE_PAGE_REQUEST: { u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); mlx5_core_dbg(dev, "page request for func 
0x%x, npages %d\n", func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages); } break; case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT: mlx5_port_module_event(dev, eqe); break; case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: { struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change; u16 vport_num = be16_to_cpu(vc_eqe->vport_num); if (dev->event) dev->event(dev, MLX5_DEV_EVENT_VPORT_CHANGE, (unsigned long)vport_num); } if (dev->priv.eswitch != NULL) mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); break; case MLX5_EVENT_TYPE_FPGA_ERROR: case MLX5_EVENT_TYPE_FPGA_QP_ERROR: mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); break; case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: mlx5_temp_warning_event(dev, eqe); break; default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); break; } ++eq->cons_index; eqes_found = 1; ++set_ci; /* The HCA will think the queue has overflowed if we * don't tell it we've been processing events. We * create our EQs with MLX5_NUM_SPARE_EQE extra * entries, so we must update our consumer index at * least that often. */ if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { eq_update_ci(eq, 0); set_ci = 0; } } eq_update_ci(eq, 1); return eqes_found; } static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) { struct mlx5_eq *eq = eq_ptr; struct mlx5_core_dev *dev = eq->dev; /* check if IRQs are not disabled */ if (likely(dev->priv.disable_irqs == 0)) mlx5_eq_int(dev, eq); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } static void init_eq_buf(struct mlx5_eq *eq) { struct mlx5_eqe *eqe; int i; for (i = 0; i < eq->nent; i++) { eqe = get_eqe(eq, i); eqe->owner = MLX5_EQE_OWNER_INIT_VAL; } } int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, struct mlx5_uar *uar) { u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; __be64 *pas; void *eqc; int inlen; u32 *in; int err; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, &eq->buf); if (err) return err; init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; goto err_buf; } pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); mlx5_fill_page_array(&eq->buf, pas); MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); MLX5_SET64(create_eq_in, in, event_bitmask, mask); eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); MLX5_SET(eqc, eqc, uar_page, uar->index); MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, log_page_size, eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) goto err_in; eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = vecidx; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0, "mlx5_core", eq); if (err) goto err_eq; #ifdef RSS if (vecidx >= MLX5_EQ_VEC_COMP_BASE) { u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE; err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector, rss_getcpu(bucket % rss_getnumbuckets())); if (err) goto err_irq; } #else if (0) goto err_irq; #endif /* EQs are created in ARMED state */ eq_update_ci(eq, 1); kvfree(in); return 0; err_irq: free_irq(priv->msix_arr[vecidx].vector, eq); err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); err_in: kvfree(in); err_buf: mlx5_buf_free(dev, 
&eq->buf); return err; } EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { int err; free_irq(dev->priv.msix_arr[eq->irqn].vector, eq); err = mlx5_cmd_destroy_eq(dev, eq->eqn); if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); mlx5_buf_free(dev, &eq->buf); return err; } EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); int mlx5_eq_init(struct mlx5_core_dev *dev) { int err; spin_lock_init(&dev->priv.eq_table.lock); err = 0; return err; } void mlx5_eq_cleanup(struct mlx5_core_dev *dev) { } int mlx5_start_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; if (MLX5_CAP_GEN(dev, port_module_event)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT); if (MLX5_CAP_GEN(dev, nic_vport_change_event)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); if (MLX5_CAP_GEN(dev, dcbx)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT); if (MLX5_CAP_GEN(dev, fpga)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) | (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR); if (MLX5_CAP_GEN(dev, temp_warn_event)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); if (MLX5_CAP_GEN(dev, general_notification_event)) { async_event_mask |= (1ull << MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT); } err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, &dev->priv.uuari.uars[0]); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); return err; } mlx5_cmd_use_events(dev); err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, &dev->priv.uuari.uars[0]); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; } err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, &dev->priv.uuari.uars[0]); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); goto err2; } return err; err2: mlx5_destroy_unmap_eq(dev, &table->async_eq); err1: mlx5_cmd_use_polling(dev); mlx5_destroy_unmap_eq(dev, &table->cmd_eq); return err; } int mlx5_stop_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; int err; err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) return err; mlx5_destroy_unmap_eq(dev, &table->async_eq); mlx5_cmd_use_polling(dev); err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); if (err) mlx5_cmd_use_events(dev); return err; } int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u32 *out, int outlen) { u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0}; memset(out, 0, outlen); MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); MLX5_SET(query_eq_in, in, eq_number, eq->eqn); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL_GPL(mlx5_core_eq_query); static const char *mlx5_port_module_event_error_type_to_string(u8 error_type) { switch (error_type) { case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED: return "Power budget exceeded"; case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE: return "Long Range for non MLNX cable"; case MLX5_MODULE_EVENT_ERROR_BUS_STUCK: return "Bus stuck(I2C or data shorted)"; case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT: return "No EEPROM/retry timeout"; case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST: return "Enforce part 
number list"; case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE: return "Unknown identifier"; case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE: return "High Temperature"; case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED: return "Bad or shorted cable/module"; case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED: return "PMD type is not enabled"; + case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE: + return "Laster_TEC_failure"; + case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT: + return "High_current"; + case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE: + return "High_voltage"; + case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED: + return "pcie_system_power_slot_Exceeded"; + case MLX5_MODULE_EVENT_ERROR_HIGH_POWER: + return "High_power"; + case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT: + return "Module_state_machine_fault"; default: return "Unknown error type"; } } unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num) { if (module_num < 0 || module_num >= MLX5_MAX_PORTS) return 0; /* undefined */ return dev->module_status[module_num]; } static void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { unsigned int module_num; unsigned int module_status; unsigned int error_type; struct mlx5_eqe_port_module_event *module_event_eqe; module_event_eqe = &eqe->data.port_module_event; module_num = (unsigned int)module_event_eqe->module; module_status = (unsigned int)module_event_eqe->module_status & PORT_MODULE_EVENT_MODULE_STATUS_MASK; error_type = (unsigned int)module_event_eqe->error_type & PORT_MODULE_EVENT_ERROR_TYPE_MASK; if (module_status < MLX5_MODULE_STATUS_NUM) dev->priv.pme_stats.status_counters[module_status]++; switch (module_status) { case MLX5_MODULE_STATUS_PLUGGED_ENABLED: mlx5_core_info(dev, "Module %u, status: plugged and enabled\n", module_num); break; case MLX5_MODULE_STATUS_UNPLUGGED: mlx5_core_info(dev, "Module %u, status: unplugged\n", module_num); break; case MLX5_MODULE_STATUS_ERROR: mlx5_core_err(dev, "Module %u, status: error, %s (%d)\n", module_num, mlx5_port_module_event_error_type_to_string(error_type), error_type); if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) dev->priv.pme_stats.error_counters[error_type]++; break; default: mlx5_core_info(dev, "Module %u, unknown status %d\n", module_num, module_status); } /* store module status */ if (module_num < MLX5_MAX_PORTS) dev->module_status[module_num] = module_status; } static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { u8 port = (eqe->data.port.port >> 4) & 0xf; u32 rqn; struct mlx5_eqe_general_notification_event *general_event; switch (eqe->sub_type) { case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT: general_event = &eqe->data.general_notifications; rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) & 0xffffff; break; case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT: mlx5_trigger_health_watchdog(dev); break; default: mlx5_core_warn(dev, "general event with unrecognized subtype: port %d, sub_type %d\n", port, eqe->sub_type); break; } } void mlx5_disable_interrupts(struct mlx5_core_dev *dev) { int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; int x; for (x = 0; x != nvec; x++) disable_irq(dev->priv.msix_arr[x].vector); } void mlx5_poll_interrupts(struct mlx5_core_dev *dev) { struct mlx5_eq *eq; if (unlikely(dev->priv.disable_irqs != 0)) return; mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq); mlx5_eq_int(dev, &dev->priv.eq_table.async_eq); mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq); list_for_each_entry(eq, 
&dev->priv.eq_table.comp_eqs_list, list) mlx5_eq_int(dev, eq); } Index: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c =================================================================== --- stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c (revision 361413) +++ stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c (revision 361414) @@ -1,2005 +1,2011 @@ /*- * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mlx5_core.h" #include "eswitch.h" #include "fs_core.h" #ifdef PCI_IOV #include #include #include #endif static const char mlx5_version[] = "Mellanox Core driver " DRIVER_VERSION " (" DRIVER_RELDATE ")"; MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1); MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1); MODULE_DEPEND(mlx5, firmware, 1, 1, 1); MODULE_VERSION(mlx5, 1); SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls"); int mlx5_core_debug_mask; SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN, &mlx5_core_debug_mask, 0, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); #define MLX5_DEFAULT_PROF 2 static int mlx5_prof_sel = MLX5_DEFAULT_PROF; SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN, &mlx5_prof_sel, 0, "profile selector. Valid range 0 - 2"); static int mlx5_fast_unload_enabled = 1; SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN, &mlx5_fast_unload_enabled, 0, "Set to enable fast unload. 
Clear to disable."); #define NUMA_NO_NODE -1 static LIST_HEAD(intf_list); static LIST_HEAD(dev_list); static DEFINE_MUTEX(intf_mutex); struct mlx5_device_context { struct list_head list; struct mlx5_interface *intf; void *context; }; enum { MLX5_ATOMIC_REQ_MODE_BE = 0x0, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, }; static struct mlx5_profile profiles[] = { [0] = { .mask = 0, }, [1] = { .mask = MLX5_PROF_MASK_QP_SIZE, .log_max_qp = 12, }, [2] = { .mask = MLX5_PROF_MASK_QP_SIZE | MLX5_PROF_MASK_MR_CACHE, .log_max_qp = 17, .mr_cache[0] = { .size = 500, .limit = 250 }, .mr_cache[1] = { .size = 500, .limit = 250 }, .mr_cache[2] = { .size = 500, .limit = 250 }, .mr_cache[3] = { .size = 500, .limit = 250 }, .mr_cache[4] = { .size = 500, .limit = 250 }, .mr_cache[5] = { .size = 500, .limit = 250 }, .mr_cache[6] = { .size = 500, .limit = 250 }, .mr_cache[7] = { .size = 500, .limit = 250 }, .mr_cache[8] = { .size = 500, .limit = 250 }, .mr_cache[9] = { .size = 500, .limit = 250 }, .mr_cache[10] = { .size = 500, .limit = 250 }, .mr_cache[11] = { .size = 500, .limit = 250 }, .mr_cache[12] = { .size = 64, .limit = 32 }, .mr_cache[13] = { .size = 32, .limit = 16 }, .mr_cache[14] = { .size = 16, .limit = 8 }, }, [3] = { .mask = MLX5_PROF_MASK_QP_SIZE, .log_max_qp = 17, }, }; #ifdef PCI_IOV static const char iov_mac_addr_name[] = "mac-addr"; #endif static int set_dma_caps(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n"); return err; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n"); return err; } } dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); return err; } int mlx5_pci_read_power_status(struct mlx5_core_dev *dev, u16 *p_power, u8 *p_status) { u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {}; int err; err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0); *p_status = MLX5_GET(mpein_reg, out, pwr_status); *p_power = MLX5_GET(mpein_reg, out, pci_power); return err; } static int mlx5_pci_enable_device(struct mlx5_core_dev *dev) { struct pci_dev *pdev = dev->pdev; int err = 0; mutex_lock(&dev->pci_status_mutex); if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { err = pci_enable_device(pdev); if (!err) dev->pci_status = MLX5_PCI_STATUS_ENABLED; } mutex_unlock(&dev->pci_status_mutex); return err; } static void mlx5_pci_disable_device(struct mlx5_core_dev *dev) { struct pci_dev *pdev = dev->pdev; mutex_lock(&dev->pci_status_mutex); if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { pci_disable_device(pdev); dev->pci_status = MLX5_PCI_STATUS_DISABLED; } mutex_unlock(&dev->pci_status_mutex); } static int request_bar(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err = 0; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { mlx5_core_err(dev, "Missing registers BAR, aborting\n"); return -ENODEV; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n"); return err; } static void release_bar(struct pci_dev *pdev) { 
pci_release_regions(pdev); } static int mlx5_enable_msix(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int limit = dev->msix_eqvec; int nvec = MLX5_EQ_VEC_COMP_BASE; int i; if (limit > 0) nvec += limit; else nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus(); if (nvec > num_eqs) nvec = num_eqs; if (nvec > 256) nvec = 256; /* limit of firmware API */ if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL); for (i = 0; i < nvec; i++) priv->msix_arr[i].entry = i; nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; } static void mlx5_disable_msix(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; pci_disable_msix(dev->pdev); kfree(priv->msix_arr); } struct mlx5_reg_host_endianess { u8 he; u8 rsvd[15]; }; #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) enum { MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | MLX5_DEV_CAP_FLAG_DCT | MLX5_DEV_CAP_FLAG_DRAIN_SIGERR, }; static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size) { switch (size) { case 128: return 0; case 256: return 1; case 512: return 2; case 1024: return 3; case 2048: return 4; case 4096: return 5; default: mlx5_core_warn(dev, "invalid pkey table size %d\n", size); return 0; } } static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); void *out, *hca_caps; u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) { mlx5_core_warn(dev, "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", cap_type, cap_mode, err); goto query_ex; } hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: memcpy(dev->hca_caps_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: memcpy(dev->hca_caps_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: mlx5_core_warn(dev, "Tried to query dev cap type(%x) with wrong opmode(%x)\n", cap_type, cap_mode); err = -EINVAL; break; } query_ex: kfree(out); return err; } int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) { int ret; ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); if (ret) return ret; return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); } static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) { u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); } static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; int err = -ENOMEM; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); void *set_hca_cap; set_ctx = kzalloc(set_sz, GFP_KERNEL); err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); if (err) goto query_ex; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); memcpy(set_hca_cap, 
dev->hca_caps_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 128); /* we limit the size of the pkey table to 128 entries for now */ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, to_fw_pkey_sz(dev, 128)); if (prof->mask & MLX5_PROF_MASK_QP_SIZE) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, prof->log_max_qp); /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); /* enable drain sigerr */ MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1); MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); err = set_caps(dev, set_ctx, set_sz); query_ex: kfree(set_ctx); return err; } static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) { void *set_ctx; void *set_hca_cap; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); int req_endianness; int err; if (MLX5_CAP_GEN(dev, atomic)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); if (err) return err; } else { return 0; } req_endianness = MLX5_CAP_ATOMIC(dev, supported_atomic_req_8B_endianess_mode_1); if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) return 0; set_ctx = kzalloc(set_sz, GFP_KERNEL); if (!set_ctx) return -ENOMEM; MLX5_SET(set_hca_cap_in, set_ctx, op_mod, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1); set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); /* Set requestor to host endianness */ MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); err = set_caps(dev, set_ctx, set_sz); kfree(set_ctx); return err; } static int set_hca_ctrl(struct mlx5_core_dev *dev) { struct mlx5_reg_host_endianess he_in; struct mlx5_reg_host_endianess he_out; int err; if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && !MLX5_CAP_GEN(dev, roce)) return 0; memset(&he_in, 0, sizeof(he_in)); he_in.he = MLX5_SET_HOST_ENDIANNESS; err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), &he_out, sizeof(he_out), MLX5_REG_HOST_ENDIANNESS, 0, 1); return err; } static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) { u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; u32 sup_issi; int err; MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out)); if (err) { u32 syndrome; u8 status; mlx5_cmd_mbox_status(query_out, &status, &syndrome); if (status == MLX5_CMD_STAT_BAD_OP_ERR) { mlx5_core_dbg(dev, "Only ISSI 0 is supported\n"); return 0; } mlx5_core_err(dev, "failed to query ISSI\n"); return err; } sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); if (sup_issi & (1 << 1)) { u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); MLX5_SET(set_issi_in, 
set_in, current_issi, 1); err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); if (err) { mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err); return err; } dev->issi = 1; return 0; } else if (sup_issi & (1 << 0)) { return 0; } return -ENOTSUPP; } int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq; int err = -ENOENT; spin_lock(&table->lock); list_for_each_entry(eq, &table->comp_eqs_list, list) { if (eq->index == vector) { *eqn = eq->eqn; *irqn = eq->irqn; err = 0; break; } } spin_unlock(&table->lock); return err; } EXPORT_SYMBOL(mlx5_vector2eqn); static void free_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq, *n; spin_lock(&table->lock); list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { list_del(&eq->list); spin_unlock(&table->lock); if (mlx5_destroy_unmap_eq(dev, eq)) mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); kfree(eq); spin_lock(&table->lock); } spin_unlock(&table->lock); } static int alloc_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq; int ncomp_vec; int nent; int err; int i; INIT_LIST_HEAD(&table->comp_eqs_list); ncomp_vec = table->num_comp_vectors; nent = MLX5_COMP_EQ_SIZE; for (i = 0; i < ncomp_vec; i++) { eq = kzalloc(sizeof(*eq), GFP_KERNEL); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, &dev->priv.uuari.uars[0]); if (err) { kfree(eq); goto clean; } mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); eq->index = i; spin_lock(&table->lock); list_add_tail(&eq->list, &table->comp_eqs_list); spin_unlock(&table->lock); } return 0; clean: free_comp_eqs(dev); return err; } static int map_bf_area(struct mlx5_core_dev *dev) { resource_size_t bf_start = pci_resource_start(dev->pdev, 0); resource_size_t bf_len = pci_resource_len(dev->pdev, 0); dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); return dev->priv.bf_mapping ? 
0 : -ENOMEM; } static void unmap_bf_area(struct mlx5_core_dev *dev) { if (dev->priv.bf_mapping) io_mapping_free(dev->priv.bf_mapping); } static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; } static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, u32 warn_time_mili) { int warn = jiffies + msecs_to_jiffies(warn_time_mili); int end = jiffies + msecs_to_jiffies(max_wait_mili); int err = 0; MPASS(max_wait_mili > warn_time_mili); while (fw_initializing(dev) == 1) { if (time_after(jiffies, end)) { err = -EBUSY; break; } if (warn_time_mili && time_after(jiffies, warn)) { mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %u s\n", (unsigned int)(jiffies_to_msecs(end - warn) / 1000)); warn = jiffies + msecs_to_jiffies(warn_time_mili); } msleep(FW_INIT_WAIT_MS); } if (err != 0) mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n", ioread32be(&dev->iseg->initializing)); return err; } static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; dev_ctx->intf = intf; CURVNET_SET_QUIET(vnet0); dev_ctx->context = intf->add(dev); CURVNET_RESTORE(); if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); } } static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf == intf) { spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); intf->remove(dev, dev_ctx->context); kfree(dev_ctx); return; } } int mlx5_register_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; mutex_lock(&intf_mutex); list_add_tail(&priv->dev_list, &dev_list); list_for_each_entry(intf, &intf_list, list) mlx5_add_device(intf, priv); mutex_unlock(&intf_mutex); return 0; } void mlx5_unregister_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) mlx5_remove_device(intf, priv); list_del(&priv->dev_list); mutex_unlock(&intf_mutex); } int mlx5_register_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; if (!intf->add || !intf->remove) return -EINVAL; mutex_lock(&intf_mutex); list_add_tail(&intf->list, &intf_list); list_for_each_entry(priv, &dev_list, dev_list) mlx5_add_device(intf, priv); mutex_unlock(&intf_mutex); return 0; } EXPORT_SYMBOL(mlx5_register_interface); void mlx5_unregister_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; mutex_lock(&intf_mutex); list_for_each_entry(priv, &dev_list, dev_list) mlx5_remove_device(intf, priv); list_del(&intf->list); mutex_unlock(&intf_mutex); } EXPORT_SYMBOL(mlx5_unregister_interface); void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) { struct mlx5_priv *priv = &mdev->priv; struct mlx5_device_context *dev_ctx; unsigned long flags; void *result = NULL; spin_lock_irqsave(&priv->ctx_lock, flags); list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) if ((dev_ctx->intf->protocol == protocol) && dev_ctx->intf->get_dev) { result = 
dev_ctx->intf->get_dev(dev_ctx->context); break; } spin_unlock_irqrestore(&priv->ctx_lock, flags); return result; } EXPORT_SYMBOL(mlx5_get_protocol_dev); static int mlx5_auto_fw_update; SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &mlx5_auto_fw_update, 0, "Allow automatic firmware update on driver start"); static int mlx5_firmware_update(struct mlx5_core_dev *dev) { const struct firmware *fw; int err; TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update); if (!mlx5_auto_fw_update) return (0); fw = firmware_get("mlx5fw_mfa"); if (fw) { err = mlx5_firmware_flash(dev, fw); firmware_put(fw, FIRMWARE_UNLOAD); } else return (-ENOENT); return err; } static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; device_t bsddev; int err; pdev = dev->pdev; bsddev = pdev->dev.bsddev; pci_set_drvdata(dev->pdev, dev); strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); priv->name[MLX5_MAX_NAME_LEN - 1] = 0; mutex_init(&priv->pgdir_mutex); INIT_LIST_HEAD(&priv->pgdir_list); spin_lock_init(&priv->mkey_lock); priv->numa_node = NUMA_NO_NODE; err = mlx5_pci_enable_device(dev); if (err) { mlx5_core_err(dev, "Cannot enable PCI device, aborting\n"); goto err_dbg; } err = request_bar(pdev); if (err) { mlx5_core_err(dev, "error requesting BARs, aborting\n"); goto err_disable; } pci_set_master(pdev); err = set_dma_caps(pdev); if (err) { mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n"); goto err_clr_master; } dev->iseg_base = pci_resource_start(dev->pdev, 0); dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); if (!dev->iseg) { err = -ENOMEM; mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n"); goto err_clr_master; } return 0; err_clr_master: release_bar(dev->pdev); err_disable: mlx5_pci_disable_device(dev); err_dbg: return err; } static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { #ifdef PCI_IOV if (MLX5_CAP_GEN(dev, eswitch_flow_table)) pci_iov_detach(dev->pdev->dev.bsddev); #endif iounmap(dev->iseg); release_bar(dev->pdev); mlx5_pci_disable_device(dev); } static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { int err; err = mlx5_vsc_find_cap(dev); if (err) mlx5_core_err(dev, "Unable to find vendor specific capabilities\n"); err = mlx5_query_hca_caps(dev); if (err) { mlx5_core_err(dev, "query hca failed\n"); goto out; } err = mlx5_query_board_id(dev); if (err) { mlx5_core_err(dev, "query board id failed\n"); goto out; } err = mlx5_eq_init(dev); if (err) { mlx5_core_err(dev, "failed to initialize eq\n"); goto out; } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); err = mlx5_init_cq_table(dev); if (err) { mlx5_core_err(dev, "failed to initialize cq table\n"); goto err_eq_cleanup; } mlx5_init_qp_table(dev); mlx5_init_srq_table(dev); mlx5_init_mr_table(dev); mlx5_init_reserved_gids(dev); mlx5_fpga_init(dev); return 0; err_eq_cleanup: mlx5_eq_cleanup(dev); out: return err; } static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { mlx5_fpga_cleanup(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_eq_cleanup(dev); } static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, bool boot) { int err; mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "interface is up, NOP\n"); goto out; } mlx5_core_dbg(dev, "firmware version: 
%d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); /* * On load removing any previous indication of internal error, * device is up */ dev->state = MLX5_DEVICE_STATE_UP; /* wait for firmware to accept initialization segments configurations */ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL); if (err) { dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", FW_PRE_INIT_TIMEOUT_MILI); goto out_err; } err = mlx5_cmd_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); goto out_err; } err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0); if (err) { mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI); goto err_cmd_cleanup; } err = mlx5_core_enable_hca(dev, 0); if (err) { mlx5_core_err(dev, "enable hca failed\n"); goto err_cmd_cleanup; } err = mlx5_core_set_issi(dev); if (err) { mlx5_core_err(dev, "failed to set issi\n"); goto err_disable_hca; } err = mlx5_pagealloc_start(dev); if (err) { mlx5_core_err(dev, "mlx5_pagealloc_start failed\n"); goto err_disable_hca; } err = mlx5_satisfy_startup_pages(dev, 1); if (err) { mlx5_core_err(dev, "failed to allocate boot pages\n"); goto err_pagealloc_stop; } err = set_hca_ctrl(dev); if (err) { mlx5_core_err(dev, "set_hca_ctrl failed\n"); goto reclaim_boot_pages; } err = handle_hca_cap(dev); if (err) { mlx5_core_err(dev, "handle_hca_cap failed\n"); goto reclaim_boot_pages; } err = handle_hca_cap_atomic(dev); if (err) { mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); goto reclaim_boot_pages; } err = mlx5_satisfy_startup_pages(dev, 0); if (err) { mlx5_core_err(dev, "failed to allocate init pages\n"); goto reclaim_boot_pages; } err = mlx5_cmd_init_hca(dev); if (err) { mlx5_core_err(dev, "init hca failed\n"); goto reclaim_boot_pages; } mlx5_start_health_poll(dev); if (boot && mlx5_init_once(dev, priv)) { mlx5_core_err(dev, "sw objs init failed\n"); goto err_stop_poll; } err = mlx5_enable_msix(dev); if (err) { mlx5_core_err(dev, "enable msix failed\n"); goto err_cleanup_once; } err = mlx5_alloc_uuars(dev, &priv->uuari); if (err) { mlx5_core_err(dev, "Failed allocating uar, aborting\n"); goto err_disable_msix; } err = mlx5_start_eqs(dev); if (err) { mlx5_core_err(dev, "Failed to start pages and async EQs\n"); goto err_free_uar; } err = alloc_comp_eqs(dev); if (err) { mlx5_core_err(dev, "Failed to alloc completion EQs\n"); goto err_stop_eqs; } if (map_bf_area(dev)) mlx5_core_err(dev, "Failed to map blue flame area\n"); err = mlx5_init_fs(dev); if (err) { mlx5_core_err(dev, "flow steering init %d\n", err); goto err_free_comp_eqs; } err = mlx5_mpfs_init(dev); if (err) { mlx5_core_err(dev, "mpfs init failed %d\n", err); goto err_fs; } err = mlx5_fpga_device_start(dev); if (err) { mlx5_core_err(dev, "fpga device start failed %d\n", err); goto err_mpfs; } err = mlx5_register_device(dev); if (err) { mlx5_core_err(dev, "mlx5_register_device failed %d\n", err); goto err_fpga; } set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); out: mutex_unlock(&dev->intf_state_mutex); return 0; err_fpga: mlx5_fpga_device_stop(dev); err_mpfs: mlx5_mpfs_destroy(dev); err_fs: mlx5_cleanup_fs(dev); err_free_comp_eqs: free_comp_eqs(dev); unmap_bf_area(dev); err_stop_eqs: mlx5_stop_eqs(dev); err_free_uar: mlx5_free_uuars(dev, &priv->uuari); err_disable_msix: mlx5_disable_msix(dev); err_cleanup_once: if (boot) mlx5_cleanup_once(dev); err_stop_poll: mlx5_stop_health_poll(dev, boot); if (mlx5_cmd_teardown_hca(dev)) { 
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); goto out_err; } reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_pagealloc_stop: mlx5_pagealloc_stop(dev); err_disable_hca: mlx5_core_disable_hca(dev); err_cmd_cleanup: mlx5_cmd_cleanup(dev); out_err: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mutex_unlock(&dev->intf_state_mutex); return err; } static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, bool cleanup) { int err = 0; if (cleanup) mlx5_drain_health_recovery(dev); mutex_lock(&dev->intf_state_mutex); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); if (cleanup) mlx5_cleanup_once(dev); goto out; } mlx5_unregister_device(dev); mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_fpga_device_stop(dev); mlx5_mpfs_destroy(dev); mlx5_cleanup_fs(dev); unmap_bf_area(dev); mlx5_wait_for_reclaim_vfs_pages(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); mlx5_disable_msix(dev); if (cleanup) mlx5_cleanup_once(dev); mlx5_stop_health_poll(dev, cleanup); err = mlx5_cmd_teardown_hca(dev); if (err) { mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); goto out; } mlx5_pagealloc_stop(dev); mlx5_reclaim_startup_pages(dev); mlx5_core_disable_hca(dev); mlx5_cmd_cleanup(dev); out: clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mutex_unlock(&dev->intf_state_mutex); return err; } void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param) { struct mlx5_priv *priv = &dev->priv; struct mlx5_device_context *dev_ctx; unsigned long flags; spin_lock_irqsave(&priv->ctx_lock, flags); list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) dev_ctx->intf->event(dev, dev_ctx->context, event, param); spin_unlock_irqrestore(&priv->ctx_lock, flags); } struct mlx5_core_event_handler { void (*event)(struct mlx5_core_dev *dev, enum mlx5_dev_event event, void *data); }; #define MLX5_STATS_DESC(a, b, c, d, e, ...) 
d, e, #define MLX5_PORT_MODULE_ERROR_STATS(m) \ m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \ m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \ m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \ m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \ m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \ m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \ m(+1, u64, high_temp, "high_temp", "Module High Temperature") \ m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted") \ -m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") +m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") \ +m(+1, u64, laster_tec_failure, "laster_tec_failure", "Laster TEC failure") \ +m(+1, u64, high_current, "high_current", "High current") \ +m(+1, u64, high_voltage, "high_voltage", "High voltage") \ +m(+1, u64, pcie_sys_power_slot_exceeded, "pcie_sys_power_slot_exceeded", "PCIe system power slot Exceeded") \ +m(+1, u64, high_power, "high_power", "High power") \ +m(+1, u64, module_state_machine_fault, "module_state_machine_fault", "Module State Machine fault") static const char *mlx5_pme_err_desc[] = { MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC) }; static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; struct mlx5_priv *priv; device_t bsddev = pdev->dev.bsddev; #ifdef PCI_IOV nvlist_t *pf_schema, *vf_schema; int num_vfs, sriov_pos; #endif int i,err; struct sysctl_oid *pme_sysctl_node; struct sysctl_oid *pme_err_sysctl_node; struct sysctl_oid *cap_sysctl_node; struct sysctl_oid *current_cap_sysctl_node; struct sysctl_oid *max_cap_sysctl_node; dev = kzalloc(sizeof(*dev), GFP_KERNEL); priv = &dev->priv; if (id) priv->pci_dev_data = id->driver_data; if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) { device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF); mlx5_prof_sel = MLX5_DEFAULT_PROF; } dev->profile = &profiles[mlx5_prof_sel]; dev->pdev = pdev; dev->event = mlx5_core_event; /* Set desc */ device_set_desc(bsddev, mlx5_version); sysctl_ctx_init(&dev->sysctl_ctx); SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0, "Maximum number of MSIX event queue vectors, if set"); SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0, "0:Invalid 1:Sufficient 2:Insufficient"); SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0, "Current power value in Watts"); pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), OID_AUTO, "pme_stats", CTLFLAG_RD, NULL, "Port module event statistics"); if (pme_sysctl_node == NULL) { err = -ENOMEM; goto clean_sysctl_ctx; } pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO, "errors", CTLFLAG_RD, NULL, "Port module event error statistics"); if (pme_err_sysctl_node == NULL) { err = -ENOMEM; goto clean_sysctl_ctx; } SYSCTL_ADD_U64(&dev->sysctl_ctx, SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO, "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE, 
&dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED], 0, "Number of times module plugged"); SYSCTL_ADD_U64(&dev->sysctl_ctx, SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO, "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED], 0, "Number of times module unplugged"); for (i = 0 ; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) { SYSCTL_ADD_U64(&dev->sysctl_ctx, SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO, mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->priv.pme_stats.error_counters[i], 0, mlx5_pme_err_desc[2 * i + 1]); } cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hardware capabilities raw bitstrings"); if (cap_sysctl_node == NULL) { err = -ENOMEM; goto clean_sysctl_ctx; } current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); if (current_cap_sysctl_node == NULL) { err = -ENOMEM; goto clean_sysctl_ctx; } max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); if (max_cap_sysctl_node == NULL) { err = -ENOMEM; goto clean_sysctl_ctx; } SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_GENERAL], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_GENERAL], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ODP], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ODP], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ATOMIC], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ATOMIC], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ROCE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ROCE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 
SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_ESWITCH], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_ESWITCH], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_SNAPSHOT], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_QOS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_QOS], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 
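/* The same current/max pattern continues for the debug capability group, followed by the PCAM/MCAM/QCAM/FPGA access-register bitmasks. */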
SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(current_cap_sysctl_node), OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_cur[MLX5_CAP_DEBUG], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(max_cap_sysctl_node), OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->hca_caps_max[MLX5_CAP_DEBUG], MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", ""); SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, SYSCTL_CHILDREN(cap_sysctl_node), OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE, &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", ""); INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW); err = mlx5_pci_init(dev, priv); if (err) { mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err); goto clean_dev; } err = mlx5_health_init(dev); if (err) { mlx5_core_err(dev, "mlx5_health_init failed %d\n", err); goto close_pci; } mlx5_pagealloc_init(dev); err = mlx5_load_one(dev, priv, true); if (err) { mlx5_core_err(dev, "mlx5_load_one failed %d\n", err); goto clean_health; } mlx5_fwdump_prep(dev); mlx5_firmware_update(dev); #ifdef PCI_IOV if (MLX5_CAP_GEN(dev, vport_group_manager)) { if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) { num_vfs = pci_read_config(bsddev, sriov_pos + PCIR_SRIOV_TOTAL_VFS, 2); } else { mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n"); num_vfs = 0; } err = mlx5_eswitch_init(dev, 1 + num_vfs); if (err == 0) { pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, iov_mac_addr_name, 0, NULL); err = pci_iov_attach(bsddev, pf_schema, vf_schema); if (err != 0) { device_printf(bsddev, "Failed to initialize SR-IOV support, error %d\n", err); } } else { mlx5_core_err(dev, "eswitch init failed, error %d\n", err); } } #endif pci_save_state(bsddev); return 0; clean_health: mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); clean_dev: mtx_destroy(&dev->dump_lock); clean_sysctl_ctx: sysctl_ctx_free(&dev->sysctl_ctx); kfree(dev); return err; } static void remove_one(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; if (mlx5_unload_one(dev, priv, true)) { mlx5_core_err(dev, "mlx5_unload_one failed\n"); mlx5_health_cleanup(dev); return; } mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_fwdump_clean(dev); mlx5_pci_close(dev, priv); mtx_destroy(&dev->dump_lock); pci_set_drvdata(pdev, NULL); sysctl_ctx_free(&dev->sysctl_ctx); kfree(dev); } static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; mlx5_core_info(dev, "%s was called\n", __func__); mlx5_enter_error_state(dev, false); mlx5_unload_one(dev, priv, false); if (state) { mlx5_drain_health_wq(dev); 
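/* Health work is drained before the device is disabled so no recovery handler runs against a powered-down function. */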
mlx5_pci_disable_device(dev); } return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err = 0; mlx5_core_info(dev,"%s was called\n", __func__); err = mlx5_pci_enable_device(dev); if (err) { mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n" ,err); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0); pci_restore_state(pdev->dev.bsddev); pci_save_state(pdev->dev.bsddev); return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } /* wait for the device to show vital signs. For now we check * that we can read the device ID and that the health buffer * shows a non-zero value which is different from 0xffffffff */ static void wait_vital(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_health *health = &dev->priv.health; const int niter = 100; u32 count; u16 did; int i; /* Wait for firmware to be ready after reset */ msleep(1000); for (i = 0; i < niter; i++) { if (pci_read_config_word(pdev, 2, &did)) { mlx5_core_warn(dev, "failed reading config word\n"); break; } if (did == pdev->device) { mlx5_core_info(dev, "device ID correctly read after %d iterations\n", i); break; } msleep(50); } if (i == niter) mlx5_core_warn(dev, "could not read device ID\n"); for (i = 0; i < niter; i++) { count = ioread32be(health->health_counter); if (count && count != 0xffffffff) { mlx5_core_info(dev, "Counter value 0x%x after %d iterations\n", count, i); break; } msleep(50); } if (i == niter) mlx5_core_warn(dev, "could not read health counter\n"); } static void mlx5_pci_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; int err; mlx5_core_info(dev,"%s was called\n", __func__); wait_vital(pdev); err = mlx5_load_one(dev, priv, false); if (err) mlx5_core_err(dev, "mlx5_load_one failed with error code: %d\n" ,err); else mlx5_core_info(dev,"device recovered\n"); } static const struct pci_error_handlers mlx5_err_handler = { .error_detected = mlx5_pci_err_detected, .slot_reset = mlx5_pci_slot_reset, .resume = mlx5_pci_resume }; #ifdef PCI_IOV static int mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) { struct pci_dev *pdev; struct mlx5_core_dev *core_dev; struct mlx5_priv *priv; int err; pdev = device_get_softc(dev); core_dev = pci_get_drvdata(pdev); priv = &core_dev->priv; if (priv->eswitch == NULL) return (ENXIO); if (priv->eswitch->total_vports < num_vfs + 1) num_vfs = priv->eswitch->total_vports - 1; err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs); return (-err); } static void mlx5_iov_uninit(device_t dev) { struct pci_dev *pdev; struct mlx5_core_dev *core_dev; struct mlx5_priv *priv; pdev = device_get_softc(dev); core_dev = pci_get_drvdata(pdev); priv = &core_dev->priv; mlx5_eswitch_disable_sriov(priv->eswitch); } static int mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) { struct pci_dev *pdev; struct mlx5_core_dev *core_dev; struct mlx5_priv *priv; const void *mac; size_t mac_size; int error; pdev = device_get_softc(dev); core_dev = pci_get_drvdata(pdev); priv = &core_dev->priv; if (vfnum + 1 >= priv->eswitch->total_vports) return (ENXIO); if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) { mac = nvlist_get_binary(vf_config, iov_mac_addr_name, &mac_size); error = 
-mlx5_eswitch_set_vport_mac(priv->eswitch, vfnum + 1, __DECONST(u8 *, mac)); } error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1, VPORT_STATE_FOLLOW); if (error != 0) { mlx5_core_err(core_dev, "upping vport for VF %d failed, error %d\n", vfnum + 1, error); } error = -mlx5_core_enable_hca(core_dev, vfnum + 1); if (error != 0) { mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n", vfnum + 1, error); } return (error); } #endif static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) { bool fast_teardown, force_teardown; int err; if (!mlx5_fast_unload_enabled) { mlx5_core_dbg(dev, "fast unload is disabled by user\n"); return -EOPNOTSUPP; } fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); force_teardown = MLX5_CAP_GEN(dev, force_teardown); mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); if (!fast_teardown && !force_teardown) return -EOPNOTSUPP; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); return -EAGAIN; } /* Panic tear down fw command will stop the PCI bus communication * with the HCA, so the health poll is no longer needed. */ mlx5_drain_health_wq(dev); mlx5_stop_health_poll(dev, false); err = mlx5_cmd_fast_teardown_hca(dev); if (!err) goto done; err = mlx5_cmd_force_teardown_hca(dev); if (!err) goto done; mlx5_core_dbg(dev, "Firmware couldn't do fast unload, error: %d\n", err); mlx5_start_health_poll(dev); return err; done: mlx5_enter_error_state(dev, true); return 0; } static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev) { int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; int x; mdev->priv.disable_irqs = 1; /* wait for all IRQ handlers to finish processing */ for (x = 0; x != nvec; x++) synchronize_irq(mdev->priv.msix_arr[x].vector); } static void shutdown_one(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; int err; /* enter polling mode */ mlx5_cmd_use_polling(dev); set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state); /* disable all interrupts */ mlx5_shutdown_disable_interrupts(dev); err = mlx5_try_fast_unload(dev); if (err) mlx5_unload_one(dev, priv, false); mlx5_pci_disable_device(dev); } static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */ { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */ { PCI_VDEVICE(MELLANOX, 4121) }, { PCI_VDEVICE(MELLANOX, 4122) }, { PCI_VDEVICE(MELLANOX, 4123) }, { PCI_VDEVICE(MELLANOX, 4124) }, { PCI_VDEVICE(MELLANOX, 4125) }, { PCI_VDEVICE(MELLANOX, 4126) }, { PCI_VDEVICE(MELLANOX, 4127) }, { PCI_VDEVICE(MELLANOX, 4128) }, { PCI_VDEVICE(MELLANOX, 4129) }, { PCI_VDEVICE(MELLANOX, 4130) }, { PCI_VDEVICE(MELLANOX, 4131) }, { PCI_VDEVICE(MELLANOX, 4132) }, { PCI_VDEVICE(MELLANOX, 4133) }, { PCI_VDEVICE(MELLANOX, 4134) }, { PCI_VDEVICE(MELLANOX, 4135) }, { PCI_VDEVICE(MELLANOX, 4136) }, { PCI_VDEVICE(MELLANOX, 4137) }, { PCI_VDEVICE(MELLANOX, 4138) }, { PCI_VDEVICE(MELLANOX, 4139) }, { PCI_VDEVICE(MELLANOX, 4140) }, { PCI_VDEVICE(MELLANOX, 4141) }, { PCI_VDEVICE(MELLANOX, 4142) }, { 
PCI_VDEVICE(MELLANOX, 4143) }, { PCI_VDEVICE(MELLANOX, 4144) }, { 0, } }; MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); void mlx5_disable_device(struct mlx5_core_dev *dev) { mlx5_pci_err_detected(dev->pdev, 0); } void mlx5_recover_device(struct mlx5_core_dev *dev) { mlx5_pci_disable_device(dev); if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) mlx5_pci_resume(dev->pdev); } struct pci_driver mlx5_core_driver = { .name = DRIVER_NAME, .id_table = mlx5_core_pci_table, .shutdown = shutdown_one, .probe = init_one, .remove = remove_one, .err_handler = &mlx5_err_handler, #ifdef PCI_IOV .bsd_iov_init = mlx5_iov_init, .bsd_iov_uninit = mlx5_iov_uninit, .bsd_iov_add_vf = mlx5_iov_add_vf, #endif }; static int __init init(void) { int err; err = pci_register_driver(&mlx5_core_driver); if (err) goto err_debug; err = mlx5_ctl_init(); if (err) goto err_ctl; return 0; err_ctl: pci_unregister_driver(&mlx5_core_driver); err_debug: return err; } static void __exit cleanup(void) { mlx5_ctl_fini(); pci_unregister_driver(&mlx5_core_driver); } module_init(init); module_exit(cleanup); Index: stable/11 =================================================================== --- stable/11 (revision 361413) +++ stable/11 (revision 361414) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r361277