Index: head/sys/dev/mlx5/device.h =================================================================== --- head/sys/dev/mlx5/device.h (revision 312881) +++ head/sys/dev/mlx5/device.h (revision 312882) @@ -1,1386 +1,1392 @@ /*- - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MLX5_DEVICE_H #define MLX5_DEVICE_H #include #include #include #define FW_INIT_TIMEOUT_MILI 2000 #define FW_INIT_WAIT_MS 2 #if defined(__LITTLE_ENDIAN) #define MLX5_SET_HOST_ENDIANNESS 0 #elif defined(__BIG_ENDIAN) #define MLX5_SET_HOST_ENDIANNESS 0x80 #else #error Host endianness not defined #endif /* helper macros */ #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) #define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld) #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ << 
__mlx5_dw_bit_off(typ, fld))); \ } while (0) #define MLX5_SET_TO_ONES(typ, p, fld) do { \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ __mlx5_mask(typ, fld)) #define MLX5_GET_PR(typ, p, fld) ({ \ u32 ___t = MLX5_GET(typ, p, fld); \ pr_debug(#fld " = 0x%x\n", ___t); \ ___t; \ }) #define MLX5_SET64(typ, p, fld, v) do { \ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ } while (0) #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, + MLX5_CMD_MBOX_SIZE = 1024, MLX5_PCI_CMD_XPORT = 7, MLX5_MKEY_BSF_OCTO_SIZE = 4, MLX5_MAX_PSVS = 4, }; enum { MLX5_EXTENDED_UD_AV = 0x80000000, }; enum { MLX5_CQ_FLAGS_OI = 2, }; enum { MLX5_STAT_RATE_OFFSET = 5, }; enum { MLX5_INLINE_SEG = 0x80000000, }; enum { MLX5_HW_START_PADDING = MLX5_INLINE_SEG, }; enum { MLX5_MIN_PKEY_TABLE_SIZE = 128, MLX5_MAX_LOG_PKEY_TABLE = 5, }; enum { MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 }; enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, MLX5_PERM_REMOTE_READ = 1 << 4, MLX5_PERM_REMOTE_WRITE = 1 << 5, MLX5_PERM_ATOMIC = 1 << 6, MLX5_PERM_UMR_EN = 1 << 7, }; enum { MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, }; enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, MLX5_MKEY_LEN64 = 1 << 31, }; enum { MLX5_EN_RD = (u64)1, MLX5_EN_WR = (u64)2 }; enum { MLX5_BF_REGS_PER_PAGE = 4, MLX5_MAX_UAR_PAGES = 1 << 8, MLX5_NON_FP_BF_REGS_PER_PAGE = 2, MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, }; enum { MLX5_MKEY_MASK_LEN = 1ull << 0, MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, MLX5_MKEY_MASK_START_ADDR = 1ull << 6, MLX5_MKEY_MASK_PD = 1ull << 7, MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, MLX5_MKEY_MASK_BSF_EN = 1ull << 12, MLX5_MKEY_MASK_KEY = 1ull << 13, MLX5_MKEY_MASK_QPN = 1ull << 14, MLX5_MKEY_MASK_LR = 1ull << 17, MLX5_MKEY_MASK_LW = 1ull << 18, MLX5_MKEY_MASK_RR = 1ull << 19, MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, MLX5_MKEY_MASK_FREE = 1ull << 29, }; enum { MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), MLX5_UMR_CHECK_NOT_FREE = (1 << 5), MLX5_UMR_CHECK_FREE = (2 << 5), MLX5_UMR_INLINE = (1 << 7), }; #define MLX5_UMR_MTT_ALIGNMENT 0x40 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT enum { MLX5_EVENT_QUEUE_TYPE_QP = 0, MLX5_EVENT_QUEUE_TYPE_RQ = 1, MLX5_EVENT_QUEUE_TYPE_SQ = 2, }; enum { MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, MLX5_PORT_CHANGE_SUBTYPE_LID = 6, MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, }; enum { MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1, MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE, 
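/*
 * Example (not part of this change): the MLX5_SET()/MLX5_GET() helpers
 * above address fields by bit offset and bit size inside big-endian
 * 32-bit words.  The standalone user-space sketch below reproduces the
 * same arithmetic; the 16-bit field at bit offset 0 is purely
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

static void
set_field(uint32_t *buf, unsigned bit_off, unsigned bit_sz, uint32_t v)
{
	unsigned dw = bit_off / 32;
	unsigned shift = 32 - bit_sz - (bit_off & 0x1f);
	uint32_t mask = (uint32_t)((1ULL << bit_sz) - 1);
	uint32_t dword = ntohl(buf[dw]);

	dword &= ~(mask << shift);
	dword |= (v & mask) << shift;
	buf[dw] = htonl(dword);
}

static uint32_t
get_field(const uint32_t *buf, unsigned bit_off, unsigned bit_sz)
{
	unsigned dw = bit_off / 32;
	unsigned shift = 32 - bit_sz - (bit_off & 0x1f);
	uint32_t mask = (uint32_t)((1ULL << bit_sz) - 1);

	return ((ntohl(buf[dw]) >> shift) & mask);
}

int
main(void)
{
	uint32_t cmd[4] = { 0 };

	set_field(cmd, 0, 16, 0x107);	/* hypothetical 16-bit field in the first dword */
	printf("field = 0x%x\n", get_field(cmd, 0, 16));
	return (0);
}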
MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE, MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE, MLX5_MAX_INLINE_RECEIVE_SIZE = 64 }; enum { MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33, MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48, }; enum { MLX5_ROCE_VERSION_1 = 0, MLX5_ROCE_VERSION_1_5 = 1, MLX5_ROCE_VERSION_2 = 2, }; enum { MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1, MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5, MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2, }; enum { MLX5_ROCE_L3_TYPE_IPV4 = 0, MLX5_ROCE_L3_TYPE_IPV6 = 1, }; enum { MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1, MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2, }; enum { MLX5_OPCODE_NOP = 0x00, MLX5_OPCODE_SEND_INVAL = 0x01, MLX5_OPCODE_RDMA_WRITE = 0x08, MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, MLX5_OPCODE_SEND = 0x0a, MLX5_OPCODE_SEND_IMM = 0x0b, MLX5_OPCODE_LSO = 0x0e, MLX5_OPCODE_RDMA_READ = 0x10, MLX5_OPCODE_ATOMIC_CS = 0x11, MLX5_OPCODE_ATOMIC_FA = 0x12, MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, MLX5_RECV_OPCODE_SEND_IMM = 0x02, MLX5_RECV_OPCODE_SEND_INVAL = 0x03, MLX5_CQE_OPCODE_ERROR = 0x1e, MLX5_CQE_OPCODE_RESIZE = 0x16, MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, MLX5_OPCODE_UMR = 0x25, MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15), }; enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, MLX5_SET_PORT_NODE_GUID = 17, MLX5_SET_PORT_SYS_GUID = 18, MLX5_SET_PORT_GID_TABLE = 19, MLX5_SET_PORT_PKEY_TABLE = 20, }; enum { MLX5_MAX_PAGE_SHIFT = 31 }; enum { MLX5_ADAPTER_PAGE_SHIFT = 12, MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, }; enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; __be16 opmod; }; struct mlx5_outbox_hdr { u8 status; u8 rsvd[3]; __be32 syndrome; }; struct mlx5_cmd_set_dc_cnak_mbox_in { struct mlx5_inbox_hdr hdr; u8 enable; u8 reserved[47]; __be64 pa; }; struct mlx5_cmd_set_dc_cnak_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; __be32 inlen; __be64 in_ptr; __be32 in[4]; __be32 out[4]; __be64 out_ptr; __be32 outlen; u8 token; u8 sig; u8 rsvd1; u8 status_own; }; struct mlx5_health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; __be32 assert_exit_ptr; __be32 assert_callra; __be32 rsvd1[2]; __be32 fw_ver; __be32 hw_id; __be32 rsvd2; u8 irisc_index; u8 synd; __be16 ext_sync; }; struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; __be32 rsvd0[2]; __be32 cmdq_addr_h; __be32 cmdq_addr_l_sz; __be32 cmd_dbell; __be32 rsvd1[120]; __be32 initializing; struct mlx5_health_buffer health; __be32 rsvd2[880]; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; __be32 health_counter; __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; }; struct mlx5_eqe_comp { __be32 reserved[6]; __be32 cqn; }; struct mlx5_eqe_qp_srq { __be32 
reserved[6]; __be32 qp_srq_n; }; struct mlx5_eqe_cq_err { __be32 cqn; u8 reserved1[7]; u8 syndrome; }; struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; }; struct mlx5_eqe_gpio { __be32 reserved0[2]; __be64 gpio_event; }; struct mlx5_eqe_congestion { u8 type; u8 rsvd0; u8 congestion_level; }; struct mlx5_eqe_stall_vl { u8 rsvd0[3]; u8 port_vl; }; struct mlx5_eqe_cmd { __be32 vector; __be32 rsvd[6]; }; struct mlx5_eqe_page_req { u8 rsvd0[2]; __be16 func_id; __be32 num_pages; __be32 rsvd1[5]; }; struct mlx5_eqe_vport_change { u8 rsvd0[2]; __be16 vport_num; __be32 rsvd1[6]; }; #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF enum { MLX5_MODULE_STATUS_PLUGGED = 0x1, MLX5_MODULE_STATUS_UNPLUGGED = 0x2, MLX5_MODULE_STATUS_ERROR = 0x3, }; enum { MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0, MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1, MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2, MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3, MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4, MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5, MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6, MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7, }; struct mlx5_eqe_port_module_event { u8 rsvd0; u8 module; u8 rsvd1; u8 module_status; u8 rsvd2[2]; u8 error_type; }; union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; struct mlx5_eqe_comp comp; struct mlx5_eqe_qp_srq qp_srq; struct mlx5_eqe_cq_err cq_err; struct mlx5_eqe_port_state port; struct mlx5_eqe_gpio gpio; struct mlx5_eqe_congestion cong; struct mlx5_eqe_stall_vl stall_vl; struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_port_module_event port_module_event; struct mlx5_eqe_vport_change vport_change; } __packed; struct mlx5_eqe { u8 rsvd0; u8 type; u8 rsvd1; u8 sub_type; __be32 rsvd2[7]; union ev_data data; __be16 rsvd3; u8 signature; u8 owner; } __packed; struct mlx5_cmd_prot_block { u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; u8 rsvd0[48]; __be64 next; __be32 block_num; u8 rsvd1; u8 token; u8 ctrl_sig; u8 sig; }; + +#define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \ + (MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE) +CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block)); +CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE); enum { MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, }; struct mlx5_err_cqe { u8 rsvd0[32]; __be32 srqn; u8 rsvd1[18]; u8 vendor_err_synd; u8 syndrome; __be32 s_wqe_opcode_qpn; __be16 wqe_counter; u8 signature; u8 op_own; }; struct mlx5_cqe64 { u8 tunneled_etc; u8 rsvd0[3]; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; __be32 lro_ack_seq_num; __be32 rss_hash_result; u8 rss_hash_type; u8 ml_path; u8 rsvd20[2]; __be16 check_sum; __be16 slid; __be32 flags_rqpn; u8 hds_ip_ext; u8 l4_hdr_type_etc; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; __be64 timestamp; __be32 sop_drop_qpn; __be16 wqe_counter; u8 signature; u8 op_own; }; static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 7) & 1; } static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; } static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) { return (cqe->l4_hdr_type_etc >> 4) & 0x7; } static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe) { return be16_to_cpu(cqe->vlan_info) & 0xfff; } static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac) { memcpy(smac, &cqe->rss_hash_type , 4); 
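/*
 * Sizing sketch for the new MLX5_CMD_MBOX_SIZE constant and the
 * CTASSERT()s above: with MLX5_ADAPTER_PAGE_SIZE = 4096 and
 * MLX5_CMD_MBOX_SIZE = 1024, MLX5_NUM_CMDS_IN_ADAPTER_PAGE is 4, and
 * the 576-byte struct mlx5_cmd_prot_block (512 data + 48 reserved +
 * 8-byte next pointer + 4-byte block number + 4 trailer bytes) fits in
 * every 1024-byte mailbox slot.  The standalone checks below repeat
 * that arithmetic with illustrative EXAMPLE_* names.
 */
#define EXAMPLE_ADAPTER_PAGE_SIZE	4096
#define EXAMPLE_CMD_MBOX_SIZE		1024
#define EXAMPLE_PROT_BLOCK_SIZE		(512 + 48 + 8 + 4 + 4)

_Static_assert(EXAMPLE_ADAPTER_PAGE_SIZE / EXAMPLE_CMD_MBOX_SIZE == 4,
    "four command mailboxes per firmware page");
_Static_assert(EXAMPLE_PROT_BLOCK_SIZE <= EXAMPLE_CMD_MBOX_SIZE,
    "one protection block fits in a single mailbox slot");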
memcpy(smac + 4, &cqe->slid , 2); } static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) { return cqe->l4_hdr_type_etc & 0x1; } static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { return cqe->tunneled_etc & 0x1; } enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, CQE_L4_HDR_TYPE_UDP = 0x2, CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, }; enum { /* source L3 hash types */ CQE_RSS_SRC_HTYPE_IP = 0x3 << 0, CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0, CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0, /* destination L3 hash types */ CQE_RSS_DST_HTYPE_IP = 0x3 << 2, CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2, CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2, /* source L4 hash types */ CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4, CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4, CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4, CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4, /* destination L4 hash types */ CQE_RSS_DST_HTYPE_L4 = 0x3 << 6, CQE_RSS_DST_HTYPE_TCP = 0x1 << 6, CQE_RSS_DST_HTYPE_UDP = 0x2 << 6, CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6, }; enum { CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0, CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1, CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2, }; enum { CQE_L2_OK = 1 << 0, CQE_L3_OK = 1 << 1, CQE_L4_OK = 1 << 2, }; struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; __be32 actual_trans_sig; __be32 expected_reftag; __be32 actual_reftag; __be16 syndrome; u8 rsvd22[2]; __be32 mkey; __be64 err_offset; u8 rsvd30[8]; __be32 qpn; u8 rsvd38[2]; u8 signature; u8 op_own; }; struct mlx5_wqe_srq_next_seg { u8 rsvd0[2]; __be16 next_wqe_index; u8 signature; u8 rsvd1[11]; }; union mlx5_ext_cqe { struct ib_grh grh; u8 inl[64]; }; struct mlx5_cqe128 { union mlx5_ext_cqe inl_grh; struct mlx5_cqe64 cqe64; }; struct mlx5_srq_ctx { u8 state_log_sz; u8 rsvd0[3]; __be32 flags_xrcd; __be32 pgoff_cqn; u8 rsvd1[4]; u8 log_pg_sz; u8 rsvd2[7]; __be32 pd; __be16 lwm; __be16 wqe_cnt; u8 rsvd3[8]; __be64 db_record; }; struct mlx5_create_srq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 input_srqn; u8 rsvd0[4]; struct mlx5_srq_ctx ctx; u8 rsvd1[208]; __be64 pas[0]; }; struct mlx5_create_srq_mbox_out { struct mlx5_outbox_hdr hdr; __be32 srqn; u8 rsvd[4]; }; struct mlx5_destroy_srq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 srqn; u8 rsvd[4]; }; struct mlx5_destroy_srq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_query_srq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 srqn; u8 rsvd0[4]; }; struct mlx5_query_srq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd0[8]; struct mlx5_srq_ctx ctx; u8 rsvd1[32]; __be64 pas[0]; }; struct mlx5_arm_srq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 srqn; __be16 rsvd; __be16 lwm; }; struct mlx5_arm_srq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_cq_context { u8 status; u8 cqe_sz_flags; u8 st; u8 rsvd3; u8 rsvd4[6]; __be16 page_offset; __be32 log_sz_usr_page; __be16 cq_period; __be16 cq_max_count; __be16 rsvd20; __be16 c_eqn; u8 log_pg_sz; u8 rsvd25[7]; __be32 last_notified_index; __be32 solicit_producer_index; __be32 consumer_counter; __be32 producer_counter; u8 rsvd48[8]; __be64 db_record_addr; }; struct mlx5_create_cq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 input_cqn; u8 rsvdx[4]; struct mlx5_cq_context ctx; u8 rsvd6[192]; __be64 pas[0]; }; struct mlx5_create_cq_mbox_out { struct mlx5_outbox_hdr hdr; __be32 cqn; u8 rsvd0[4]; }; struct mlx5_destroy_cq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 cqn; u8 rsvd0[4]; }; struct mlx5_destroy_cq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd0[8]; }; struct mlx5_query_cq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 cqn; u8 
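/*
 * Usage sketch (illustrative only): how the CQE accessors above are
 * typically combined when completing a receive WQE.  'cqe' is assumed
 * to point at a valid, host-owned struct mlx5_cqe64.
 */
static void
example_parse_rx_cqe(struct mlx5_cqe64 *cqe)
{
	u32 byte_cnt = be32_to_cpu(cqe->byte_cnt);
	u8 l4_type = get_cqe_l4_hdr_type(cqe);

	if (cqe_has_vlan(cqe))
		printf("VLAN tag %u, ", get_cqe_vlan(cqe));
	printf("received %u bytes, L4 header type %u\n", byte_cnt, l4_type);
}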
rsvd0[4]; }; struct mlx5_query_cq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd0[8]; struct mlx5_cq_context ctx; u8 rsvd6[16]; __be64 pas[0]; }; struct mlx5_modify_cq_mbox_in { struct mlx5_inbox_hdr hdr; __be32 cqn; __be32 field_select; struct mlx5_cq_context ctx; u8 rsvd[192]; __be64 pas[0]; }; struct mlx5_modify_cq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_eq_context { u8 status; u8 ec_oi; u8 st; u8 rsvd2[7]; __be16 page_pffset; __be32 log_sz_usr_page; u8 rsvd3[7]; u8 intr; u8 log_page_size; u8 rsvd4[15]; __be32 consumer_counter; __be32 produser_counter; u8 rsvd5[16]; }; struct mlx5_create_eq_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[3]; u8 input_eqn; u8 rsvd1[4]; struct mlx5_eq_context ctx; u8 rsvd2[8]; __be64 events_mask; u8 rsvd3[176]; __be64 pas[0]; }; struct mlx5_create_eq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd0[3]; u8 eq_number; u8 rsvd1[4]; }; struct mlx5_map_eq_mbox_in { struct mlx5_inbox_hdr hdr; __be64 mask; u8 mu; u8 rsvd0[2]; u8 eqn; u8 rsvd1[24]; }; struct mlx5_map_eq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_query_eq_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[3]; u8 eqn; u8 rsvd1[4]; }; struct mlx5_query_eq_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; struct mlx5_eq_context ctx; }; enum { MLX5_MKEY_STATUS_FREE = 1 << 6, }; struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation */ u8 status; u8 pcie_control; u8 flags; u8 version; __be32 qpn_mkey7_0; u8 rsvd1[4]; __be32 flags_pd; __be64 start_addr; __be64 len; __be32 bsfs_octo_size; u8 rsvd2[16]; __be32 xlt_oct_size; u8 rsvd3[3]; u8 log2_page_size; u8 rsvd4[4]; }; struct mlx5_query_special_ctxs_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_query_special_ctxs_mbox_out { struct mlx5_outbox_hdr hdr; __be32 dump_fill_mkey; __be32 reserved_lkey; }; struct mlx5_create_mkey_mbox_in { struct mlx5_inbox_hdr hdr; __be32 input_mkey_index; __be32 flags; struct mlx5_mkey_seg seg; u8 rsvd1[16]; __be32 xlat_oct_act_size; __be32 rsvd2; u8 rsvd3[168]; __be64 pas[0]; }; struct mlx5_create_mkey_mbox_out { struct mlx5_outbox_hdr hdr; __be32 mkey; u8 rsvd[4]; }; struct mlx5_query_mkey_mbox_in { struct mlx5_inbox_hdr hdr; __be32 mkey; }; struct mlx5_query_mkey_mbox_out { struct mlx5_outbox_hdr hdr; __be64 pas[0]; }; struct mlx5_modify_mkey_mbox_in { struct mlx5_inbox_hdr hdr; __be32 mkey; __be64 pas[0]; }; struct mlx5_modify_mkey_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_dump_mkey_mbox_in { struct mlx5_inbox_hdr hdr; }; struct mlx5_dump_mkey_mbox_out { struct mlx5_outbox_hdr hdr; __be32 mkey; }; struct mlx5_mad_ifc_mbox_in { struct mlx5_inbox_hdr hdr; __be16 remote_lid; u8 rsvd0; u8 port; u8 rsvd1[4]; u8 data[256]; }; struct mlx5_mad_ifc_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; u8 data[256]; }; struct mlx5_access_reg_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[2]; __be16 register_id; __be32 arg; __be32 data[0]; }; struct mlx5_access_reg_mbox_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; __be32 data[0]; }; #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) enum { MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 }; struct mlx5_allocate_psv_in { struct mlx5_inbox_hdr hdr; __be32 npsv_pd; __be32 rsvd_psv0; }; struct mlx5_allocate_psv_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; __be32 psv_idx[4]; }; struct mlx5_destroy_psv_in { struct mlx5_inbox_hdr hdr; __be32 psv_number; u8 rsvd[4]; }; struct 
mlx5_destroy_psv_out { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; }; static inline int mlx5_host_is_le(void) { #if defined(__LITTLE_ENDIAN) return 1; #elif defined(__BIG_ENDIAN) return 0; #else #error Host endianness not defined #endif } #define MLX5_CMD_OP_MAX 0x939 enum { VPORT_STATE_DOWN = 0x0, VPORT_STATE_UP = 0x1, }; enum { MLX5_L3_PROT_TYPE_IPV4 = 0, MLX5_L3_PROT_TYPE_IPV6 = 1, }; enum { MLX5_L4_PROT_TYPE_TCP = 0, MLX5_L4_PROT_TYPE_UDP = 1, }; enum { MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, }; enum { MLX5_MATCH_OUTER_HEADERS = 1 << 0, MLX5_MATCH_MISC_PARAMETERS = 1 << 1, MLX5_MATCH_INNER_HEADERS = 1 << 2, }; enum { MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2, MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3, MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5, MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6, }; enum { MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0, MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1, MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2 }; enum { MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0, MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1, MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2, MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3 }; enum { MLX5_UC_ADDR_CHANGE = (1 << 0), MLX5_MC_ADDR_CHANGE = (1 << 1), MLX5_VLAN_CHANGE = (1 << 2), MLX5_PROMISC_CHANGE = (1 << 3), MLX5_MTU_CHANGE = (1 << 4), }; enum mlx5_list_type { MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0, MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1, MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2, }; enum { MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, }; /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_MAX = 0, HCA_CAP_OPMOD_GET_CUR = 1, }; enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, MLX5_CAP_ODP, MLX5_CAP_ATOMIC, MLX5_CAP_ROCE, MLX5_CAP_IPOIB_OFFLOADS, MLX5_CAP_EOIB_OFFLOADS, MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, MLX5_CAP_SNAPSHOT, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, MLX5_CAP_DEBUG, /* NUM OF CAP Types */ MLX5_CAP_NUM }; /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ 
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap) #define MLX5_CAP_SNAPSHOT(mdev, cap) \ MLX5_GET(snapshot_cap, \ mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap) #define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \ MLX5_GET(snapshot_cap, \ mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap) #define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap) #define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap) #define MLX5_CAP_DEBUG(mdev, cap) \ MLX5_GET(debug_cap, \ mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap) #define MLX5_CAP_DEBUG_MAX(mdev, cap) \ MLX5_GET(debug_cap, \ mdev->hca_caps_max[MLX5_CAP_DEBUG], cap) #define MLX5_CAP_QOS(mdev, cap) \ MLX5_GET(qos_cap,\ mdev->hca_caps_cur[MLX5_CAP_QOS], cap) #define MLX5_CAP_QOS_MAX(mdev, cap) \ MLX5_GET(qos_cap,\ mdev->hca_caps_max[MLX5_CAP_QOS], cap) enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, MLX5_CMD_STAT_BAD_OP_ERR = 0x2, MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, MLX5_CMD_STAT_BAD_RES_ERR = 0x5, MLX5_CMD_STAT_RES_BUSY = 0x6, MLX5_CMD_STAT_LIM_ERR = 0x8, MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, MLX5_CMD_STAT_IX_ERR = 0xa, MLX5_CMD_STAT_NO_RES_ERR = 0xf, MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, }; enum { MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, MLX5_RFC_2863_COUNTERS_GROUP = 0x1, MLX5_RFC_2819_COUNTERS_GROUP = 0x2, MLX5_RFC_3635_COUNTERS_GROUP = 0x3, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; enum { MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2, }; enum { MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE, MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE, }; enum { NUM_DRIVER_UARS = 4, NUM_LOW_LAT_UUARS = 4, }; enum { MLX5_CAP_PORT_TYPE_IB = 0x0, MLX5_CAP_PORT_TYPE_ETH = 0x1, 
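/*
 * Usage sketch for the capability accessor macros above.  The
 * 'log_max_cq' field name comes from the cmd_hca_cap layout in
 * mlx5_ifc.h (not part of this diff), so treat it as an assumption;
 * MLX5_CAP_GEN() reads the currently negotiated value and
 * MLX5_CAP_GEN_MAX() the device maximum.
 */
static u32
example_max_cq_count(struct mlx5_core_dev *mdev)
{
	return (1U << MLX5_CAP_GEN(mdev, log_max_cq));
}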
}; enum { MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0, MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1, MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2 }; enum { MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2, }; static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) return 0; return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } struct mlx5_ifc_mcia_reg_bits { u8 l[0x1]; u8 reserved_0[0x7]; u8 module[0x8]; u8 reserved_1[0x8]; u8 status[0x8]; u8 i2c_device_address[0x8]; u8 page_number[0x8]; u8 device_address[0x10]; u8 reserved_2[0x10]; u8 size[0x10]; u8 reserved_3[0x20]; u8 dword_0[0x20]; u8 dword_1[0x20]; u8 dword_2[0x20]; u8 dword_3[0x20]; u8 dword_4[0x20]; u8 dword_5[0x20]; u8 dword_6[0x20]; u8 dword_7[0x20]; u8 dword_8[0x20]; u8 dword_9[0x20]; u8 dword_10[0x20]; u8 dword_11[0x20]; }; #define MLX5_CMD_OP_QUERY_EEPROM 0x93c struct mlx5_mini_cqe8 { union { __be32 rx_hash_result; __be16 checksum; __be16 rsvd; struct { __be16 wqe_counter; u8 s_wqe_opcode; u8 reserved; } s_wqe_info; }; __be32 byte_cnt; }; enum { MLX5_NO_INLINE_DATA, MLX5_INLINE_DATA32_SEG, MLX5_INLINE_DATA64_SEG, MLX5_COMPRESSED, }; enum mlx5_exp_cqe_zip_recv_type { MLX5_CQE_FORMAT_HASH, MLX5_CQE_FORMAT_CSUM, }; #define MLX5E_CQE_FORMAT_MASK 0xc static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe) { return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2; } /* 8 regular priorities + 1 for multicast */ #define MLX5_NUM_BYPASS_FTS 9 #endif /* MLX5_DEVICE_H */ Index: head/sys/dev/mlx5/driver.h =================================================================== --- head/sys/dev/mlx5/driver.h (revision 312881) +++ head/sys/dev/mlx5/driver.h (revision 312882) @@ -1,1028 +1,1046 @@ /*- - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef MLX5_DRIVER_H #define MLX5_DRIVER_H #include #include #include #include #include #include #include #include #include #include #include #include #define MLX5_QCOUNTER_SETS_NETDEV 64 #define MLX5_MAX_NUMBER_OF_VFS 128 enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, }; enum { MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; enum { CMD_OWNER_SW = 0x0, CMD_OWNER_HW = 0x1, CMD_STATUS_SUCCESS = 0, }; enum mlx5_sqp_t { MLX5_SQP_SMI = 0, MLX5_SQP_GSI = 1, MLX5_SQP_IEEE_1588 = 2, MLX5_SQP_SNIFFER = 3, MLX5_SQP_SYNC_UMR = 4, }; enum { MLX5_MAX_PORTS = 2, }; enum { MLX5_EQ_VEC_PAGES = 0, MLX5_EQ_VEC_CMD = 1, MLX5_EQ_VEC_ASYNC = 2, MLX5_EQ_VEC_COMP_BASE, }; enum { MLX5_MAX_IRQ_NAME = 32 }; enum { MLX5_ATOMIC_MODE_OFF = 16, MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF, }; enum { MLX5_ATOMIC_MODE_DCT_OFF = 20, MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF, }; enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2, MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3, }; enum { MLX5_REG_QETCR = 0x4005, MLX5_REG_QPDP = 0x4007, MLX5_REG_QTCT = 0x400A, MLX5_REG_QHLL = 0x4016, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PPTB = 0x500B, MLX5_REG_PBMC = 0x500C, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PMLP = 0x5002, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MPCNT = 0x9051, }; enum dbg_rsc_type { MLX5_DBG_RSC_QP, MLX5_DBG_RSC_EQ, MLX5_DBG_RSC_CQ, }; enum { MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, MLX5_INTERFACE_NUMBER = 2, }; struct mlx5_field_desc { struct dentry *dent; int i; }; struct mlx5_rsc_debug { struct mlx5_core_dev *dev; void *object; enum dbg_rsc_type type; struct dentry *root; struct mlx5_field_desc fields[0]; }; enum mlx5_dev_event { MLX5_DEV_EVENT_SYS_ERROR, MLX5_DEV_EVENT_PORT_UP, MLX5_DEV_EVENT_PORT_DOWN, MLX5_DEV_EVENT_PORT_INITIALIZED, MLX5_DEV_EVENT_LID_CHANGE, MLX5_DEV_EVENT_PKEY_CHANGE, MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, MLX5_DEV_EVENT_VPORT_CHANGE, MLX5_DEV_EVENT_ERROR_STATE_DCBX, MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE, MLX5_DEV_EVENT_LOCAL_OPER_CHANGE, MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE, }; enum mlx5_port_status { MLX5_PORT_UP = 1 << 0, MLX5_PORT_DOWN = 1 << 1, }; enum mlx5_link_mode { MLX5_1000BASE_CX_SGMII = 0, MLX5_1000BASE_KX = 1, 
MLX5_10GBASE_CX4 = 2, MLX5_10GBASE_KX4 = 3, MLX5_10GBASE_KR = 4, MLX5_20GBASE_KR2 = 5, MLX5_40GBASE_CR4 = 6, MLX5_40GBASE_KR4 = 7, MLX5_56GBASE_R4 = 8, MLX5_10GBASE_CR = 12, MLX5_10GBASE_SR = 13, MLX5_10GBASE_ER = 14, MLX5_40GBASE_SR4 = 15, MLX5_40GBASE_LR4 = 16, MLX5_100GBASE_CR4 = 20, MLX5_100GBASE_SR4 = 21, MLX5_100GBASE_KR4 = 22, MLX5_100GBASE_LR4 = 23, MLX5_100BASE_TX = 24, MLX5_1000BASE_T = 25, MLX5_10GBASE_T = 26, MLX5_25GBASE_CR = 27, MLX5_25GBASE_KR = 28, MLX5_25GBASE_SR = 29, MLX5_50GBASE_CR2 = 30, MLX5_50GBASE_KR2 = 31, MLX5_LINK_MODES_NUMBER, }; #define MLX5_PROT_MASK(link_mode) (1 << link_mode) struct mlx5_uuar_info { struct mlx5_uar *uars; int num_uars; int num_low_latency_uuars; unsigned long *bitmap; unsigned int *count; struct mlx5_bf *bfs; /* * protect uuar allocation data structs */ struct mutex lock; u32 ver; }; struct mlx5_bf { void __iomem *reg; void __iomem *regreg; int buf_size; struct mlx5_uar *uar; unsigned long offset; int need_lock; /* protect blue flame buffer selection when needed */ spinlock_t lock; /* serialize 64 bit writes when done as two 32 bit accesses */ spinlock_t lock32; int uuarn; }; struct mlx5_cmd_first { __be32 data[4]; }; -struct mlx5_cmd_msg { - struct list_head list; - struct cache_ent *cache; - u32 len; - struct mlx5_cmd_first first; - struct mlx5_cmd_mailbox *next; +struct cache_ent; +struct mlx5_fw_page { + union { + struct rb_node rb_node; + struct list_head list; + }; + struct mlx5_cmd_first first; + struct mlx5_core_dev *dev; + bus_dmamap_t dma_map; + bus_addr_t dma_addr; + void *virt_addr; + struct cache_ent *cache; + u32 numpages; + u16 load_done; +#define MLX5_LOAD_ST_NONE 0 +#define MLX5_LOAD_ST_SUCCESS 1 +#define MLX5_LOAD_ST_FAILURE 2 + u16 func_id; }; +#define mlx5_cmd_msg mlx5_fw_page struct mlx5_cmd_debug { struct dentry *dbg_root; struct dentry *dbg_in; struct dentry *dbg_out; struct dentry *dbg_outlen; struct dentry *dbg_status; struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; u16 inlen; u16 outlen; }; struct cache_ent { /* protect block chain allocations */ spinlock_t lock; struct list_head head; }; struct cmd_msg_cache { struct cache_ent large; struct cache_ent med; }; struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; struct dentry *avg; struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { - void *cmd_alloc_buf; - dma_addr_t alloc_dma; - int alloc_size; + struct mlx5_fw_page *cmd_page; + bus_dma_tag_t dma_tag; + struct sx dma_sx; + struct mtx dma_mtx; +#define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx) +#define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx) +#define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx) + struct cv dma_cv; +#define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv) +#define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx) void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; u8 log_sz; u8 log_stride; int max_reg_cmds; int events; u32 __iomem *vector; /* protect command queue allocations */ spinlock_t alloc_lock; /* protect token allocations */ spinlock_t token_lock; u8 token; unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; struct semaphore sem; struct semaphore pages_sem; int mode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; - struct pci_pool *pool; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; int moving_to_polling; }; struct mlx5_port_caps { int gid_table_len; int 
pkey_table_len; u8 ext_port_cap; }; -struct mlx5_cmd_mailbox { - void *buf; - dma_addr_t dma; - struct mlx5_cmd_mailbox *next; -}; - -struct mlx5_buf_list { - void *buf; - dma_addr_t map; -}; - struct mlx5_buf { - struct mlx5_buf_list direct; - struct mlx5_buf_list *page_list; - int nbufs; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; + struct mlx5_core_dev *dev; + struct { + void *buf; + } direct; + u64 *page_list; int npages; int size; u8 page_shift; + u8 load_done; }; struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; u32 cons_index; struct mlx5_buf buf; int size; u8 irqn; u8 eqn; int nent; u64 mask; struct list_head list; int index; struct mlx5_rsc_debug *dbg; }; struct mlx5_core_psv { u32 psv_idx; struct psv_layout { u32 pd; u16 syndrome; u16 reserved; u16 bg; u16 app_tag; u32 ref_tag; } psv; }; struct mlx5_core_sig_ctx { struct mlx5_core_psv psv_memory; struct mlx5_core_psv psv_wire; #if (__FreeBSD_version >= 1100000) struct ib_sig_err err_item; #endif bool sig_status_checked; bool sig_err_exists; u32 sigerr_count; }; struct mlx5_core_mr { u64 iova; u64 size; u32 key; u32 pd; }; enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_DCT = 5, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; atomic_t refcount; struct completion free; }; struct mlx5_core_srq { struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; int max; int max_gs; int max_avail_gather; int wqe_shift; void (*event)(struct mlx5_core_srq *, int); atomic_t refcount; struct completion free; }; struct mlx5_eq_table { void __iomem *update_ci; void __iomem *update_arm_ci; struct list_head comp_eqs_list; struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; int num_comp_vectors; /* protect EQs list */ spinlock_t lock; }; struct mlx5_uar { u32 index; void __iomem *bf_map; void __iomem *map; }; struct mlx5_core_health { struct mlx5_health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; struct list_head list; u32 prev; int miss_counter; }; #define MLX5_CQ_LINEAR_ARRAY_SIZE 1024 struct mlx5_cq_linear_array_entry { spinlock_t lock; struct mlx5_core_cq * volatile cq; }; struct mlx5_cq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE]; }; struct mlx5_qp_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_srq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_mr_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_irq_info { char name[MLX5_MAX_IRQ_NAME]; }; struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); struct io_mapping *bf_mapping; /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; s64 fw_pages; atomic_t reg_pages; - struct list_head free_list; s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS]; struct mlx5_core_health health; struct mlx5_srq_table srq_table; /* start: qp staff */ struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; /* end: qp staff */ /* start: cq staff */ struct mlx5_cq_table cq_table; /* end: cq staff 
*/ /* start: mr staff */ struct mlx5_mr_table mr_table; /* end: mr staff */ /* start: alloc staff */ int numa_node; struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ struct dentry *dbg_root; /* protect mkey key part */ spinlock_t mkey_lock; u8 mkey_key; struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; unsigned long pci_dev_data; }; enum mlx5_device_state { MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; struct mlx5_special_contexts { int resd_lkey; }; struct mlx5_core_dev { struct pci_dev *pdev; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; u32 issi; struct mlx5_special_contexts special_contexts; unsigned int module_status[MLX5_MAX_PORTS]; u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER]; }; enum { MLX5_WOL_DISABLE = 0, MLX5_WOL_SECURED_MAGIC = 1 << 1, MLX5_WOL_MAGIC = 1 << 2, MLX5_WOL_ARP = 1 << 3, MLX5_WOL_BROADCAST = 1 << 4, MLX5_WOL_MULTICAST = 1 << 5, MLX5_WOL_UNICAST = 1 << 6, MLX5_WOL_PHY_ACTIVITY = 1 << 7, }; struct mlx5_db { __be32 *db; union { struct mlx5_db_pgdir *pgdir; struct mlx5_ib_user_db_page *user_page; } u; dma_addr_t dma; int index; }; struct mlx5_net_counters { u64 packets; u64 octets; }; struct mlx5_ptys_reg { u8 an_dis_admin; u8 an_dis_ap; u8 local_port; u8 proto_mask; u32 eth_proto_cap; u16 ib_link_width_cap; u16 ib_proto_cap; u32 eth_proto_admin; u16 ib_link_width_admin; u16 ib_proto_admin; u32 eth_proto_oper; u16 ib_link_width_oper; u16 ib_proto_oper; u32 eth_proto_lp_advertise; }; struct mlx5_pvlc_reg { u8 local_port; u8 vl_hw_cap; u8 vl_admin; u8 vl_operational; }; struct mlx5_pmtu_reg { u8 local_port; u16 max_mtu; u16 admin_mtu; u16 oper_mtu; }; struct mlx5_vport_counters { struct mlx5_net_counters received_errors; struct mlx5_net_counters transmit_errors; struct mlx5_net_counters received_ib_unicast; struct mlx5_net_counters transmitted_ib_unicast; struct mlx5_net_counters received_ib_multicast; struct mlx5_net_counters transmitted_ib_multicast; struct mlx5_net_counters received_eth_broadcast; struct mlx5_net_counters transmitted_eth_broadcast; struct mlx5_net_counters received_eth_unicast; struct mlx5_net_counters transmitted_eth_unicast; struct mlx5_net_counters received_eth_multicast; struct mlx5_net_counters transmitted_eth_multicast; }; enum { - MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, + MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES, }; struct mlx5_core_dct { struct mlx5_core_rsc_common common; /* must be first */ void (*event)(struct mlx5_core_dct *, int); int dctn; struct completion drained; struct mlx5_rsc_debug *dbg; int pid; }; enum { MLX5_COMP_EQ_SIZE = 1024, }; enum { MLX5_PTYS_IB = 1 << 0, MLX5_PTYS_EN = 1 << 2, }; struct mlx5_db_pgdir { struct list_head list; DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); + struct mlx5_fw_page *fw_page; __be32 *db_page; dma_addr_t db_dma; }; typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; + int uin_size; void *uout; int uout_size; mlx5_cmd_cbk_t callback; void *context; int idx; struct completion done; struct mlx5_cmd *cmd; struct work_struct 
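/*
 * Doorbell layout sketch: with MLX5_DB_PER_PAGE now derived from
 * MLX5_ADAPTER_PAGE_SIZE (4096) instead of the host PAGE_SIZE, a
 * firmware page holds 4096 / L1_CACHE_BYTES doorbell records, one per
 * cache line.  Record 'i' maps to CPU and DMA addresses as shown below
 * (mirroring mlx5_alloc_db_from_pgdir(); the helper name here is
 * illustrative).
 */
static inline void
example_db_addr(struct mlx5_db_pgdir *pgdir, int i,
    __be32 **db, dma_addr_t *dma)
{
	int offset = i * L1_CACHE_BYTES;

	*db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	*dma = pgdir->db_dma + offset;
}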
work; struct mlx5_cmd_layout *lay; int ret; int page_queue; u8 status; u8 token; u64 ts1; u64 ts2; u16 op; u8 busy; }; struct mlx5_pas { u64 pa; u8 log_sz; }; -static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) +static inline void * +mlx5_buf_offset(struct mlx5_buf *buf, int offset) { - if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) - return buf->direct.buf + offset; - else - return buf->page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); + return ((char *)buf->direct.buf + offset); } extern struct workqueue_struct *mlx5_core_wq; #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) { return pci_get_drvdata(pdev); } extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) & 0xffff; } static inline u16 fw_rev_min(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) >> 16; } static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n"); return 0; } return 8 * (1 << param); } static inline void *mlx5_vzalloc(unsigned long size) { void *rtn; rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); return rtn; } static inline void *mlx5_vmalloc(unsigned long size) { void *rtn; rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!rtn) rtn = vmalloc(size); return rtn; } void mlx5_enter_error_state(struct mlx5_core_dev *dev); int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); int mlx5_cmd_status_to_err_v2(void *ptr); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(void); void __init mlx5_health_init(void); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); -int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf, int node); + +#define mlx5_buf_alloc_node(dev, size, direct, buf, node) \ + mlx5_buf_alloc(dev, size, direct, buf) int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); int mlx5_core_create_srq(struct mlx5_core_dev 
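/*
 * With the reworked struct mlx5_buf above the backing memory is always
 * virtually contiguous, so mlx5_buf_offset() reduces to plain pointer
 * arithmetic.  Illustrative helper ('n' and 'stride' are hypothetical
 * queue parameters):
 */
static inline void *
example_get_entry(struct mlx5_buf *buf, int n, int stride)
{
	return (mlx5_buf_offset(buf, n * stride));
}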
*dev, struct mlx5_core_srq *srq, struct mlx5_create_srq_mbox_in *in, int inlen, int is_xrc); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); void mlx5_init_mr_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_create_mkey_mbox_in *in, int inlen, mlx5_cmd_cbk_t callback, void *context, struct mlx5_create_mkey_mbox_out *out); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_query_mkey_mbox_out *out, int outlen); int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, u16 opmod, u8 port); +void mlx5_fwp_flush(struct mlx5_fw_page *fwp); +void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp); +struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num); +void mlx5_fwp_free(struct mlx5_fw_page *fwp); +u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset); +void *mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset); void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector); void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable, u64 addr); int mlx5_qp_debugfs_init(struct mlx5_core_dev 
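/*
 * Usage sketch for the new firmware-page helpers declared above:
 * allocate one 4 KiB page, zero it, flush the CPU writes so the device
 * sees them, and release it.  Error handling is trimmed for brevity.
 */
static int
example_fwp_usage(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_page *fwp;
	void *virt;
	u64 dma;

	fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (fwp == NULL)
		return (-ENOMEM);

	virt = mlx5_fwp_get_virt(fwp, 0);
	dma = mlx5_fwp_get_dma(fwp, 0);
	(void)dma;	/* would be handed to the firmware here */

	memset(virt, 0, MLX5_ADAPTER_PAGE_SIZE);
	mlx5_fwp_flush(fwp);	/* make the CPU writes visible to the HCA */

	mlx5_fwp_free(fwp);
	return (0);
}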
*dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask); int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask); int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, u8 *an_disable_cap, u8 *an_disable_status); int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable, u32 eth_proto_admin, int proto_mask); int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask); int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask); int mlx5_set_port_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port, u32 rx_pause, u32 tx_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port, u32 *rx_pause, u32 *tx_pause); int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu); int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu); int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu); unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num); int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num); int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num, int device_addr, int size, int module_num, u32 *data, int *size_read); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct mlx5_query_eq_mbox_out *out, int outlen); int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev); int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode); int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode); int mlx5_core_access_pvlc(struct mlx5_core_dev *dev, struct mlx5_pvlc_reg *pvlc, int write); int mlx5_core_access_ptys(struct mlx5_core_dev *dev, struct mlx5_ptys_reg *ptys, int write); int mlx5_core_access_pmtu(struct mlx5_core_dev *dev, struct mlx5_pmtu_reg *pmtu, int write); int 
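/*
 * Sketch of an access-register read through mlx5_core_access_reg()
 * declared above, using the MLX5_REG_PMTU id from driver.h.  The
 * 'pmtu_reg' layout and its 'local_port'/'max_mtu' field names come
 * from mlx5_ifc.h (not part of this diff) and are assumptions here.
 */
static int
example_query_max_mtu(struct mlx5_core_dev *dev, u8 port, int *max_mtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	int err;

	MLX5_SET(pmtu_reg, in, local_port, port);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_REG_PMTU, 0, 0);
	if (err == 0)
		*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
	return (err);
}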
mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port); int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port); int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int *is_enable); int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int enable); int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol, void *out, int out_size); int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear, void *out, int out_size); int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev, u8 num_of_samples, u16 sample_index, void *out, int out_size); static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; } static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) { return mkey_idx << 8; } static inline u8 mlx5_mkey_variant(u32 mkey) { return mkey & 0xff; } enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, }; enum { MAX_MR_CACHE_ENTRIES = 15, }; struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); void * (*get_dev)(void *context); int protocol; struct list_head list; }; void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); struct mlx5_profile { u64 mask; u8 log_max_qp; struct { int size; int limit; } mr_cache[MAX_MR_CACHE_ENTRIES]; }; enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) { return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } #define MLX5_EEPROM_MAX_BYTES 32 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00 #define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000 #endif /* MLX5_DRIVER_H */ Index: head/sys/dev/mlx5/mlx5_core/mlx5_alloc.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_alloc.c (revision 312881) +++ head/sys/dev/mlx5/mlx5_core/mlx5_alloc.c (revision 312882) @@ -1,256 +1,261 @@ /*- - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
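/*
 * Registration sketch for the mlx5_interface API above: a protocol
 * driver supplies add/remove callbacks and registers once at load
 * time.  All 'example_*' names are illustrative.
 */
static void *
example_add(struct mlx5_core_dev *dev)
{
	/* per-device context handed back to us in remove()/event() */
	return (dev);
}

static void
example_remove(struct mlx5_core_dev *dev, void *context)
{
}

static struct mlx5_interface example_interface = {
	.add		= example_add,
	.remove		= example_remove,
	.protocol	= MLX5_INTERFACE_PROTOCOL_ETH,
};

/*
 * mlx5_register_interface(&example_interface) would be called at
 * module load and mlx5_unregister_interface(&example_interface) at
 * unload.
 */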
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include "mlx5_core.h" /* Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. If the * requested size is > max_direct, we split the allocation into * multiple pages, so we don't require too much contiguous memory. */ -static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, - size_t size, dma_addr_t *dma_handle, - int node) +static void +mlx5_buf_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { - void *cpu_handle; + struct mlx5_buf *buf; + uint8_t owned; + int x; - cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, - dma_handle, GFP_KERNEL); - return cpu_handle; + buf = (struct mlx5_buf *)arg; + owned = MLX5_DMA_OWNED(buf->dev); + + if (!owned) + MLX5_DMA_LOCK(buf->dev); + + if (error == 0) { + for (x = 0; x != nseg; x++) { + buf->page_list[x] = segs[x].ds_addr; + KASSERT(segs[x].ds_len == PAGE_SIZE, ("Invalid segment size")); + } + buf->load_done = MLX5_LOAD_ST_SUCCESS; + } else { + buf->load_done = MLX5_LOAD_ST_FAILURE; + } + MLX5_DMA_DONE(buf->dev); + + if (!owned) + MLX5_DMA_UNLOCK(buf->dev); } -int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf, int node) +int +mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, + int max_direct, struct mlx5_buf *buf) { - dma_addr_t t; + int err; - buf->size = size; - if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size, - &t, node); - if (!buf->direct.buf) - return -ENOMEM; + buf->npages = howmany(size, PAGE_SIZE); + buf->page_shift = PAGE_SHIFT; + buf->load_done = MLX5_LOAD_ST_NONE; + buf->dev = dev; + buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list), + GFP_KERNEL); - buf->direct.map = t; + err = -bus_dma_tag_create( + bus_get_dma_tag(dev->pdev->dev.bsddev), + PAGE_SIZE, /* alignment */ + 0, /* no boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + PAGE_SIZE * buf->npages, /* maxsize */ + buf->npages, /* nsegments */ + PAGE_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &buf->dma_tag); - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - } else { - int i; + if (err != 0) + goto err_dma_tag; - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; - buf->page_shift = PAGE_SHIFT; - buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - GFP_KERNEL); + /* allocate memory */ + err = -bus_dmamem_alloc(buf->dma_tag, &buf->direct.buf, + BUS_DMA_WAITOK | BUS_DMA_COHERENT, &buf->dma_map); + if (err != 0) + goto err_dma_alloc; - for (i = 0; i < buf->nbufs; i++) { - buf->page_list[i].buf = - mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, - &t, node); + /* load memory into DMA */ + 
MLX5_DMA_LOCK(dev); + err = bus_dmamap_load( + buf->dma_tag, buf->dma_map, buf->direct.buf, + PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb, + buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT); - buf->page_list[i].map = t; - } + while (buf->load_done == MLX5_LOAD_ST_NONE) + MLX5_DMA_WAIT(dev); + MLX5_DMA_UNLOCK(dev); - if (BITS_PER_LONG == 64) { - struct page **pages; - - pages = kmalloc(sizeof(*pages) * (buf->nbufs + 1), - GFP_KERNEL); - for (i = 0; i < buf->nbufs; i++) - pages[i] = virt_to_page(buf->page_list[i].buf); - pages[buf->nbufs] = pages[0]; - buf->direct.buf = vmap(pages, buf->nbufs + 1, VM_MAP, - PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; - } + /* check for error */ + if (buf->load_done != MLX5_LOAD_ST_SUCCESS) { + err = -ENOMEM; + goto err_dma_load; } - return 0; + /* clean memory */ + memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages); -err_free: - mlx5_buf_free(dev, buf); + /* flush memory to RAM */ + bus_dmamap_sync(buf->dev->cmd.dma_tag, buf->dma_map, BUS_DMASYNC_PREWRITE); + return (0); - return -ENOMEM; +err_dma_load: + bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map); +err_dma_alloc: + bus_dma_tag_destroy(buf->dma_tag); +err_dma_tag: + kfree(buf->page_list); + return (err); } -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf) -{ - return mlx5_buf_alloc_node(dev, size, max_direct, - buf, dev->priv.numa_node); -} -EXPORT_SYMBOL_GPL(mlx5_buf_alloc); - - void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) { - if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); - else { - int i; - if (BITS_PER_LONG == 64 && buf->direct.buf) - vunmap(buf->direct.buf); - for (i = 0; i < buf->nbufs; i++) - if (buf->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - buf->page_list[i].buf, - buf->page_list[i].map); - kfree(buf->page_list); - } + bus_dmamap_unload(buf->dma_tag, buf->dma_map); + bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map); + bus_dma_tag_destroy(buf->dma_tag); + kfree(buf->page_list); } EXPORT_SYMBOL_GPL(mlx5_buf_free); static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, int node) { struct mlx5_db_pgdir *pgdir; pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); - pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, - &pgdir->db_dma, node); + pgdir->fw_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); + if (pgdir->fw_page != NULL) { + pgdir->db_page = pgdir->fw_page->virt_addr; + pgdir->db_dma = pgdir->fw_page->dma_addr; + + /* clean allocated memory */ + memset(pgdir->db_page, 0, MLX5_ADAPTER_PAGE_SIZE); + + /* flush memory to RAM */ + mlx5_fwp_flush(pgdir->fw_page); + } if (!pgdir->db_page) { kfree(pgdir); return NULL; } return pgdir; } static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, struct mlx5_db *db) { int offset; int i; i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); if (i >= MLX5_DB_PER_PAGE) return -ENOMEM; __clear_bit(i, pgdir->bitmap); db->u.pgdir = pgdir; db->index = i; offset = db->index * L1_CACHE_BYTES; db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); db->dma = pgdir->db_dma + offset; db->db[0] = 0; db->db[1] = 0; return 0; } int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node) { struct mlx5_db_pgdir *pgdir; int ret = 0; mutex_lock(&dev->priv.pgdir_mutex); list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) if (!mlx5_alloc_db_from_pgdir(pgdir, db)) goto out; 
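mlx5_alloc_db_from_pgdir() above hands out doorbell records from a shared firmware page: it claims the first free bit in the pgdir bitmap and places record i one cache line into the page, so the CPU pointer and the bus address are derived from the same offset. A self-contained sketch of that bookkeeping, assuming a 4K page and 64-byte cache lines in place of the kernel's PAGE_SIZE, L1_CACHE_BYTES and MLX5_DB_PER_PAGE, might be:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DB_PAGE_SIZE	4096			/* assumed PAGE_SIZE */
#define DB_STRIDE	64			/* assumed L1_CACHE_BYTES */
#define DB_PER_PAGE	(DB_PAGE_SIZE / DB_STRIDE)

struct db_pgdir {
	uint8_t free[DB_PER_PAGE];	/* 1 = record slot is free */
	uint64_t dma_base;		/* bus address of the page */
};

/* Claim the first free slot and compute its bus address. */
static bool
db_alloc(struct db_pgdir *pg, unsigned *index, uint64_t *dma)
{
	unsigned i;

	for (i = 0; i < DB_PER_PAGE; i++) {
		if (pg->free[i] == 0)
			continue;
		pg->free[i] = 0;
		*index = i;
		*dma = pg->dma_base + (uint64_t)i * DB_STRIDE;
		return (true);
	}
	return (false);
}

int
main(void)
{
	struct db_pgdir pg;
	uint64_t dma;
	unsigned idx;

	memset(pg.free, 1, sizeof(pg.free));
	pg.dma_base = 0x100000;	/* invented page bus address */

	if (db_alloc(&pg, &idx, &dma))
		printf("record %u at bus address 0x%jx\n", idx, (uintmax_t)dma);
	if (db_alloc(&pg, &idx, &dma))
		printf("record %u at bus address 0x%jx\n", idx, (uintmax_t)dma);
	return (0);
}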
pgdir = mlx5_alloc_db_pgdir(dev, node); if (!pgdir) { ret = -ENOMEM; goto out; } list_add(&pgdir->list, &dev->priv.pgdir_list); /* This should never fail -- we just allocated an empty page: */ WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); out: mutex_unlock(&dev->priv.pgdir_mutex); return ret; } EXPORT_SYMBOL_GPL(mlx5_db_alloc_node); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) { return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); } EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) { mutex_lock(&dev->priv.pgdir_mutex); __set_bit(db->index, db->u.pgdir->bitmap); if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - db->u.pgdir->db_page, db->u.pgdir->db_dma); + mlx5_fwp_free(db->u.pgdir->fw_page); list_del(&db->u.pgdir->list); kfree(db->u.pgdir); } mutex_unlock(&dev->priv.pgdir_mutex); } EXPORT_SYMBOL_GPL(mlx5_db_free); - -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +void +mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) { - u64 addr; int i; - for (i = 0; i < buf->npages; i++) { - if (buf->nbufs == 1) - addr = buf->direct.map + ((u64)i << buf->page_shift); - else - addr = buf->page_list[i].map; - - pas[i] = cpu_to_be64(addr); - } + for (i = 0; i != buf->npages; i++) + pas[i] = cpu_to_be64(buf->page_list[i]); } EXPORT_SYMBOL_GPL(mlx5_fill_page_array); Index: head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c (revision 312881) +++ head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c (revision 312882) @@ -1,1826 +1,1816 @@ /*- - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
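mlx5_fill_page_array() above hands the buffer's per-page bus addresses to firmware as an array of big-endian 64-bit values. A small userland sketch of that serialization, with a hand-rolled byte-order helper standing in for cpu_to_be64() and made-up bus addresses, might be:

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value in big-endian byte order, as cpu_to_be64() would. */
static void
put_be64(uint8_t *dst, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (uint8_t)(v >> (56 - 8 * i));
}

int
main(void)
{
	/* Pretend bus addresses of a three-page buffer (4K pages assumed). */
	uint64_t page_list[3] = { 0x12340000, 0x12341000, 0x12342000 };
	uint8_t pas[3 * 8];
	int i, j;

	for (i = 0; i < 3; i++)
		put_be64(pas + 8 * i, page_list[i]);

	for (i = 0; i < 3; i++) {
		printf("pas[%d] = ", i);
		for (j = 0; j < 8; j++)
			printf("%02x", pas[8 * i + j]);
		printf("\n");
	}
	return (0);
}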
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include "mlx5_core.h" static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size); static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); enum { CMD_IF_REV = 5, }; enum { CMD_MODE_POLLING, CMD_MODE_EVENTS }; enum { NUM_LONG_LISTS = 2, NUM_MED_LISTS = 64, LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + MLX5_CMD_DATA_BLOCK_SIZE, MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, }; enum { MLX5_CMD_DELIVERY_STAT_OK = 0x0, MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, + int uin_size, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t cbk, void *context, int page_queue) { gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; struct mlx5_cmd_work_ent *ent; ent = kzalloc(sizeof(*ent), alloc_flags); if (!ent) return ERR_PTR(-ENOMEM); ent->in = in; + ent->uin_size = uin_size; ent->out = out; ent->uout = uout; ent->uout_size = uout_size; ent->callback = cbk; ent->context = context; ent->cmd = cmd; ent->page_queue = page_queue; return ent; } static u8 alloc_token(struct mlx5_cmd *cmd) { u8 token; spin_lock(&cmd->token_lock); cmd->token++; if (cmd->token == 0) cmd->token++; token = cmd->token; spin_unlock(&cmd->token_lock); return token; } static int alloc_ent(struct mlx5_cmd_work_ent *ent) { unsigned long flags; struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); int ret = cmd->max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); if (!ent->page_queue) { ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); if (ret >= cmd->max_reg_cmds) ret = -1; } if (dev->state != MLX5_DEVICE_STATE_UP) ret = -1; if (ret != -1) { ent->busy = 1; ent->idx = ret; clear_bit(ent->idx, &cmd->bitmask); cmd->ent_arr[ent->idx] = ent; } spin_unlock_irqrestore(&cmd->alloc_lock, flags); return ret; } static void free_ent(struct mlx5_cmd *cmd, int idx) { unsigned long flags; spin_lock_irqsave(&cmd->alloc_lock, flags); set_bit(idx, &cmd->bitmask); spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) { return cmd->cmd_buf + (idx << cmd->log_stride); } static u8 xor8_buf(void *buf, int len) { u8 *ptr = buf; u8 sum = 0; int i; for (i = 0; i < len; i++) sum ^= ptr[i]; return sum; } static int verify_block_sig(struct mlx5_cmd_prot_block *block) { if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) return -EINVAL; if (xor8_buf(block, sizeof(*block)) != 0xff) return -EINVAL; return 0; } static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, int csum) { block->token = token; if (csum) { block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); block->sig = ~xor8_buf(block, sizeof(*block) - 1); } } -static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) +static void 
+calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) { - struct mlx5_cmd_mailbox *next = msg->next; + size_t i; - while (next) { - calc_block_sig(next->buf, token, csum); - next = next->next; + for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { + struct mlx5_cmd_prot_block *block; + + block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); + + /* compute signature */ + calc_block_sig(block, token, csum); + + /* check for last block */ + if (block->next == 0) + break; } + + /* make sure data gets written to RAM */ + mlx5_fwp_flush(msg); } static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) { ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); calc_chain_sig(ent->in, ent->token, csum); calc_chain_sig(ent->out, ent->token, csum); } static void poll_timeout(struct mlx5_cmd_work_ent *ent) { struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); int poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); u8 own; do { own = ent->lay->status_own; if (!(own & CMD_OWNER_HW) || dev->state != MLX5_DEVICE_STATE_UP) { ent->ret = 0; return; } usleep_range(5000, 10000); } while (time_before(jiffies, poll_end)); ent->ret = -ETIMEDOUT; } static void free_cmd(struct mlx5_cmd_work_ent *ent) { kfree(ent); } - -static int verify_signature(struct mlx5_cmd_work_ent *ent) +static int +verify_signature(struct mlx5_cmd_work_ent *ent) { - struct mlx5_cmd_mailbox *next = ent->out->next; + struct mlx5_cmd_msg *msg = ent->out; + size_t i; int err; u8 sig; sig = xor8_buf(ent->lay, sizeof(*ent->lay)); if (sig != 0xff) return -EINVAL; - while (next) { - err = verify_block_sig(next->buf); - if (err) - return err; + for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { + struct mlx5_cmd_prot_block *block; - next = next->next; - } + block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); - return 0; + /* compute signature */ + err = verify_block_sig(block); + if (err != 0) + return (err); + + /* check for last block */ + if (block->next == 0) + break; + } + return (0); } static void dump_buf(void *buf, int size, int data_only, int offset) { __be32 *p = buf; int i; for (i = 0; i < size; i += 16) { pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); p += 4; offset += 16; } if (!data_only) pr_debug("\n"); } const char *mlx5_command_str(int command) { switch (command) { case MLX5_CMD_OP_QUERY_HCA_CAP: return "QUERY_HCA_CAP"; case MLX5_CMD_OP_SET_HCA_CAP: return "SET_HCA_CAP"; case MLX5_CMD_OP_QUERY_ADAPTER: return "QUERY_ADAPTER"; case MLX5_CMD_OP_INIT_HCA: return "INIT_HCA"; case MLX5_CMD_OP_TEARDOWN_HCA: return "TEARDOWN_HCA"; case MLX5_CMD_OP_ENABLE_HCA: return "MLX5_CMD_OP_ENABLE_HCA"; case MLX5_CMD_OP_DISABLE_HCA: return "MLX5_CMD_OP_DISABLE_HCA"; case MLX5_CMD_OP_QUERY_PAGES: return "QUERY_PAGES"; case MLX5_CMD_OP_MANAGE_PAGES: return "MANAGE_PAGES"; case MLX5_CMD_OP_QUERY_ISSI: return "QUERY_ISSI"; case MLX5_CMD_OP_SET_ISSI: return "SET_ISSI"; case MLX5_CMD_OP_CREATE_MKEY: return "CREATE_MKEY"; case MLX5_CMD_OP_QUERY_MKEY: return "QUERY_MKEY"; case MLX5_CMD_OP_DESTROY_MKEY: return "DESTROY_MKEY"; case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: return "QUERY_SPECIAL_CONTEXTS"; case MLX5_CMD_OP_PAGE_FAULT_RESUME: return "PAGE_FAULT_RESUME"; case MLX5_CMD_OP_CREATE_EQ: return "CREATE_EQ"; case MLX5_CMD_OP_DESTROY_EQ: return "DESTROY_EQ"; case MLX5_CMD_OP_QUERY_EQ: return "QUERY_EQ"; case MLX5_CMD_OP_GEN_EQE: return "GEN_EQE"; case MLX5_CMD_OP_CREATE_CQ: 
return "CREATE_CQ"; case MLX5_CMD_OP_DESTROY_CQ: return "DESTROY_CQ"; case MLX5_CMD_OP_QUERY_CQ: return "QUERY_CQ"; case MLX5_CMD_OP_MODIFY_CQ: return "MODIFY_CQ"; case MLX5_CMD_OP_CREATE_QP: return "CREATE_QP"; case MLX5_CMD_OP_DESTROY_QP: return "DESTROY_QP"; case MLX5_CMD_OP_RST2INIT_QP: return "RST2INIT_QP"; case MLX5_CMD_OP_INIT2RTR_QP: return "INIT2RTR_QP"; case MLX5_CMD_OP_RTR2RTS_QP: return "RTR2RTS_QP"; case MLX5_CMD_OP_RTS2RTS_QP: return "RTS2RTS_QP"; case MLX5_CMD_OP_SQERR2RTS_QP: return "SQERR2RTS_QP"; case MLX5_CMD_OP_2ERR_QP: return "2ERR_QP"; case MLX5_CMD_OP_2RST_QP: return "2RST_QP"; case MLX5_CMD_OP_QUERY_QP: return "QUERY_QP"; case MLX5_CMD_OP_SQD_RTS_QP: return "SQD_RTS_QP"; case MLX5_CMD_OP_MAD_IFC: return "MAD_IFC"; case MLX5_CMD_OP_INIT2INIT_QP: return "INIT2INIT_QP"; case MLX5_CMD_OP_CREATE_PSV: return "CREATE_PSV"; case MLX5_CMD_OP_DESTROY_PSV: return "DESTROY_PSV"; case MLX5_CMD_OP_CREATE_SRQ: return "CREATE_SRQ"; case MLX5_CMD_OP_DESTROY_SRQ: return "DESTROY_SRQ"; case MLX5_CMD_OP_QUERY_SRQ: return "QUERY_SRQ"; case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ"; case MLX5_CMD_OP_CREATE_XRC_SRQ: return "CREATE_XRC_SRQ"; case MLX5_CMD_OP_DESTROY_XRC_SRQ: return "DESTROY_XRC_SRQ"; case MLX5_CMD_OP_QUERY_XRC_SRQ: return "QUERY_XRC_SRQ"; case MLX5_CMD_OP_ARM_XRC_SRQ: return "ARM_XRC_SRQ"; case MLX5_CMD_OP_CREATE_DCT: return "CREATE_DCT"; case MLX5_CMD_OP_SET_DC_CNAK_TRACE: return "SET_DC_CNAK_TRACE"; case MLX5_CMD_OP_DESTROY_DCT: return "DESTROY_DCT"; case MLX5_CMD_OP_DRAIN_DCT: return "DRAIN_DCT"; case MLX5_CMD_OP_QUERY_DCT: return "QUERY_DCT"; case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: return "ARM_DCT_FOR_KEY_VIOLATION"; case MLX5_CMD_OP_QUERY_VPORT_STATE: return "QUERY_VPORT_STATE"; case MLX5_CMD_OP_MODIFY_VPORT_STATE: return "MODIFY_VPORT_STATE"; case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: return "QUERY_ESW_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: return "MODIFY_ESW_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: return "QUERY_NIC_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: return "MODIFY_NIC_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: return "QUERY_ROCE_ADDRESS"; case MLX5_CMD_OP_SET_ROCE_ADDRESS: return "SET_ROCE_ADDRESS"; case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: return "QUERY_HCA_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: return "MODIFY_HCA_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: return "QUERY_HCA_VPORT_GID"; case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: return "QUERY_HCA_VPORT_PKEY"; case MLX5_CMD_OP_QUERY_VPORT_COUNTER: return "QUERY_VPORT_COUNTER"; case MLX5_CMD_OP_SET_WOL_ROL: return "SET_WOL_ROL"; case MLX5_CMD_OP_QUERY_WOL_ROL: return "QUERY_WOL_ROL"; case MLX5_CMD_OP_ALLOC_Q_COUNTER: return "ALLOC_Q_COUNTER"; case MLX5_CMD_OP_DEALLOC_Q_COUNTER: return "DEALLOC_Q_COUNTER"; case MLX5_CMD_OP_QUERY_Q_COUNTER: return "QUERY_Q_COUNTER"; case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD"; case MLX5_CMD_OP_DEALLOC_PD: return "DEALLOC_PD"; case MLX5_CMD_OP_ALLOC_UAR: return "ALLOC_UAR"; case MLX5_CMD_OP_DEALLOC_UAR: return "DEALLOC_UAR"; case MLX5_CMD_OP_CONFIG_INT_MODERATION: return "CONFIG_INT_MODERATION"; case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG"; case MLX5_CMD_OP_DETACH_FROM_MCG: return "DETACH_FROM_MCG"; case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: return "GET_DROPPED_PACKET_LOG"; case MLX5_CMD_OP_QUERY_MAD_DEMUX: return "QUERY_MAD_DEMUX"; case MLX5_CMD_OP_SET_MAD_DEMUX: return "SET_MAD_DEMUX"; case MLX5_CMD_OP_NOP: return "NOP"; case MLX5_CMD_OP_ALLOC_XRCD: return "ALLOC_XRCD"; case 
MLX5_CMD_OP_DEALLOC_XRCD: return "DEALLOC_XRCD"; case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: return "ALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: return "DEALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_QUERY_CONG_STATUS: return "QUERY_CONG_STATUS"; case MLX5_CMD_OP_MODIFY_CONG_STATUS: return "MODIFY_CONG_STATUS"; case MLX5_CMD_OP_QUERY_CONG_PARAMS: return "QUERY_CONG_PARAMS"; case MLX5_CMD_OP_MODIFY_CONG_PARAMS: return "MODIFY_CONG_PARAMS"; case MLX5_CMD_OP_QUERY_CONG_STATISTICS: return "QUERY_CONG_STATISTICS"; case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: return "ADD_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: return "DELETE_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: return "SET_L2_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: return "QUERY_L2_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: return "DELETE_L2_TABLE_ENTRY"; case MLX5_CMD_OP_CREATE_RMP: return "CREATE_RMP"; case MLX5_CMD_OP_MODIFY_RMP: return "MODIFY_RMP"; case MLX5_CMD_OP_DESTROY_RMP: return "DESTROY_RMP"; case MLX5_CMD_OP_QUERY_RMP: return "QUERY_RMP"; case MLX5_CMD_OP_CREATE_RQT: return "CREATE_RQT"; case MLX5_CMD_OP_MODIFY_RQT: return "MODIFY_RQT"; case MLX5_CMD_OP_DESTROY_RQT: return "DESTROY_RQT"; case MLX5_CMD_OP_QUERY_RQT: return "QUERY_RQT"; case MLX5_CMD_OP_ACCESS_REG: return "MLX5_CMD_OP_ACCESS_REG"; case MLX5_CMD_OP_CREATE_SQ: return "CREATE_SQ"; case MLX5_CMD_OP_MODIFY_SQ: return "MODIFY_SQ"; case MLX5_CMD_OP_DESTROY_SQ: return "DESTROY_SQ"; case MLX5_CMD_OP_QUERY_SQ: return "QUERY_SQ"; case MLX5_CMD_OP_CREATE_RQ: return "CREATE_RQ"; case MLX5_CMD_OP_MODIFY_RQ: return "MODIFY_RQ"; case MLX5_CMD_OP_DESTROY_RQ: return "DESTROY_RQ"; case MLX5_CMD_OP_QUERY_RQ: return "QUERY_RQ"; case MLX5_CMD_OP_CREATE_TIR: return "CREATE_TIR"; case MLX5_CMD_OP_MODIFY_TIR: return "MODIFY_TIR"; case MLX5_CMD_OP_DESTROY_TIR: return "DESTROY_TIR"; case MLX5_CMD_OP_QUERY_TIR: return "QUERY_TIR"; case MLX5_CMD_OP_CREATE_TIS: return "CREATE_TIS"; case MLX5_CMD_OP_MODIFY_TIS: return "MODIFY_TIS"; case MLX5_CMD_OP_DESTROY_TIS: return "DESTROY_TIS"; case MLX5_CMD_OP_QUERY_TIS: return "QUERY_TIS"; case MLX5_CMD_OP_CREATE_FLOW_TABLE: return "CREATE_FLOW_TABLE"; case MLX5_CMD_OP_DESTROY_FLOW_TABLE: return "DESTROY_FLOW_TABLE"; case MLX5_CMD_OP_QUERY_FLOW_TABLE: return "QUERY_FLOW_TABLE"; case MLX5_CMD_OP_CREATE_FLOW_GROUP: return "CREATE_FLOW_GROUP"; case MLX5_CMD_OP_DESTROY_FLOW_GROUP: return "DESTROY_FLOW_GROUP"; case MLX5_CMD_OP_QUERY_FLOW_GROUP: return "QUERY_FLOW_GROUP"; case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: return "SET_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: return "QUERY_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: return "DELETE_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_SET_DIAGNOSTICS: return "MLX5_CMD_OP_SET_DIAGNOSTICS"; case MLX5_CMD_OP_QUERY_DIAGNOSTICS: return "MLX5_CMD_OP_QUERY_DIAGNOSTICS"; default: return "unknown command opcode"; } } static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; - struct mlx5_cmd_mailbox *next = msg->next; + size_t i; int data_only; - u32 offset = 0; + int offset = 0; + int msg_len = input ? ent->uin_size : ent->uout_size; int dump_len; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); if (data_only) mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, "dump command data %s(0x%x) %s\n", mlx5_command_str(op), op, input ? 
"INPUT" : "OUTPUT"); else mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); if (data_only) { if (input) { dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); offset += sizeof(ent->lay->in); } else { dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); offset += sizeof(ent->lay->out); } } else { dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); offset += sizeof(*ent->lay); } - while (next && offset < msg->len) { + for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { + struct mlx5_cmd_prot_block *block; + + block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); + if (data_only) { - dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); - dump_buf(next->buf, dump_len, 1, offset); + if (offset >= msg_len) + break; + dump_len = min_t(int, + MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); + + dump_buf(block->data, dump_len, 1, offset); offset += MLX5_CMD_DATA_BLOCK_SIZE; } else { mlx5_core_dbg(dev, "command block:\n"); - dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); - offset += sizeof(struct mlx5_cmd_prot_block); + dump_buf(block, sizeof(*block), 0, offset); + offset += sizeof(*block); } - next = next->next; + + /* check for last block */ + if (block->next == 0) + break; } if (data_only) pr_debug("\n"); } static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode, struct mlx5_outbox_hdr *hdr) { hdr->status = 0; hdr->syndrome = 0; switch (opcode) { case MLX5_CMD_OP_TEARDOWN_HCA: case MLX5_CMD_OP_DISABLE_HCA: case MLX5_CMD_OP_MANAGE_PAGES: case MLX5_CMD_OP_DESTROY_MKEY: case MLX5_CMD_OP_DESTROY_EQ: case MLX5_CMD_OP_DESTROY_CQ: case MLX5_CMD_OP_DESTROY_QP: case MLX5_CMD_OP_DESTROY_PSV: case MLX5_CMD_OP_DESTROY_SRQ: case MLX5_CMD_OP_DESTROY_XRC_SRQ: case MLX5_CMD_OP_DESTROY_DCT: case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DEALLOC_PD: case MLX5_CMD_OP_DEALLOC_UAR: case MLX5_CMD_OP_DETACH_FROM_MCG: case MLX5_CMD_OP_DEALLOC_XRCD: case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: case MLX5_CMD_OP_DESTROY_LAG: case MLX5_CMD_OP_DESTROY_VPORT_LAG: case MLX5_CMD_OP_DESTROY_TIR: case MLX5_CMD_OP_DESTROY_SQ: case MLX5_CMD_OP_DESTROY_RQ: case MLX5_CMD_OP_DESTROY_RMP: case MLX5_CMD_OP_DESTROY_TIS: case MLX5_CMD_OP_DESTROY_RQT: case MLX5_CMD_OP_DESTROY_FLOW_TABLE: case MLX5_CMD_OP_DESTROY_FLOW_GROUP: case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: case MLX5_CMD_OP_2ERR_QP: case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: case MLX5_CMD_OP_MODIFY_VPORT_STATE: case MLX5_CMD_OP_MODIFY_SQ: case MLX5_CMD_OP_MODIFY_RQ: case MLX5_CMD_OP_MODIFY_TIS: case MLX5_CMD_OP_MODIFY_LAG: case MLX5_CMD_OP_MODIFY_TIR: case MLX5_CMD_OP_MODIFY_RMP: case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_MODIFY_CONG_PARAMS: case MLX5_CMD_OP_MODIFY_CONG_STATUS: case MLX5_CMD_OP_MODIFY_CQ: case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP: case MLX5_CMD_OP_ACCESS_REG: case MLX5_CMD_OP_DRAIN_DCT: return 0; case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case 
MLX5_CMD_OP_ALLOC_PD: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_ALLOC_UAR: case MLX5_CMD_OP_ALLOC_XRCD: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_RQ: case MLX5_CMD_OP_ARM_XRC_SRQ: case MLX5_CMD_OP_ATTACH_TO_MCG: case MLX5_CMD_OP_CONFIG_INT_MODERATION: case MLX5_CMD_OP_CREATE_CQ: case MLX5_CMD_OP_CREATE_DCT: case MLX5_CMD_OP_CREATE_EQ: case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_CREATE_FLOW_TABLE: case MLX5_CMD_OP_CREATE_LAG: case MLX5_CMD_OP_CREATE_MKEY: case MLX5_CMD_OP_CREATE_PSV: case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: case MLX5_CMD_OP_CREATE_QP: case MLX5_CMD_OP_CREATE_RMP: case MLX5_CMD_OP_CREATE_RQ: case MLX5_CMD_OP_CREATE_RQT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_CREATE_SQ: case MLX5_CMD_OP_CREATE_SRQ: case MLX5_CMD_OP_CREATE_TIR: case MLX5_CMD_OP_CREATE_TIS: case MLX5_CMD_OP_CREATE_VPORT_LAG: case MLX5_CMD_OP_CREATE_XRC_SRQ: case MLX5_CMD_OP_ENABLE_HCA: case MLX5_CMD_OP_GEN_EQE: case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: case MLX5_CMD_OP_INIT2INIT_QP: case MLX5_CMD_OP_INIT2RTR_QP: case MLX5_CMD_OP_INIT_HCA: case MLX5_CMD_OP_MAD_IFC: case MLX5_CMD_OP_NOP: case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_QUERY_ADAPTER: case MLX5_CMD_OP_QUERY_CONG_PARAMS: case MLX5_CMD_OP_QUERY_CONG_STATISTICS: case MLX5_CMD_OP_QUERY_CONG_STATUS: case MLX5_CMD_OP_QUERY_CQ: case MLX5_CMD_OP_QUERY_DCT: case MLX5_CMD_OP_QUERY_EQ: case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_GROUP: case MLX5_CMD_OP_QUERY_FLOW_TABLE: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_HCA_CAP: case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: case MLX5_CMD_OP_QUERY_ISSI: case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_LAG: case MLX5_CMD_OP_QUERY_MAD_DEMUX: case MLX5_CMD_OP_QUERY_MKEY: case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP: case MLX5_CMD_OP_QUERY_PAGES: case MLX5_CMD_OP_QUERY_QP: case MLX5_CMD_OP_QUERY_Q_COUNTER: case MLX5_CMD_OP_QUERY_RMP: case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: case MLX5_CMD_OP_QUERY_RQ: case MLX5_CMD_OP_QUERY_RQT: case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: case MLX5_CMD_OP_QUERY_SQ: case MLX5_CMD_OP_QUERY_SRQ: case MLX5_CMD_OP_QUERY_TIR: case MLX5_CMD_OP_QUERY_TIS: case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_QUERY_VPORT_STATE: case MLX5_CMD_OP_QUERY_XRC_SRQ: case MLX5_CMD_OP_RST2INIT_QP: case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_SET_DC_CNAK_TRACE: case MLX5_CMD_OP_SET_HCA_CAP: case MLX5_CMD_OP_SET_ISSI: case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_SET_MAD_DEMUX: case MLX5_CMD_OP_SET_ROCE_ADDRESS: case MLX5_CMD_OP_SQD_RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP: hdr->status = MLX5_CMD_STAT_INT_ERR; hdr->syndrome = 0xFFFFFFFF; return -ECANCELED; default: mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode); return -EINVAL; } } static void complete_command(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); mlx5_cmd_cbk_t callback; void *context; s64 ds; struct mlx5_cmd_stats *stats; unsigned long flags; int err; struct semaphore *sem; if (ent->page_queue) sem = &cmd->pages_sem; else sem = &cmd->sem; if (dev->state != MLX5_DEVICE_STATE_UP) { struct mlx5_outbox_hdr *out_hdr = (struct mlx5_outbox_hdr *)ent->out; struct mlx5_inbox_hdr 
*in_hdr = (struct mlx5_inbox_hdr *)(ent->in->first.data); u16 opcode = be16_to_cpu(in_hdr->opcode); ent->ret = set_internal_err_outbox(dev, opcode, out_hdr); } if (ent->callback) { ds = ent->ts2 - ent->ts1; if (ent->op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[ent->op]; spin_lock_irqsave(&stats->lock, flags); stats->sum += ds; ++stats->n; spin_unlock_irqrestore(&stats->lock, flags); } callback = ent->callback; context = ent->context; err = ent->ret; if (!err) err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); free_cmd(ent); callback(err, context); } else { complete(&ent->done); } up(sem); } static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); struct mlx5_cmd_layout *lay; struct semaphore *sem; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; if (cmd->moving_to_polling) { mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); return; } down(sem); if (alloc_ent(ent) < 0) { complete_command(ent); return; } ent->token = alloc_token(cmd); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); ent->op = be32_to_cpu(lay->in[0]) >> 16; - if (ent->in->next) - lay->in_ptr = cpu_to_be64(ent->in->next->dma); - lay->inlen = cpu_to_be32(ent->in->len); - if (ent->out->next) - lay->out_ptr = cpu_to_be64(ent->out->next->dma); - lay->outlen = cpu_to_be32(ent->out->len); + if (ent->in->numpages != 0) + lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0)); + if (ent->out->numpages != 0) + lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0)); + lay->inlen = cpu_to_be32(ent->uin_size); + lay->outlen = cpu_to_be32(ent->uout_size); lay->type = MLX5_PCI_CMD_XPORT; lay->token = ent->token; lay->status_own = CMD_OWNER_HW; set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); ent->busy = 0; /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); - wmb(); + /* make sure data is written to RAM */ + mlx5_fwp_flush(cmd->cmd_page); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point*/ if (cmd->mode == CMD_MODE_POLLING) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ - rmb(); mlx5_cmd_comp_handler(dev, 1U << ent->idx); } } static const char *deliv_status_to_str(u8 status) { switch (status) { case MLX5_CMD_DELIVERY_STAT_OK: return "no errors"; case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: return "signature error"; case MLX5_CMD_DELIVERY_STAT_TOK_ERR: return "token error"; case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: return "bad block number"; case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: return "output pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: return "input pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_FW_ERR: return "firmware internal error"; case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: return "command input length error"; case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: return "command ouput length error"; case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: return "reserved fields not cleared"; case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: return "bad command descriptor type"; default: return "unknown status 
code"; } } static u16 msg_to_opcode(struct mlx5_cmd_msg *in) { struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); return be16_to_cpu(hdr->opcode); } static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd *cmd = &dev->cmd; int err; if (cmd->mode == CMD_MODE_POLLING) { wait_for_completion(&ent->done); err = ent->ret; } else { if (!wait_for_completion_timeout(&ent->done, timeout)) err = -ETIMEDOUT; else err = 0; } if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); return err; } /* Notes: * 1. Callback functions may not sleep * 2. page queue commands do not support asynchrous completion */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + int uin_size, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; struct mlx5_cmd_stats *stats; int err = 0; s64 ds; u16 op; if (callback && page_queue) return -EINVAL; - ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, - page_queue); + ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback, + context, page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); if (!callback) init_completion(&ent->done); INIT_WORK(&ent->work, cmd_work_handler); if (page_queue) { cmd_work_handler(&ent->work); } else if (!queue_work(cmd->wq, &ent->work)) { mlx5_core_warn(dev, "failed to queue work\n"); err = -ENOMEM; goto out_free; } if (!callback) { err = wait_func(dev, ent); if (err == -ETIMEDOUT) goto out; ds = ent->ts2 - ent->ts1; op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; spin_unlock_irq(&stats->lock); } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", mlx5_command_str(op), (long long)ds); *status = ent->status; free_cmd(ent); } return err; out_free: free_cmd(ent); out: return err; } -static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) { - struct mlx5_cmd_prot_block *block; - struct mlx5_cmd_mailbox *next; - int copy; + size_t delta; + size_t i; - if (!to || !from) - return -ENOMEM; + if (to == NULL || from == NULL) + return (-ENOMEM); - copy = min_t(int, size, sizeof(to->first.data)); - memcpy(to->first.data, from, copy); - size -= copy; - from += copy; + delta = min_t(size_t, size, sizeof(to->first.data)); + memcpy(to->first.data, from, delta); + from = (char *)from + delta; + size -= delta; - next = to->next; - while (size) { - if (!next) { - /* this is a BUG */ - return -ENOMEM; - } + for (i = 0; size != 0; i++) { + struct mlx5_cmd_prot_block *block; - copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); - block = next->buf; - memcpy(block->data, from, copy); - from += copy; - size -= copy; - next = next->next; - } + block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); - return 0; + delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); + memcpy(block->data, from, delta); + from = (char *)from + delta; + size -= delta; + } + return (0); } static int 
mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) { - struct mlx5_cmd_prot_block *block; - struct mlx5_cmd_mailbox *next; - int copy; + size_t delta; + size_t i; - if (!to || !from) - return -ENOMEM; + if (to == NULL || from == NULL) + return (-ENOMEM); - copy = min_t(int, size, sizeof(from->first.data)); - memcpy(to, from->first.data, copy); - size -= copy; - to += copy; + delta = min_t(size_t, size, sizeof(from->first.data)); + memcpy(to, from->first.data, delta); + to = (char *)to + delta; + size -= delta; - next = from->next; - while (size) { - if (!next) { - /* this is a BUG */ - return -ENOMEM; - } + for (i = 0; size != 0; i++) { + struct mlx5_cmd_prot_block *block; - copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); - block = next->buf; + block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); - memcpy(to, block->data, copy); - to += copy; - size -= copy; - next = next->next; + delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); + memcpy(to, block->data, delta); + to = (char *)to + delta; + size -= delta; } - - return 0; + return (0); } -static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, - gfp_t flags) +static struct mlx5_cmd_msg * +mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) { - struct mlx5_cmd_mailbox *mailbox; + struct mlx5_cmd_msg *msg; + size_t blen; + size_t n; + size_t i; - mailbox = kmalloc(sizeof(*mailbox), flags); - if (!mailbox) - return ERR_PTR(-ENOMEM); + blen = size - min_t(size_t, sizeof(msg->first.data), size); + n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE); - mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, - &mailbox->dma); - if (!mailbox->buf) { - mlx5_core_dbg(dev, "failed allocation\n"); - kfree(mailbox); - return ERR_PTR(-ENOMEM); - } - memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); - mailbox->next = NULL; + msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); + if (msg == NULL) + return (ERR_PTR(-ENOMEM)); - return mailbox; -} + for (i = 0; i != n; i++) { + struct mlx5_cmd_prot_block *block; -static void free_cmd_box(struct mlx5_core_dev *dev, - struct mlx5_cmd_mailbox *mailbox) -{ - pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); - kfree(mailbox); -} + block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); -static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, - gfp_t flags, int size) -{ - struct mlx5_cmd_mailbox *tmp, *head = NULL; - struct mlx5_cmd_prot_block *block; - struct mlx5_cmd_msg *msg; - int blen; - int err; - int n; - int i; + memset(block, 0, MLX5_CMD_MBOX_SIZE); - msg = kzalloc(sizeof(*msg), flags); - if (!msg) - return ERR_PTR(-ENOMEM); - - blen = size - min_t(int, sizeof(msg->first.data), size); - n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; - - for (i = 0; i < n; i++) { - tmp = alloc_cmd_box(dev, flags); - if (IS_ERR(tmp)) { - mlx5_core_warn(dev, "failed allocating block\n"); - err = PTR_ERR(tmp); - goto err_alloc; + if (i != (n - 1)) { + u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); + block->next = cpu_to_be64(dma); } - - block = tmp->buf; - tmp->next = head; - block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); - block->block_num = cpu_to_be32(n - i - 1); - head = tmp; + block->block_num = cpu_to_be32(i); } - msg->next = head; - msg->len = size; - return msg; -err_alloc: - while (head) { - tmp = head->next; - free_cmd_box(dev, head); - head = tmp; - } - kfree(msg); + /* make sure initial data is written to RAM */ + mlx5_fwp_flush(msg); - return ERR_PTR(err); + return (msg); } -static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, - struct mlx5_cmd_msg *msg) +static void +mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { - struct mlx5_cmd_mailbox *head = msg->next; - struct mlx5_cmd_mailbox *next; - while (head) { - next = head->next; - free_cmd_box(dev, head); - head = next; - } - kfree(msg); + mlx5_fwp_free(msg); } static void set_wqname(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", dev_name(&dev->pdev->dev)); } static void clean_debug_files(struct mlx5_core_dev *dev) { } void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; int i; for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); down(&cmd->pages_sem); flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_EVENTS; up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; synchronize_irq(dev->priv.eq_table.pages_eq.irqn); flush_workqueue(dev->priv.pg_wq); cmd->moving_to_polling = 1; flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_POLLING; cmd->moving_to_polling = 0; } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { unsigned long flags; if (msg->cache) { spin_lock_irqsave(&msg->cache->lock, flags); list_add_tail(&msg->list, &msg->cache->head); spin_unlock_irqrestore(&msg->cache->lock, flags); } else { mlx5_free_cmd_msg(dev, msg); } } void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; int i; + /* make sure data gets read from RAM */ + mlx5_fwp_invalidate(cmd->cmd_page); + while (vector != 0) { i = ffs(vector) - 1; vector &= ~(1U << i); ent = cmd->ent_arr[i]; ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); + /* make sure data gets read from RAM */ + mlx5_fwp_invalidate(ent->out); dump_command(dev, ent, 0); if (!ent->ret) { if (!cmd->checksum_disabled) ent->ret = verify_signature(ent); else ent->ret = 0; ent->status = ent->lay->status_own >> 1; mlx5_core_dbg(dev, "FW command ret 0x%x, status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); } free_ent(cmd, ent->idx); complete_command(ent); } } EXPORT_SYMBOL(mlx5_cmd_comp_handler); void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long vector; int i = 0; unsigned long flags; synchronize_irq(dev->priv.eq_table.cmd_eq.irqn); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); if (!vector) return; for (i = 0; i < (1 << dev->cmd.log_sz); i++) { struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i]; if (!test_bit(i, &vector)) continue; while (ent->busy) usleep_range(1000, 1100); free_ent(&dev->cmd, i); complete_command(ent); } } EXPORT_SYMBOL(mlx5_trigger_cmd_completions); static int status_to_err(u8 status) { return status ? 
-1 : 0; /* TBD more meaningful codes */ } static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); struct mlx5_cmd *cmd = &dev->cmd; struct cache_ent *ent = NULL; if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) ent = &cmd->cache.large; else if (in_size > 16 && in_size <= MED_LIST_SIZE) ent = &cmd->cache.med; if (ent) { spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { msg = list_entry(ent->head.next, struct mlx5_cmd_msg, list); - /* For cached lists, we must explicitly state what is - * the real size - */ - msg->len = in_size; list_del(&msg->list); } spin_unlock_irq(&ent->lock); } if (IS_ERR(msg)) msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); return msg; } static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; } static int cmd_exec_helper(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; int pages_queue; gfp_t gfp; int err; u8 status = 0; pages_queue = is_manage_pages(in); gfp = callback ? GFP_ATOMIC : GFP_KERNEL; inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); return err; } err = mlx5_copy_to_msg(inb, in, in_size); if (err) { mlx5_core_warn(dev, "err %d\n", err); goto out_in; } outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } - err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, - pages_queue, &status); + err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, + context, pages_queue, &status); if (err) { if (err == -ETIMEDOUT) return err; goto out_out; } mlx5_core_dbg(dev, "err %d, status %d\n", err, status); if (status) { err = status_to_err(status); goto out_out; } if (callback) return err; err = mlx5_copy_from_msg(out, outb, out_size); out_out: mlx5_free_cmd_msg(dev, outb); out_in: free_msg(dev, inb); return err; } int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); } EXPORT_SYMBOL(mlx5_cmd_exec); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context) { return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); } EXPORT_SYMBOL(mlx5_cmd_exec_cb); static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_msg *msg; struct mlx5_cmd_msg *n; list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { list_del(&msg->list); mlx5_free_cmd_msg(dev, msg); } list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { list_del(&msg->list); mlx5_free_cmd_msg(dev, msg); } } static int create_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_msg *msg; int err; int i; spin_lock_init(&cmd->cache.large.lock); INIT_LIST_HEAD(&cmd->cache.large.head); spin_lock_init(&cmd->cache.med.lock); INIT_LIST_HEAD(&cmd->cache.med.head); for (i = 0; i < NUM_LONG_LISTS; i++) { msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; } msg->cache = &cmd->cache.large; list_add_tail(&msg->list, &cmd->cache.large.head); } for (i = 0; i < NUM_MED_LISTS; i++) { msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; } msg->cache = 
&cmd->cache.med; list_add_tail(&msg->list, &cmd->cache.med.head); } return 0; ex_err: destroy_msg_cache(dev); return err; } -static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +static int +alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, - &cmd->alloc_dma, GFP_KERNEL); - if (!cmd->cmd_alloc_buf) - return -ENOMEM; + int err; - /* make sure it is aligned to 4K */ - if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { - cmd->cmd_buf = cmd->cmd_alloc_buf; - cmd->dma = cmd->alloc_dma; - cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; - return 0; + sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); + mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); + cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); + + /* + * Create global DMA descriptor tag for allocating + * 4K firmware pages: + */ + err = -bus_dma_tag_create( + bus_get_dma_tag(dev->pdev->dev.bsddev), + MLX5_ADAPTER_PAGE_SIZE, /* alignment */ + 0, /* no boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ + 1, /* nsegments */ + MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &cmd->dma_tag); + if (err != 0) + goto failure_destroy_sx; + + cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); + if (cmd->cmd_page == NULL) { + err = -ENOMEM; + goto failure_alloc_page; } + cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); + cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); + return (0); - dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, cmd->alloc_dma); - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 2 * MLX5_ADAPTER_PAGE_SIZE - 1, - &cmd->alloc_dma, GFP_KERNEL); - if (!cmd->cmd_alloc_buf) - return -ENOMEM; +failure_alloc_page: + bus_dma_tag_destroy(cmd->dma_tag); - cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); - cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); - cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; - return 0; +failure_destroy_sx: + cv_destroy(&cmd->dma_cv); + mtx_destroy(&cmd->dma_mtx); + sx_destroy(&cmd->dma_sx); + return (err); } -static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +static void +free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; - dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma); + + mlx5_fwp_free(cmd->cmd_page); + bus_dma_tag_destroy(cmd->dma_tag); + cv_destroy(&cmd->dma_cv); + mtx_destroy(&cmd->dma_mtx); + sx_destroy(&cmd->dma_sx); } int mlx5_cmd_init(struct mlx5_core_dev *dev) { - int size = sizeof(struct mlx5_cmd_prot_block); - int align = roundup_pow_of_two(size); struct mlx5_cmd *cmd = &dev->cmd; u32 cmd_h, cmd_l; u16 cmd_if_rev; int err; int i; cmd_if_rev = cmdif_rev_get(dev); if (cmd_if_rev != CMD_IF_REV) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); return -EINVAL; } - cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); - if (!cmd->pool) - return -ENOMEM; - err = alloc_cmd_page(dev, cmd); if (err) goto err_free_pool; cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; cmd->log_sz = cmd_l >> 4 & 0xf; cmd->log_stride = cmd_l & 0xf; if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many 
outstanding commands %d\n", 1 << cmd->log_sz); err = -EINVAL; goto err_free_page; } if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); err = -EINVAL; goto err_free_page; } cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); err = -ENOTSUPP; goto err_free_page; } spin_lock_init(&cmd->alloc_lock); spin_lock_init(&cmd->token_lock); for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) spin_lock_init(&cmd->stats[i].lock); sema_init(&cmd->sem, cmd->max_reg_cmds); sema_init(&cmd->pages_sem, 1); cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); if (cmd_l & 0xfff) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); err = -ENOMEM; goto err_free_page; } iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); /* Make sure firmware sees the complete address before we proceed */ wmb(); mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); cmd->mode = CMD_MODE_POLLING; err = create_msg_cache(dev); if (err) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); goto err_free_page; } set_wqname(dev); cmd->wq = create_singlethread_workqueue(cmd->wq_name); if (!cmd->wq) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); err = -ENOMEM; goto err_cache; } return 0; err_cache: destroy_msg_cache(dev); err_free_page: free_cmd_page(dev, cmd); err_free_pool: - pci_pool_destroy(cmd->pool); - return err; } EXPORT_SYMBOL(mlx5_cmd_init); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; clean_debug_files(dev); destroy_workqueue(cmd->wq); destroy_msg_cache(dev); free_cmd_page(dev, cmd); - pci_pool_destroy(cmd->pool); } EXPORT_SYMBOL(mlx5_cmd_cleanup); static const char *cmd_status_str(u8 status) { switch (status) { case MLX5_CMD_STAT_OK: return "OK"; case MLX5_CMD_STAT_INT_ERR: return "internal error"; case MLX5_CMD_STAT_BAD_OP_ERR: return "bad operation"; case MLX5_CMD_STAT_BAD_PARAM_ERR: return "bad parameter"; case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return "bad system state"; case MLX5_CMD_STAT_BAD_RES_ERR: return "bad resource"; case MLX5_CMD_STAT_RES_BUSY: return "resource busy"; case MLX5_CMD_STAT_LIM_ERR: return "limits exceeded"; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return "bad resource state"; case MLX5_CMD_STAT_IX_ERR: return "bad index"; case MLX5_CMD_STAT_NO_RES_ERR: return "no resources"; case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return "bad input length"; case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return "bad output length"; case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return "bad QP state"; case MLX5_CMD_STAT_BAD_PKT_ERR: return "bad packet (discarded)"; case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return "bad size too many outstanding CQEs"; default: return "unknown status"; } } static int cmd_status_to_err_helper(u8 status) { switch (status) { case MLX5_CMD_STAT_OK: return 0; case MLX5_CMD_STAT_INT_ERR: return -EIO; case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; case 
MLX5_CMD_STAT_RES_BUSY: return -EBUSY; case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_IX_ERR: return -EINVAL; case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; default: return -EIO; } } /* this will be available till all the commands use set/get macros */ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) { if (!hdr->status) return 0; printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); return cmd_status_to_err_helper(hdr->status); } int mlx5_cmd_status_to_err_v2(void *ptr) { u32 syndrome; u8 status; status = be32_to_cpu(*(__be32 *)ptr) >> 24; if (!status) return 0; syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); return cmd_status_to_err_helper(status); } Index: head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c (revision 312881) +++ head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c (revision 312882) @@ -1,528 +1,622 @@ /*- - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
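mlx5_cmd_status_to_err_v2() above decodes the output mailbox header directly from wire format: the 8-bit status sits in the top byte of the first big-endian 32-bit word and the syndrome is the second word. A standalone sketch of that decoding, with a hand-rolled helper in place of be32_to_cpu() and an invented example header, might be:

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit big-endian value from a byte buffer, as be32_to_cpu() would. */
static uint32_t
get_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24 | (uint32_t)src[1] << 16 |
	    (uint32_t)src[2] << 8 | (uint32_t)src[3]);
}

int
main(void)
{
	/* Example output header bytes: status 0x02, syndrome 0x00abcdef. */
	uint8_t out[8] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef };
	uint8_t status;
	uint32_t syndrome;

	status = (uint8_t)(get_be32(out) >> 24);
	syndrome = get_be32(out + 4);

	printf("status 0x%02x, syndrome 0x%08x\n", status, syndrome);
	return (0);
}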
 *
 * $FreeBSD$
 */

#include
#include
#include
#include
#include "mlx5_core.h"

+CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);
+
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

-struct mlx5_fw_page {
-	struct rb_node rb_node;
-	u64 addr;
-	struct page *page;
-	u16 func_id;
-	unsigned long bitmask;
-	struct list_head list;
-	unsigned free_count;
-};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

-enum {
-	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
-	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
-};
+static void
+mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	struct mlx5_fw_page *fwp;
+	uint8_t owned;

-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+	fwp = (struct mlx5_fw_page *)arg;
+	owned = MLX5_DMA_OWNED(fwp->dev);
+
+	if (!owned)
+		MLX5_DMA_LOCK(fwp->dev);
+
+	if (error == 0) {
+		KASSERT(nseg == 1, ("Number of segments is different from 1"));
+		fwp->dma_addr = segs->ds_addr;
+		fwp->load_done = MLX5_LOAD_ST_SUCCESS;
+	} else {
+		fwp->load_done = MLX5_LOAD_ST_FAILURE;
+	}
+	MLX5_DMA_DONE(fwp->dev);
+
+	if (!owned)
+		MLX5_DMA_UNLOCK(fwp->dev);
+}
+
+void
+mlx5_fwp_flush(struct mlx5_fw_page *fwp)
{
+	unsigned num = fwp->numpages;
+
+	while (num--)
+		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREWRITE);
+}
+
+void
+mlx5_fwp_invalidate(struct mlx5_fw_page *fwp)
+{
+	unsigned num = fwp->numpages;
+
+	while (num--) {
+		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_POSTREAD);
+		bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREREAD);
+	}
+}
+
+struct mlx5_fw_page *
+mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num)
+{
+	struct mlx5_fw_page *fwp;
+	unsigned x;
+	int err;
+
+	/* check for special case */
+	if (num == 0) {
+		fwp = kzalloc(sizeof(*fwp), flags);
+		if (fwp != NULL)
+			fwp->dev = dev;
+		return (fwp);
+	}
+
+	/* we need sleeping context for this function */
+	if (flags & M_NOWAIT)
+		return (NULL);
+
+	fwp = kzalloc(sizeof(*fwp) * num, flags);
+
+	/* serialize loading the DMA map(s) */
+	sx_xlock(&dev->cmd.dma_sx);
+
+	for (x = 0; x != num; x++) {
+		/* store pointer to MLX5 core device */
+		fwp[x].dev = dev;
+		/* store number of pages left from the array */
+		fwp[x].numpages = num - x;
+
+		/* allocate memory */
+		err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr,
+		    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map);
+		if (err != 0)
+			goto failure;
+
+		/* load memory into DMA */
+		MLX5_DMA_LOCK(dev);
+		err = bus_dmamap_load(
+		    dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr,
+		    MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb,
+		    fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
+
+		while (fwp[x].load_done == MLX5_LOAD_ST_NONE)
+			MLX5_DMA_WAIT(dev);
+		MLX5_DMA_UNLOCK(dev);
+
+		/* check for error */
+		if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) {
+			bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
+			    fwp[x].dma_map);
+			goto failure;
+		}
+	}
+	sx_xunlock(&dev->cmd.dma_sx);
+	return (fwp);
+
+failure:
+	while (x--) {
+		bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map);
+		bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map);
+	}
+	sx_xunlock(&dev->cmd.dma_sx);
+	return (NULL);
+}
+
+void
+mlx5_fwp_free(struct mlx5_fw_page *fwp)
+{
+	struct mlx5_core_dev *dev;
+	unsigned num;
+
+	/* be NULL safe */
+	if (fwp == NULL)
+		return;
+
+	/* check for special case */
+	if (fwp->numpages == 0) {
+		kfree(fwp);
+		return;
+	}
+
+	num = fwp->numpages;
+	dev = fwp->dev;
+
+	/* serialize unloading the DMA maps */
+	sx_xlock(&dev->cmd.dma_sx);
+	while (num--) {
+		bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map);
+		bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr, fwp[num].dma_map);
+	}
+	sx_xunlock(&dev->cmd.dma_sx);
+
+	kfree(fwp);
+}
+
+u64
+mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset)
+{
+	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
+	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));
+
+	return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
+}
+
+void *
+mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset)
+{
+	size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
+	KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));
+
+	return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
+}
+
+static int
+mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp)
+{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
-	struct mlx5_fw_page *nfp;
	struct mlx5_fw_page *tfp;
-	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
-		if (tfp->addr < addr)
+		if (tfp->dma_addr < nfp->dma_addr)
			new = &parent->rb_left;
-		else if (tfp->addr > addr)
+		else if (tfp->dma_addr > nfp->dma_addr)
			new = &parent->rb_right;
		else
-			return -EEXIST;
+			return (-EEXIST);
	}

-	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
-
-	nfp->addr = addr;
-	nfp->page = page;
-	nfp->func_id = func_id;
-	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
-	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
-		set_bit(i, &nfp->bitmask);
-
	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
-	list_add(&nfp->list, &dev->priv.free_list);
-
-	return 0;
+	return (0);
}

-static struct mlx5_fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct mlx5_fw_page *
+mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct mlx5_fw_page *result = NULL;
	struct mlx5_fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
-		if (tfp->addr < addr) {
+		if (tfp->dma_addr < addr) {
			tmp = tmp->rb_left;
-		} else if (tfp->addr > addr) {
+		} else if (tfp->dma_addr > addr) {
			tmp = tmp->rb_right;
		} else {
+			rb_erase(&tfp->rb_node, &dev->priv.page_root);
			result = tfp;
			break;
		}
	}
+	return (result);
+}
-
-	return result;
+static int
+alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+{
+	struct mlx5_fw_page *fwp;
+	int err;
+
+	fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
+	if (fwp == NULL)
+		return (-ENOMEM);
+
+	fwp->func_id = func_id;
+
+	MLX5_DMA_LOCK(dev);
+	err = mlx5_insert_fw_page_locked(dev, fwp);
+	MLX5_DMA_UNLOCK(dev);
+
+	if (err != 0) {
+		mlx5_fwp_free(fwp);
+	} else {
+		/* make sure cached data is cleaned */
+		mlx5_fwp_invalidate(fwp);
+
+		/* store DMA address */
+		*addr = fwp->dma_addr;
+	}
+	return (err);
}

+static void
+free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+	struct mlx5_fw_page *fwp;
+
+	MLX5_DMA_LOCK(dev);
+	fwp = mlx5_remove_fw_page_locked(dev, addr);
+	MLX5_DMA_UNLOCK(dev);
+
+	if (fwp == NULL) {
+		mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n",
+		    (long long)addr);
+		return;
+	}
+	mlx5_fwp_free(fwp);
+}
+
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)];
	u32 out[MLX5_ST_SZ_DW(query_pages_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
-{
-	struct mlx5_fw_page *fp;
-	unsigned n;
-
-	if (list_empty(&dev->priv.free_list))
-		return -ENOMEM;
-
-	fp = list_entry(dev->priv.free_list.next, struct mlx5_fw_page, list);
-	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
-	if (n >= MLX5_NUM_4K_IN_PAGE) {
-		mlx5_core_warn(dev, "alloc 4k bug\n");
-		return -ENOENT;
-	}
-	clear_bit(n, &fp->bitmask);
-	fp->free_count--;
-	if (!fp->free_count)
-		list_del(&fp->list);
-
-	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
-
-	return 0;
-}
-
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
-{
-	struct mlx5_fw_page *fwp;
-	int n;
-
-	fwp = find_fw_page(dev, addr & PAGE_MASK);
-	if (!fwp) {
-		mlx5_core_warn(dev, "page not found\n");
-		return;
-	}
-
-	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
-	fwp->free_count++;
-	set_bit(n, &fwp->bitmask);
-	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-		rb_erase(&fwp->rb_node, &dev->priv.page_root);
-		if (fwp->free_count != 1)
-			list_del(&fwp->list);
-		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-		__free_page(fwp->page);
-		kfree(fwp);
-	} else if (fwp->free_count == 1) {
-		list_add(&fwp->list, &dev->priv.free_list);
-	}
-}
-
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
-{
-	struct page *page;
-	u64 addr;
-	int err;
-
-	page = alloc_page(GFP_HIGHUSER);
-	if (!page) {
-		mlx5_core_warn(dev, "failed to allocate page\n");
-		return -ENOMEM;
-	}
-	addr = dma_map_page(&dev->pdev->dev, page, 0,
-			    PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&dev->pdev->dev, addr)) {
-		mlx5_core_warn(dev, "failed dma mapping page\n");
-		err = -ENOMEM;
-		goto out_alloc;
-	}
-	err = insert_page(dev, addr, page, func_id);
-	if (err) {
-		mlx5_core_err(dev, "failed to track allocated page\n");
-		goto out_mapping;
-	}
-
-	return 0;
-
-out_mapping:
-	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-
-out_alloc:
-	__free_page(page);
-	return err;
-}

static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct mlx5_manage_pages_inbox *nin;
	int inlen;
	u64 addr;
	int err;
	int i = 0;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		err = -ENOMEM;
		goto out_alloc;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
-retry:
-		err = alloc_4k(dev, &addr);
-		if (err) {
-			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
-			if (err)
-				goto out_alloc;
-
-			goto retry;
-		}
+		err = alloc_4k(dev, &addr, func_id);
+		if (err)
+			goto out_alloc;
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	dev->priv.pages_per_func[func_id] += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
+		if (!nin)
+			goto out_4k;
+
		memset(&out, 0, sizeof(out));
		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		nin->func_id = cpu_to_be16(func_id);
		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kfree(nin);
	}
+
+out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));

out_free:
	kvfree(in);
	return err;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed)
{
	struct mlx5_manage_pages_inbox	in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	dev->priv.pages_per_func[func_id] -= num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	kvfree(out);
	return err;
}

static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ?
"boot" : "init", func_id); return give_pages(dev, func_id, npages, 0); } enum { MLX5_BLKS_FOR_RECLAIM_PAGES = 12 }; s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev) { int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); s64 prevpages = 0; s64 npages = 0; while (!time_after(jiffies, end)) { /* exclude own function, VFs only */ npages = dev->priv.fw_pages - dev->priv.pages_per_func[0]; if (!npages) break; if (npages != prevpages) end = end + msecs_to_jiffies(100); prevpages = npages; msleep(1); } if (npages) mlx5_core_warn(dev, "FW did not return all VFs pages, will cause to memory leak\n"); return -npages; } static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - sizeof(struct mlx5_manage_pages_outbox)) / FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) { int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); struct mlx5_fw_page *fwp; struct rb_node *p; int nclaimed = 0; int err; do { p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct mlx5_fw_page, rb_node); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { --dev->priv.fw_pages; - free_4k(dev, fwp->addr); + free_4k(dev, fwp->dma_addr); nclaimed = 1; } else { err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), &nclaimed); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); return err; } } if (nclaimed) end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); } if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); break; } } while (p); return 0; } void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { + dev->priv.page_root = RB_ROOT; - INIT_LIST_HEAD(&dev->priv.free_list); } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { /* nothing */ } int mlx5_pagealloc_start(struct mlx5_core_dev *dev) { dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); if (!dev->priv.pg_wq) return -ENOMEM; return 0; } void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { destroy_workqueue(dev->priv.pg_wq); }