Index: head/sys/dev/mlx5/driver.h =================================================================== --- head/sys/dev/mlx5/driver.h (revision 341559) +++ head/sys/dev/mlx5/driver.h (revision 341560) @@ -1,1137 +1,1137 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MLX5_DRIVER_H #define MLX5_DRIVER_H #include "opt_ratelimit.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #define MLX5_QCOUNTER_SETS_NETDEV 64 #define MLX5_MAX_NUMBER_OF_VFS 128 enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, }; enum { MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; enum { CMD_OWNER_SW = 0x0, CMD_OWNER_HW = 0x1, CMD_STATUS_SUCCESS = 0, }; enum mlx5_sqp_t { MLX5_SQP_SMI = 0, MLX5_SQP_GSI = 1, MLX5_SQP_IEEE_1588 = 2, MLX5_SQP_SNIFFER = 3, MLX5_SQP_SYNC_UMR = 4, }; enum { MLX5_MAX_PORTS = 2, }; enum { MLX5_EQ_VEC_PAGES = 0, MLX5_EQ_VEC_CMD = 1, MLX5_EQ_VEC_ASYNC = 2, MLX5_EQ_VEC_COMP_BASE, }; enum { MLX5_MAX_IRQ_NAME = 32 }; enum { MLX5_ATOMIC_MODE_OFF = 16, MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF, MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF, }; enum { MLX5_ATOMIC_MODE_DCT_OFF = 20, MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF, MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF, }; enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2, MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3, }; enum { 
MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QPDP = 0x4007, MLX5_REG_QTCT = 0x400A, MLX5_REG_QPDPM = 0x4013, MLX5_REG_QHLL = 0x4016, MLX5_REG_QCAM = 0x4019, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PPTB = 0x500B, MLX5_REG_PBMC = 0x500C, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PMLP = 0x5002, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MPCNT = 0x9051, }; enum dbg_rsc_type { MLX5_DBG_RSC_QP, MLX5_DBG_RSC_EQ, MLX5_DBG_RSC_CQ, }; enum { MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, MLX5_INTERFACE_NUMBER = 2, }; struct mlx5_field_desc { struct dentry *dent; int i; }; struct mlx5_rsc_debug { struct mlx5_core_dev *dev; void *object; enum dbg_rsc_type type; struct dentry *root; struct mlx5_field_desc fields[0]; }; enum mlx5_dev_event { MLX5_DEV_EVENT_SYS_ERROR, MLX5_DEV_EVENT_PORT_UP, MLX5_DEV_EVENT_PORT_DOWN, MLX5_DEV_EVENT_PORT_INITIALIZED, MLX5_DEV_EVENT_LID_CHANGE, MLX5_DEV_EVENT_PKEY_CHANGE, MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, MLX5_DEV_EVENT_VPORT_CHANGE, MLX5_DEV_EVENT_ERROR_STATE_DCBX, MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE, MLX5_DEV_EVENT_LOCAL_OPER_CHANGE, MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE, }; enum mlx5_port_status { MLX5_PORT_UP = 1 << 0, MLX5_PORT_DOWN = 1 << 1, }; enum mlx5_link_mode { MLX5_1000BASE_CX_SGMII = 0, MLX5_1000BASE_KX = 1, MLX5_10GBASE_CX4 = 2, MLX5_10GBASE_KX4 = 3, MLX5_10GBASE_KR = 4, MLX5_20GBASE_KR2 = 5, MLX5_40GBASE_CR4 = 6, MLX5_40GBASE_KR4 = 7, MLX5_56GBASE_R4 = 8, MLX5_10GBASE_CR = 12, MLX5_10GBASE_SR = 13, MLX5_10GBASE_ER = 14, MLX5_40GBASE_SR4 = 15, MLX5_40GBASE_LR4 = 16, MLX5_100GBASE_CR4 = 20, MLX5_100GBASE_SR4 = 21, MLX5_100GBASE_KR4 = 22, MLX5_100GBASE_LR4 = 23, MLX5_100BASE_TX = 24, MLX5_1000BASE_T = 25, MLX5_10GBASE_T = 26, MLX5_25GBASE_CR = 27, MLX5_25GBASE_KR = 28, MLX5_25GBASE_SR = 29, MLX5_50GBASE_CR2 = 30, MLX5_50GBASE_KR2 = 31, MLX5_LINK_MODES_NUMBER, }; enum { MLX5_VSC_SPACE_SUPPORTED = 0x1, MLX5_VSC_SPACE_OFFSET = 0x4, MLX5_VSC_COUNTER_OFFSET = 0x8, MLX5_VSC_SEMA_OFFSET = 0xC, MLX5_VSC_ADDR_OFFSET = 0x10, MLX5_VSC_DATA_OFFSET = 0x14, MLX5_VSC_MAX_RETRIES = 0x1000, }; #define MLX5_PROT_MASK(link_mode) (1 << link_mode) struct mlx5_uuar_info { struct mlx5_uar *uars; int num_uars; int num_low_latency_uuars; unsigned long *bitmap; unsigned int *count; struct mlx5_bf *bfs; /* * protect uuar allocation data structs */ struct mutex lock; u32 ver; }; struct mlx5_bf { void __iomem *reg; void __iomem *regreg; int buf_size; struct mlx5_uar *uar; unsigned long offset; int need_lock; /* protect blue flame buffer selection when needed */ spinlock_t lock; /* serialize 64 bit writes when done as two 32 bit accesses */ spinlock_t lock32; int uuarn; }; struct mlx5_cmd_first { __be32 data[4]; }; struct cache_ent; struct mlx5_fw_page { union { struct rb_node rb_node; struct list_head list; }; struct mlx5_cmd_first first; struct mlx5_core_dev *dev; bus_dmamap_t dma_map; bus_addr_t dma_addr; void *virt_addr; struct cache_ent *cache; u32 numpages; u16 load_done; #define MLX5_LOAD_ST_NONE 0 #define MLX5_LOAD_ST_SUCCESS 1 #define MLX5_LOAD_ST_FAILURE 2 u16 func_id; }; #define mlx5_cmd_msg mlx5_fw_page struct mlx5_cmd_debug { struct dentry *dbg_root; struct dentry *dbg_in; struct dentry *dbg_out; 
struct dentry *dbg_outlen; struct dentry *dbg_status; struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; u16 inlen; u16 outlen; }; struct cache_ent { /* protect block chain allocations */ spinlock_t lock; struct list_head head; }; struct cmd_msg_cache { struct cache_ent large; struct cache_ent med; }; struct mlx5_traffic_counter { u64 packets; u64 octets; }; struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; struct dentry *avg; struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { struct mlx5_fw_page *cmd_page; bus_dma_tag_t dma_tag; struct sx dma_sx; struct mtx dma_mtx; #define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx) #define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx) #define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx) struct cv dma_cv; #define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv) #define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx) void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; u8 log_sz; u8 log_stride; int max_reg_cmds; int events; u32 __iomem *vector; /* protect command queue allocations */ spinlock_t alloc_lock; /* protect token allocations */ spinlock_t token_lock; u8 token; unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; struct semaphore sem; struct semaphore pages_sem; int mode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; }; struct mlx5_port_caps { int gid_table_len; int pkey_table_len; u8 ext_port_cap; }; struct mlx5_buf { bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; struct mlx5_core_dev *dev; struct { void *buf; } direct; u64 *page_list; int npages; int size; u8 page_shift; u8 load_done; }; struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; u32 cons_index; struct mlx5_buf buf; int size; u8 irqn; u8 eqn; int nent; u64 mask; struct list_head list; int index; struct mlx5_rsc_debug *dbg; }; struct mlx5_core_psv { u32 psv_idx; struct psv_layout { u32 pd; u16 syndrome; u16 reserved; u16 bg; u16 app_tag; u32 ref_tag; } psv; }; struct mlx5_core_sig_ctx { struct mlx5_core_psv psv_memory; struct mlx5_core_psv psv_wire; #if (__FreeBSD_version >= 1100000) struct ib_sig_err err_item; #endif bool sig_status_checked; bool sig_err_exists; u32 sigerr_count; }; struct mlx5_core_mr { u64 iova; u64 size; u32 key; u32 pd; }; enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_DCT = 5, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; atomic_t refcount; struct completion free; }; struct mlx5_core_srq { struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; int max; size_t max_gs; size_t max_avail_gather; int wqe_shift; void (*event)(struct mlx5_core_srq *, int); atomic_t refcount; struct completion free; }; struct mlx5_eq_table { void __iomem *update_ci; void __iomem *update_arm_ci; struct list_head comp_eqs_list; struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; int num_comp_vectors; /* protect EQs list */ spinlock_t lock; }; struct mlx5_uar { u32 index; void __iomem *bf_map; void __iomem *map; }; struct mlx5_core_health { struct mlx5_health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; u32 prev; int miss_counter; u32 fatal_error; /* wq spinlock to synchronize draining */ 
spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; struct work_struct work; struct delayed_work recover_work; }; #ifdef RATELIMIT #define MLX5_CQ_LINEAR_ARRAY_SIZE (128 * 1024) #else #define MLX5_CQ_LINEAR_ARRAY_SIZE 1024 #endif struct mlx5_cq_linear_array_entry { spinlock_t lock; struct mlx5_core_cq * volatile cq; }; struct mlx5_cq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE]; }; struct mlx5_qp_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_srq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_mr_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_irq_info { char name[MLX5_MAX_IRQ_NAME]; }; #ifdef RATELIMIT struct mlx5_rl_entry { u32 rate; u16 burst; u16 index; u32 refcount; }; struct mlx5_rl_table { struct mutex rl_lock; u16 max_size; u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; }; #endif struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); struct io_mapping *bf_mapping; /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; s64 fw_pages; atomic_t reg_pages; s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS]; struct mlx5_core_health health; struct mlx5_srq_table srq_table; /* start: qp staff */ struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; /* end: qp staff */ /* start: cq staff */ struct mlx5_cq_table cq_table; /* end: cq staff */ /* start: mr staff */ struct mlx5_mr_table mr_table; /* end: mr staff */ /* start: alloc staff */ int numa_node; struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ struct dentry *dbg_root; /* protect mkey key part */ spinlock_t mkey_lock; u8 mkey_key; struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; unsigned long pci_dev_data; #ifdef RATELIMIT struct mlx5_rl_table rl_table; #endif }; enum mlx5_device_state { MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; enum mlx5_interface_state { MLX5_INTERFACE_STATE_DOWN = BIT(0), MLX5_INTERFACE_STATE_UP = BIT(1), MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), }; enum mlx5_pci_status { MLX5_PCI_STATUS_DISABLED, MLX5_PCI_STATUS_ENABLED, }; struct mlx5_special_contexts { int resd_lkey; }; struct mlx5_flow_root_namespace; struct mlx5_dump_data; struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; enum mlx5_pci_status pci_status; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; struct { u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; } caps; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; u32 vsc_addr; u32 issi; struct mlx5_special_contexts special_contexts; unsigned int module_status[MLX5_MAX_PORTS]; struct mlx5_flow_root_namespace *root_ns; struct 
mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; struct mlx5_flow_root_namespace *esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER]; struct mlx5_dump_data *dump_data; struct sysctl_ctx_list sysctl_ctx; int msix_eqvec; }; enum { MLX5_WOL_DISABLE = 0, MLX5_WOL_SECURED_MAGIC = 1 << 1, MLX5_WOL_MAGIC = 1 << 2, MLX5_WOL_ARP = 1 << 3, MLX5_WOL_BROADCAST = 1 << 4, MLX5_WOL_MULTICAST = 1 << 5, MLX5_WOL_UNICAST = 1 << 6, MLX5_WOL_PHY_ACTIVITY = 1 << 7, }; struct mlx5_db { __be32 *db; union { struct mlx5_db_pgdir *pgdir; struct mlx5_ib_user_db_page *user_page; } u; dma_addr_t dma; int index; }; struct mlx5_net_counters { u64 packets; u64 octets; }; struct mlx5_ptys_reg { u8 an_dis_admin; u8 an_dis_ap; u8 local_port; u8 proto_mask; u32 eth_proto_cap; u16 ib_link_width_cap; u16 ib_proto_cap; u32 eth_proto_admin; u16 ib_link_width_admin; u16 ib_proto_admin; u32 eth_proto_oper; u16 ib_link_width_oper; u16 ib_proto_oper; u32 eth_proto_lp_advertise; }; struct mlx5_pvlc_reg { u8 local_port; u8 vl_hw_cap; u8 vl_admin; u8 vl_operational; }; struct mlx5_pmtu_reg { u8 local_port; u16 max_mtu; u16 admin_mtu; u16 oper_mtu; }; struct mlx5_vport_counters { struct mlx5_net_counters received_errors; struct mlx5_net_counters transmit_errors; struct mlx5_net_counters received_ib_unicast; struct mlx5_net_counters transmitted_ib_unicast; struct mlx5_net_counters received_ib_multicast; struct mlx5_net_counters transmitted_ib_multicast; struct mlx5_net_counters received_eth_broadcast; struct mlx5_net_counters transmitted_eth_broadcast; struct mlx5_net_counters received_eth_unicast; struct mlx5_net_counters transmitted_eth_unicast; struct mlx5_net_counters received_eth_multicast; struct mlx5_net_counters transmitted_eth_multicast; }; enum { MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES, }; struct mlx5_core_dct { struct mlx5_core_rsc_common common; /* must be first */ void (*event)(struct mlx5_core_dct *, int); int dctn; struct completion drained; struct mlx5_rsc_debug *dbg; int pid; }; enum { MLX5_COMP_EQ_SIZE = 1024, }; enum { MLX5_PTYS_IB = 1 << 0, MLX5_PTYS_EN = 1 << 2, }; struct mlx5_db_pgdir { struct list_head list; DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); struct mlx5_fw_page *fw_page; __be32 *db_page; dma_addr_t db_dma; }; typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; int uin_size; void *uout; int uout_size; mlx5_cmd_cbk_t callback; struct delayed_work cb_timeout_work; void *context; int idx; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; struct mlx5_cmd_layout *lay; int ret; int page_queue; u8 status; u8 token; u64 ts1; u64 ts2; u16 op; u8 busy; bool polling; }; struct mlx5_pas { u64 pa; u8 log_sz; }; enum port_state_policy { MLX5_POLICY_DOWN = 0, MLX5_POLICY_UP = 1, MLX5_POLICY_FOLLOW = 2, MLX5_POLICY_INVALID = 0xffffffff }; static inline void * mlx5_buf_offset(struct mlx5_buf *buf, int offset) { return ((char *)buf->direct.buf + offset); } extern struct workqueue_struct *mlx5_core_wq; #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) { return pci_get_drvdata(pdev); } extern struct dentry *mlx5_debugfs_root; static 
inline u16 fw_rev_maj(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) & 0xffff; } static inline u16 fw_rev_min(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) >> 16; } static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n"); return 0; } return 8 * (1 << param); } static inline void *mlx5_vzalloc(unsigned long size) { void *rtn; rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); return rtn; } static inline void *mlx5_vmalloc(unsigned long size) { void *rtn; rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!rtn) rtn = vmalloc(size); return rtn; } static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; } int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context); int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); #define mlx5_buf_alloc_node(dev, size, direct, buf, node) \ mlx5_buf_alloc(dev, size, direct, buf) int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); void mlx5_init_mr_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev); int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey, u32 *in, int inlen, u32 *out, int outlen, mlx5_cmd_cbk_t callback, void 
*context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, u32 *in, int inlen); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey, u32 *out, int outlen); int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); void mlx5_fwp_flush(struct mlx5_fw_page *fwp); void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp); struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num); void mlx5_fwp_free(struct mlx5_fw_page *fwp); u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset); void *mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset); void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable, u64 addr); int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u32 *out, int outlen); int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct 
mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev); int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode); int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout); int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout); int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode); int mlx5_core_access_pvlc(struct mlx5_core_dev *dev, struct mlx5_pvlc_reg *pvlc, int write); int mlx5_core_access_ptys(struct mlx5_core_dev *dev, struct mlx5_ptys_reg *ptys, int write); int mlx5_core_access_pmtu(struct mlx5_core_dev *dev, struct mlx5_pmtu_reg *pmtu, int write); int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port); int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port); int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int *is_enable); int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol, int priority, int enable); int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol, void *out, int out_size); int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear, void *out, int out_size); int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev, u8 num_of_samples, u16 sample_index, void *out, int out_size); int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev); int mlx5_vsc_lock(struct mlx5_core_dev *mdev); void mlx5_vsc_unlock(struct mlx5_core_dev *mdev); int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space); int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, const u32 *data); int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data); int mlx5_vsc_lock_addr_space(struct mlx5_core_dev *mdev, u32 addr); int mlx5_vsc_unlock_addr_space(struct mlx5_core_dev *mdev, u32 addr); static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; } static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) { return mkey_idx << 8; } static inline u8 mlx5_mkey_variant(u32 mkey) { return mkey & 0xff; } enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, }; enum { MAX_MR_CACHE_ENTRIES = 15, }; struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); void * (*get_dev)(void *context); int protocol; struct list_head list; }; void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); struct mlx5_profile { u64 mask; u8 log_max_qp; struct { int size; int limit; } mr_cache[MAX_MR_CACHE_ENTRIES]; }; enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; static inline int mlx5_core_is_pf(struct 
mlx5_core_dev *dev) { return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } #ifdef RATELIMIT int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst, u16 *index); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst); bool mlx5_rl_is_in_range(const struct mlx5_core_dev *dev, u32 rate, u32 burst); static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) { return !!(dev->priv.rl_table.max_size); } #endif #endif /* MLX5_DRIVER_H */ Index: head/sys/dev/mlx5/mlx5_core/mlx5_health.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision 341559) +++ head/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision 341560) @@ -1,581 +1,589 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include "mlx5_core.h" #define MLX5_HEALTH_POLL_INTERVAL (2 * HZ) #define MAX_MISSES 3 enum { MLX5_NIC_IFC_FULL = 0, MLX5_NIC_IFC_DISABLED = 1, MLX5_NIC_IFC_NO_DRAM_NIC = 2, MLX5_NIC_IFC_SW_RESET = 7, }; enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, }; enum { MLX5_SENSOR_NO_ERR = 0, MLX5_SENSOR_PCI_COMM_ERR = 1, MLX5_SENSOR_PCI_ERR = 2, MLX5_SENSOR_NIC_DISABLED = 3, MLX5_SENSOR_NIC_SW_RESET = 4, MLX5_SENSOR_FW_SYND_RFR = 5, }; static int lock_sem_sw_reset(struct mlx5_core_dev *dev) { int ret; /* Lock GW access */ ret = -mlx5_vsc_lock(dev); if (ret) { mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret); return ret; } ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET); if (ret) { if (ret == -EBUSY) mlx5_core_dbg(dev, "SW reset FW semaphore already locked, another function will handle the reset\n"); else mlx5_core_warn(dev, "SW reset semaphore lock return %d\n", ret); } /* Unlock GW access */ mlx5_vsc_unlock(dev); return ret; } static int unlock_sem_sw_reset(struct mlx5_core_dev *dev) { int ret; /* Lock GW access */ ret = -mlx5_vsc_lock(dev); if (ret) { mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret); return ret; } ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET); /* Unlock GW access */ mlx5_vsc_unlock(dev); return ret; } static u8 get_nic_mode(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7; } static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct mlx5_health_buffer __iomem *h = health->health; u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET; u8 synd = ioread8(&h->synd); if (rfr && synd) mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd); return rfr && synd; } static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long flags; u64 vector; /* wait for pending handlers to complete */ synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); if (!vector) goto no_trig; vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector); mlx5_cmd_comp_handler(dev, vector); return; no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } static bool sensor_pci_no_comm(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct mlx5_health_buffer __iomem *h = health->health; bool err = ioread32be(&h->fw_ver) == 0xffffffff; return err; } static bool sensor_nic_disabled(struct mlx5_core_dev *dev) { return get_nic_mode(dev) == MLX5_NIC_IFC_DISABLED; } static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev) { return get_nic_mode(dev) == MLX5_NIC_IFC_SW_RESET; } static u32 check_fatal_sensors(struct mlx5_core_dev *dev) { if (sensor_pci_no_comm(dev)) return MLX5_SENSOR_PCI_COMM_ERR; if (pci_channel_offline(dev->pdev)) return MLX5_SENSOR_PCI_ERR; if (sensor_nic_disabled(dev)) return MLX5_SENSOR_NIC_DISABLED; if (sensor_nic_sw_reset(dev)) return MLX5_SENSOR_NIC_SW_RESET; if (sensor_fw_synd_rfr(dev)) return MLX5_SENSOR_FW_SYND_RFR; return MLX5_SENSOR_NO_ERR; } static void reset_fw_if_needed(struct mlx5_core_dev *dev) { bool supported = (ioread32be(&dev->iseg->initializing) >> MLX5_FW_RESET_SUPPORTED_OFFSET) & 1; u32 cmdq_addr, fatal_error; if (!supported) return; /* The reset only needs to be issued 
by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant 2nd reset. If the fatal error was
	 * PCI related a reset won't help.
	 */
	fatal_error = check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n");
		return;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset, the command
	 * interface address also resides here, don't overwrite it.
	 */
	cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cmdq_addr & 0xFFFFF000) |
		    MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

#define MLX5_CRDUMP_WAIT_MS 60000
#define MLX5_FW_RESET_WAIT_MS 1000
#define MLX5_NIC_STATE_POLL_MS 5

void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	unsigned long end, delay_ms = MLX5_CRDUMP_WAIT_MS;
	u32 fatal_error;
	int lock = -EBUSY;

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error || force) {
		if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) ==
		    MLX5_DEVICE_STATE_INTERNAL_ERROR)
			return;
		if (!force)
			mlx5_core_err(dev, "internal state error detected\n");
		mlx5_trigger_cmd_completions(dev);
	}

	mutex_lock(&dev->intf_state_mutex);

	if (force)
		goto err_state_done;

	if (fatal_error == MLX5_SENSOR_FW_SYND_RFR) {
		/* Get cr-dump and reset FW semaphore */
		if (mlx5_core_is_pf(dev))
			lock = lock_sem_sw_reset(dev);

		/* Execute cr-dump and SW reset */
		if (lock != -EBUSY) {
			mlx5_fwdump(dev);
			reset_fw_if_needed(dev);
			delay_ms = MLX5_FW_RESET_WAIT_MS;
		}
	}

	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (sensor_nic_disabled(dev))
			break;

		msleep(MLX5_NIC_STATE_POLL_MS);
	} while (!time_after(jiffies, end));

	if (!sensor_nic_disabled(dev)) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			get_nic_mode(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		unlock_sem_sw_reset(dev);

	mlx5_core_err(dev, "system error event triggered\n");

err_state_done:
	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
	mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_mode = get_nic_mode(dev);

	if (nic_mode == MLX5_NIC_IFC_SW_RESET) {
		/* The IFC mode field is 3 bits, so it will read 0x7 in two cases:
		 * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
		 *    and this is a VF), this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function, driver can
		 *    be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev, "NIC SW reset is already in progress\n");
		else
			mlx5_core_warn(dev, "Communication with FW over the PCI link is down\n");
	} else {
		mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
	}

	mlx5_disable_device(dev);
}

#define MLX5_FW_RESET_WAIT_MS 1000
#define MLX5_NIC_STATE_POLL_MS 5
static void health_recover(struct work_struct *work)
{
	unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS);
	struct mlx5_core_health *health;
	struct delayed_work *dwork;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	bool recover = true;
	u8 nic_mode;

	dwork = container_of(work, struct delayed_work, work);
	health = container_of(dwork, struct mlx5_core_health, recover_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mtx_lock(&Giant);	/* XXX newbus needs this */

	if (sensor_pci_no_comm(dev)) {
		dev_err(&dev->pdev->dev, "health recovery flow aborted, PCI reads still not working\n");
		recover = false;
	}

	nic_mode = get_nic_mode(dev);
	while (nic_mode != MLX5_NIC_IFC_DISABLED &&
	       !time_after(jiffies, end)) {
		msleep(MLX5_NIC_STATE_POLL_MS);
		nic_mode = get_nic_mode(dev);
	}

	if (nic_mode != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
			nic_mode);
		recover = false;
	}

	if (recover) {
		dev_err(&dev->pdev->dev, "starting health recovery flow\n");
		mlx5_recover_device(dev);
	}

	mtx_unlock(&Giant);
}

/* How much time to wait until health resetting the driver (in msecs) */
#define MLX5_RECOVERY_DELAY_MSECS 60000
#define MLX5_RECOVERY_NO_DELAY 0
static unsigned long get_recovery_delay(struct mlx5_core_dev *dev)
{
	return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR ||
		dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR ?
		MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY;
}

static void health_care(struct work_struct *work)
{
	struct mlx5_core_health *health;
	unsigned long recover_delay;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	unsigned long flags;

	health = container_of(work, struct mlx5_core_health, work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);

	recover_delay = msecs_to_jiffies(get_recovery_delay(dev));

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
		mlx5_core_warn(dev, "Scheduling recovery work with %lums delay\n",
			       recover_delay);
		schedule_delayed_work(&health->recover_work, recover_delay);
	} else {
		dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n");
	}
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

static int get_next_poll_jiffies(void)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

	return next;
}

void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->work);
	else
		dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}

static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	char fw_str[18];
	u32 fw;
	int i;

	/* If the syndrome is 0, the device is OK and no need to print buffer */
	if (!ioread8(&h->synd))
		return;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i,
		       ioread32be(h->assert_var + i));

	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n",
	       ioread32be(&h->assert_exit_ptr));
	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n",
	       ioread32be(&h->assert_callra));
	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d",
		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
	printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
	printf("mlx5_core: INFO: ""synd 0x%x: %s\n",
	       ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
	printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
}
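/*
 * Descriptive note on the polling flow below: poll_health() runs from the
 * health timer armed by mlx5_start_health_poll() and watches the firmware
 * health counter; when a fatal sensor trips it calls
 * mlx5_trigger_health_work(), which only queues health->work if
 * MLX5_DROP_NEW_HEALTH_WORK is clear (tested under health->wq_lock).  The
 * change in this revision gives mlx5_stop_health_poll() a disable_health
 * argument so callers can set MLX5_DROP_NEW_HEALTH_WORK and
 * MLX5_DROP_NEW_RECOVERY_WORK before the timer is deleted, instead of
 * relying solely on a later mlx5_drain_health_wq() call to do so.
 */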
static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	u32 fatal_error;
	u32 count;

	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
	}

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
	}

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	init_timer(&health->timer);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	setup_timer(&health->timer, poll_health, (unsigned long)dev);
	mod_timer(&health->timer,
		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
}

-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	if (disable_health) {
+		spin_lock_irqsave(&health->wq_lock, flags);
+		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+		spin_unlock_irqrestore(&health->wq_lock, flags);
+	}
 	del_timer_sync(&health->timer);
 }

void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_delayed_work_sync(&health->recover_work);
	cancel_work_sync(&health->work);
}

void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_delayed_work_sync(&dev->priv.health.recover_work);
}

void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
}

#define HEALTH_NAME "mlx5_health"
int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char *name;
	int len;

	health = &dev->priv.health;
	len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev));
	name = kmalloc(len + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	snprintf(name, len, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
	health->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!health->wq)
		return -ENOMEM;

	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->work, health_care);
	INIT_DELAYED_WORK(&health->recover_work, health_recover);

	return 0;
}
Index: head/sys/dev/mlx5/mlx5_core/mlx5_main.c
===================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_main.c (revision 341559)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_main.c (revision
341560) @@ -1,1520 +1,1526 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #define LINUXKPI_PARAM_PREFIX mlx5_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mlx5_core.h" #include "fs_core.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); #if (__FreeBSD_version >= 1100000) MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1); #endif MODULE_VERSION(mlx5, 1); int mlx5_core_debug_mask; module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); #define MLX5_DEFAULT_PROF 2 static int prof_sel = MLX5_DEFAULT_PROF; module_param_named(prof_sel, prof_sel, int, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. 
Valid range 0 - 2"); SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 HW controls"); #define NUMA_NO_NODE -1 static LIST_HEAD(intf_list); static LIST_HEAD(dev_list); static DEFINE_MUTEX(intf_mutex); struct mlx5_device_context { struct list_head list; struct mlx5_interface *intf; void *context; }; enum { MLX5_ATOMIC_REQ_MODE_BE = 0x0, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, }; static struct mlx5_profile profiles[] = { [0] = { .mask = 0, }, [1] = { .mask = MLX5_PROF_MASK_QP_SIZE, .log_max_qp = 12, }, [2] = { .mask = MLX5_PROF_MASK_QP_SIZE | MLX5_PROF_MASK_MR_CACHE, .log_max_qp = 17, .mr_cache[0] = { .size = 500, .limit = 250 }, .mr_cache[1] = { .size = 500, .limit = 250 }, .mr_cache[2] = { .size = 500, .limit = 250 }, .mr_cache[3] = { .size = 500, .limit = 250 }, .mr_cache[4] = { .size = 500, .limit = 250 }, .mr_cache[5] = { .size = 500, .limit = 250 }, .mr_cache[6] = { .size = 500, .limit = 250 }, .mr_cache[7] = { .size = 500, .limit = 250 }, .mr_cache[8] = { .size = 500, .limit = 250 }, .mr_cache[9] = { .size = 500, .limit = 250 }, .mr_cache[10] = { .size = 500, .limit = 250 }, .mr_cache[11] = { .size = 500, .limit = 250 }, .mr_cache[12] = { .size = 64, .limit = 32 }, .mr_cache[13] = { .size = 32, .limit = 16 }, .mr_cache[14] = { .size = 16, .limit = 8 }, }, [3] = { .mask = MLX5_PROF_MASK_QP_SIZE, .log_max_qp = 17, }, }; static int set_dma_caps(struct pci_dev *pdev) { int err; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n"); return err; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n"); return err; } } dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); return err; } static int mlx5_pci_enable_device(struct mlx5_core_dev *dev) { struct pci_dev *pdev = dev->pdev; int err = 0; mutex_lock(&dev->pci_status_mutex); if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { err = pci_enable_device(pdev); if (!err) dev->pci_status = MLX5_PCI_STATUS_ENABLED; } mutex_unlock(&dev->pci_status_mutex); return err; } static void mlx5_pci_disable_device(struct mlx5_core_dev *dev) { struct pci_dev *pdev = dev->pdev; mutex_lock(&dev->pci_status_mutex); if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { pci_disable_device(pdev); dev->pci_status = MLX5_PCI_STATUS_DISABLED; } mutex_unlock(&dev->pci_status_mutex); } static int request_bar(struct pci_dev *pdev) { int err = 0; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n"); return -ENODEV; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n"); return err; } static void release_bar(struct pci_dev *pdev) { pci_release_regions(pdev); } static int mlx5_enable_msix(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int limit = dev->msix_eqvec; int nvec = MLX5_EQ_VEC_COMP_BASE; int i; if (limit > 0) nvec += limit; else nvec += MLX5_CAP_GEN(dev, num_ports) * 
num_online_cpus(); nvec = min_t(int, nvec, num_eqs); if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL); priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL); for (i = 0; i < nvec; i++) priv->msix_arr[i].entry = i; nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; } static void mlx5_disable_msix(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; pci_disable_msix(dev->pdev); kfree(priv->irq_info); kfree(priv->msix_arr); } struct mlx5_reg_host_endianess { u8 he; u8 rsvd[15]; }; #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) enum { MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | MLX5_DEV_CAP_FLAG_DCT | MLX5_DEV_CAP_FLAG_DRAIN_SIGERR, }; static u16 to_fw_pkey_sz(u32 size) { switch (size) { case 128: return 0; case 256: return 1; case 512: return 2; case 1024: return 3; case 2048: return 4; case 4096: return 5; default: printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size); return 0; } } static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); void *out, *hca_caps; u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) { mlx5_core_warn(dev, "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", cap_type, cap_mode, err); goto query_ex; } hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: memcpy(dev->hca_caps_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: memcpy(dev->hca_caps_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: mlx5_core_warn(dev, "Tried to query dev cap type(%x) with wrong opmode(%x)\n", cap_type, cap_mode); err = -EINVAL; break; } query_ex: kfree(out); return err; } int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) { int ret; ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); if (ret) return ret; return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); } static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) { u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); } static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; int err = -ENOMEM; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); void *set_hca_cap; set_ctx = kzalloc(set_sz, GFP_KERNEL); err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); if (err) goto query_ex; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 128); /* we limit the size of the pkey table to 128 entries for now */ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, to_fw_pkey_sz(128)); if (prof->mask & 
MLX5_PROF_MASK_QP_SIZE) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, prof->log_max_qp); /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); /* enable drain sigerr */ MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1); MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); err = set_caps(dev, set_ctx, set_sz); query_ex: kfree(set_ctx); return err; } static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) { void *set_ctx; void *set_hca_cap; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); int req_endianness; int err; if (MLX5_CAP_GEN(dev, atomic)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); if (err) return err; } else { return 0; } req_endianness = MLX5_CAP_ATOMIC(dev, supported_atomic_req_8B_endianess_mode_1); if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) return 0; set_ctx = kzalloc(set_sz, GFP_KERNEL); if (!set_ctx) return -ENOMEM; MLX5_SET(set_hca_cap_in, set_ctx, op_mod, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1); set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); /* Set requestor to host endianness */ MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); err = set_caps(dev, set_ctx, set_sz); kfree(set_ctx); return err; } static int set_hca_ctrl(struct mlx5_core_dev *dev) { struct mlx5_reg_host_endianess he_in; struct mlx5_reg_host_endianess he_out; int err; if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && !MLX5_CAP_GEN(dev, roce)) return 0; memset(&he_in, 0, sizeof(he_in)); he_in.he = MLX5_SET_HOST_ENDIANNESS; err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), &he_out, sizeof(he_out), MLX5_REG_HOST_ENDIANNESS, 0, 1); return err; } static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) { u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) { u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; u32 sup_issi; int err; MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out)); if (err) { u32 syndrome; u8 status; mlx5_cmd_mbox_status(query_out, &status, &syndrome); if (status == MLX5_CMD_STAT_BAD_OP_ERR) { pr_debug("Only ISSI 0 is supported\n"); return 0; } printf("mlx5_core: ERR: ""failed to query ISSI\n"); return err; } sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); if (sup_issi & (1 << 1)) { u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); MLX5_SET(set_issi_in, set_in, current_issi, 1); err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); if (err) { printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err); return err; } dev->issi = 1; return 0; } else if (sup_issi & (1 << 0)) { return 0; } return -ENOTSUPP; } int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq 
*eq; int err = -ENOENT; spin_lock(&table->lock); list_for_each_entry(eq, &table->comp_eqs_list, list) { if (eq->index == vector) { *eqn = eq->eqn; *irqn = eq->irqn; err = 0; break; } } spin_unlock(&table->lock); return err; } EXPORT_SYMBOL(mlx5_vector2eqn); int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; struct mlx5_eq *eq; int err = -ENOENT; spin_lock(&table->lock); list_for_each_entry(eq, &table->comp_eqs_list, list) { if (eq->index == eq_ix) { int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE; snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME, "%s-%d", name, eq_ix); err = 0; break; } } spin_unlock(&table->lock); return err; } static void free_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq, *n; spin_lock(&table->lock); list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { list_del(&eq->list); spin_unlock(&table->lock); if (mlx5_destroy_unmap_eq(dev, eq)) mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); kfree(eq); spin_lock(&table->lock); } spin_unlock(&table->lock); } static int alloc_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; char name[MLX5_MAX_IRQ_NAME]; struct mlx5_eq *eq; int ncomp_vec; int nent; int err; int i; INIT_LIST_HEAD(&table->comp_eqs_list); ncomp_vec = table->num_comp_vectors; nent = MLX5_COMP_EQ_SIZE; for (i = 0; i < ncomp_vec; i++) { eq = kzalloc(sizeof(*eq), GFP_KERNEL); snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, name, &dev->priv.uuari.uars[0]); if (err) { kfree(eq); goto clean; } mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); eq->index = i; spin_lock(&table->lock); list_add_tail(&eq->list, &table->comp_eqs_list); spin_unlock(&table->lock); } return 0; clean: free_comp_eqs(dev); return err; } static int map_bf_area(struct mlx5_core_dev *dev) { resource_size_t bf_start = pci_resource_start(dev->pdev, 0); resource_size_t bf_len = pci_resource_len(dev->pdev, 0); dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); return dev->priv.bf_mapping ? 
0 : -ENOMEM; } static void unmap_bf_area(struct mlx5_core_dev *dev) { if (dev->priv.bf_mapping) io_mapping_free(dev->priv.bf_mapping); } static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; } static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { u64 end = jiffies + msecs_to_jiffies(max_wait_mili); int err = 0; while (fw_initializing(dev)) { if (time_after(jiffies, end)) { err = -EBUSY; break; } msleep(FW_INIT_WAIT_MS); } return err; } static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; dev_ctx->intf = intf; CURVNET_SET_QUIET(vnet0); dev_ctx->context = intf->add(dev); CURVNET_RESTORE(); if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); } } static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf == intf) { spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); intf->remove(dev, dev_ctx->context); kfree(dev_ctx); return; } } static int mlx5_register_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; mutex_lock(&intf_mutex); list_add_tail(&priv->dev_list, &dev_list); list_for_each_entry(intf, &intf_list, list) mlx5_add_device(intf, priv); mutex_unlock(&intf_mutex); return 0; } static void mlx5_unregister_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) mlx5_remove_device(intf, priv); list_del(&priv->dev_list); mutex_unlock(&intf_mutex); } int mlx5_register_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; if (!intf->add || !intf->remove) return -EINVAL; mutex_lock(&intf_mutex); list_add_tail(&intf->list, &intf_list); list_for_each_entry(priv, &dev_list, dev_list) mlx5_add_device(intf, priv); mutex_unlock(&intf_mutex); return 0; } EXPORT_SYMBOL(mlx5_register_interface); void mlx5_unregister_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; mutex_lock(&intf_mutex); list_for_each_entry(priv, &dev_list, dev_list) mlx5_remove_device(intf, priv); list_del(&intf->list); mutex_unlock(&intf_mutex); } EXPORT_SYMBOL(mlx5_unregister_interface); void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) { struct mlx5_priv *priv = &mdev->priv; struct mlx5_device_context *dev_ctx; unsigned long flags; void *result = NULL; spin_lock_irqsave(&priv->ctx_lock, flags); list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) if ((dev_ctx->intf->protocol == protocol) && dev_ctx->intf->get_dev) { result = dev_ctx->intf->get_dev(dev_ctx->context); break; } spin_unlock_irqrestore(&priv->ctx_lock, flags); return result; } EXPORT_SYMBOL(mlx5_get_protocol_dev); static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; int err = 0; pci_set_drvdata(dev->pdev, dev); strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); priv->name[MLX5_MAX_NAME_LEN - 1] = 0; mutex_init(&priv->pgdir_mutex); 
INIT_LIST_HEAD(&priv->pgdir_list); spin_lock_init(&priv->mkey_lock); priv->numa_node = NUMA_NO_NODE; err = mlx5_pci_enable_device(dev); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n"); goto err_dbg; } err = request_bar(pdev); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n"); goto err_disable; } pci_set_master(pdev); err = set_dma_caps(pdev); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n"); goto err_clr_master; } dev->iseg_base = pci_resource_start(dev->pdev, 0); dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); if (!dev->iseg) { err = -ENOMEM; device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n"); goto err_clr_master; } return 0; err_clr_master: pci_clear_master(dev->pdev); release_bar(dev->pdev); err_disable: mlx5_pci_disable_device(dev); err_dbg: return err; } static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { iounmap(dev->iseg); pci_clear_master(dev->pdev); release_bar(dev->pdev); mlx5_pci_disable_device(dev); } static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; int err; err = mlx5_vsc_find_cap(dev); if (err) dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n"); err = mlx5_query_hca_caps(dev); if (err) { dev_err(&pdev->dev, "query hca failed\n"); goto out; } err = mlx5_query_board_id(dev); if (err) { dev_err(&pdev->dev, "query board id failed\n"); goto out; } err = mlx5_eq_init(dev); if (err) { dev_err(&pdev->dev, "failed to initialize eq\n"); goto out; } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); err = mlx5_init_cq_table(dev); if (err) { dev_err(&pdev->dev, "failed to initialize cq table\n"); goto err_eq_cleanup; } mlx5_init_qp_table(dev); mlx5_init_srq_table(dev); mlx5_init_mr_table(dev); #ifdef RATELIMIT err = mlx5_init_rl_table(dev); if (err) { dev_err(&pdev->dev, "Failed to init rate limiting\n"); goto err_tables_cleanup; } #endif return 0; #ifdef RATELIMIT err_tables_cleanup: mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); #endif err_eq_cleanup: mlx5_eq_cleanup(dev); out: return err; } static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { #ifdef RATELIMIT mlx5_cleanup_rl_table(dev); #endif mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_eq_cleanup(dev); } static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, bool boot) { struct pci_dev *pdev = dev->pdev; int err; mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", __func__); goto out; } device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); /* * On load removing any previous indication of internal error, * device is up */ dev->state = MLX5_DEVICE_STATE_UP; err = mlx5_cmd_init(dev); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n"); goto out_err; } err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI); if (err) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI); goto err_cmd_cleanup; } err = mlx5_core_enable_hca(dev); if (err) { device_printf((&pdev->dev)->bsddev, "ERR: ""enable 
hca failed\n");
        goto err_cmd_cleanup;
    }
    err = mlx5_core_set_issi(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
        goto err_disable_hca;
    }
    err = mlx5_pagealloc_start(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
        goto err_disable_hca;
    }
    err = mlx5_satisfy_startup_pages(dev, 1);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
        goto err_pagealloc_stop;
    }
    err = set_hca_ctrl(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
        goto reclaim_boot_pages;
    }
    err = handle_hca_cap(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
        goto reclaim_boot_pages;
    }
    err = handle_hca_cap_atomic(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
        goto reclaim_boot_pages;
    }
    err = mlx5_satisfy_startup_pages(dev, 0);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
        goto reclaim_boot_pages;
    }
    err = mlx5_cmd_init_hca(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
        goto reclaim_boot_pages;
    }
    mlx5_start_health_poll(dev);
    if (boot && mlx5_init_once(dev, priv)) {
        dev_err(&pdev->dev, "sw objs init failed\n");
        goto err_stop_poll;
    }
    err = mlx5_enable_msix(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
        goto err_cleanup_once;
    }
    err = mlx5_alloc_uuars(dev, &priv->uuari);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
        goto err_disable_msix;
    }
    err = mlx5_start_eqs(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
        goto err_free_uar;
    }
    err = alloc_comp_eqs(dev);
    if (err) {
        device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
        goto err_stop_eqs;
    }
    if (map_bf_area(dev))
        device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");
    err = mlx5_init_fs(dev);
    if (err) {
        mlx5_core_err(dev, "flow steering init %d\n", err);
        goto err_free_comp_eqs;
    }
    err = mlx5_register_device(dev);
    if (err) {
        dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
        goto err_fs;
    }
    clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
    set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
    mutex_unlock(&dev->intf_state_mutex);
    return 0;

err_fs:
    mlx5_cleanup_fs(dev);
err_free_comp_eqs:
    free_comp_eqs(dev);
    unmap_bf_area(dev);
err_stop_eqs:
    mlx5_stop_eqs(dev);
err_free_uar:
    mlx5_free_uuars(dev, &priv->uuari);
err_disable_msix:
    mlx5_disable_msix(dev);
err_cleanup_once:
    if (boot)
        mlx5_cleanup_once(dev);
err_stop_poll:
-    mlx5_stop_health_poll(dev);
+    mlx5_stop_health_poll(dev, boot);
    if (mlx5_cmd_teardown_hca(dev)) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
        goto out_err;
    }
reclaim_boot_pages:
    mlx5_reclaim_startup_pages(dev);
err_pagealloc_stop:
    mlx5_pagealloc_stop(dev);
err_disable_hca:
    mlx5_core_disable_hca(dev);
err_cmd_cleanup:
    mlx5_cmd_cleanup(dev);
out_err:
    dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
    mutex_unlock(&dev->intf_state_mutex);
    return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                           bool cleanup)
{
    int err = 0;

    if (cleanup)
        mlx5_drain_health_recovery(dev);

    mutex_lock(&dev->intf_state_mutex);
    if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
        dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__);
        if (cleanup)
            mlx5_cleanup_once(dev);
        goto out;
    }
    mlx5_unregister_device(dev);
    mlx5_cleanup_fs(dev);
    unmap_bf_area(dev);
    mlx5_wait_for_reclaim_vfs_pages(dev);
    free_comp_eqs(dev);
    mlx5_stop_eqs(dev);
    mlx5_free_uuars(dev, &priv->uuari);
    mlx5_disable_msix(dev);
    if (cleanup)
        mlx5_cleanup_once(dev);
-    mlx5_stop_health_poll(dev);
+    mlx5_stop_health_poll(dev, cleanup);
    err = mlx5_cmd_teardown_hca(dev);
    if (err) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
        goto out;
    }
    mlx5_pagealloc_stop(dev);
    mlx5_reclaim_startup_pages(dev);
    mlx5_core_disable_hca(dev);
    mlx5_cmd_cleanup(dev);

out:
    clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
    set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
    mutex_unlock(&dev->intf_state_mutex);
    return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_device_context *dev_ctx;
    unsigned long flags;

    spin_lock_irqsave(&priv->ctx_lock, flags);
    list_for_each_entry(dev_ctx, &priv->ctx_list, list)
        if (dev_ctx->intf->event)
            dev_ctx->intf->event(dev, dev_ctx->context, event, param);
    spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
    void (*event)(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                  void *data);
};

static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct mlx5_core_dev *dev;
    struct mlx5_priv *priv;
    device_t bsddev = pdev->dev.bsddev;
    int err;

    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    priv = &dev->priv;
    if (id)
        priv->pci_dev_data = id->driver_data;
    if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
        device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
        prof_sel = MLX5_DEFAULT_PROF;
    }
    dev->profile = &profiles[prof_sel];
    dev->pdev = pdev;
    dev->event = mlx5_core_event;
    sysctl_ctx_init(&dev->sysctl_ctx);
    SYSCTL_ADD_INT(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
        "Maximum number of MSIX event queue vectors, if set");
    INIT_LIST_HEAD(&priv->ctx_list);
    spin_lock_init(&priv->ctx_lock);
    mutex_init(&dev->pci_status_mutex);
    mutex_init(&dev->intf_state_mutex);
    err = mlx5_pci_init(dev, priv);
    if (err) {
        device_printf(bsddev, "ERR: mlx5_pci_init failed %d\n", err);
        goto clean_dev;
    }
    err = mlx5_health_init(dev);
    if (err) {
        device_printf(bsddev, "ERR: mlx5_health_init failed %d\n", err);
        goto close_pci;
    }
    mlx5_pagealloc_init(dev);
    err = mlx5_load_one(dev, priv, true);
    if (err) {
        device_printf(bsddev, "ERR: mlx5_load_one failed %d\n", err);
        goto clean_health;
    }
    mlx5_fwdump_prep(dev);
    pci_save_state(bsddev);
    return 0;

clean_health:
    mlx5_pagealloc_cleanup(dev);
    mlx5_health_cleanup(dev);
close_pci:
    mlx5_pci_close(dev, priv);
clean_dev:
    sysctl_ctx_free(&dev->sysctl_ctx);
    kfree(dev);
    return err;
}

static void remove_one(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;

    if (mlx5_unload_one(dev, priv, true)) {
        dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
        mlx5_health_cleanup(dev);
        return;
    }

    mlx5_fwdump_clean(dev);
    mlx5_pagealloc_cleanup(dev);
    mlx5_health_cleanup(dev);
    mlx5_pci_close(dev, priv);
    pci_set_drvdata(pdev, NULL);
    sysctl_ctx_free(&dev->sysctl_ctx);
    kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;

    dev_info(&pdev->dev, "%s was called\n", __func__);
    mlx5_enter_error_state(dev, false);
    mlx5_unload_one(dev, priv, false);
    if (state) {
        mlx5_drain_health_wq(dev);
        mlx5_pci_disable_device(dev);
    }
    return state == pci_channel_io_perm_failure ?
        PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    int err = 0;

    dev_info(&pdev->dev, "%s was called\n", __func__);
    err = mlx5_pci_enable_device(dev);
    if (err) {
        dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
            __func__, err);
        return PCI_ERS_RESULT_DISCONNECT;
    }
    pci_set_master(pdev);
    pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
    pci_restore_state(pdev->dev.bsddev);
    pci_save_state(pdev->dev.bsddev);

    return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/* Wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non-zero value that is different from 0xffffffff.
 */
static void wait_vital(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_core_health *health = &dev->priv.health;
    const int niter = 100;
    u32 count;
    u16 did;
    int i;

    /* Wait for firmware to be ready after reset */
    msleep(1000);
    for (i = 0; i < niter; i++) {
        if (pci_read_config_word(pdev, 2, &did)) {
            dev_warn(&pdev->dev, "failed reading config word\n");
            break;
        }
        if (did == pdev->device) {
            dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
            break;
        }
        msleep(50);
    }
    if (i == niter)
        dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

    for (i = 0; i < niter; i++) {
        count = ioread32be(health->health_counter);
        if (count && count != 0xffffffff) {
            dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
            break;
        }
        msleep(50);
    }
    if (i == niter)
        dev_warn(&pdev->dev, "%s-%d: could not read the health counter\n", __func__, __LINE__);
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;
    int err;

    dev_info(&pdev->dev, "%s was called\n", __func__);
    wait_vital(pdev);

    err = mlx5_load_one(dev, priv, false);
    if (err)
        dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
            __func__, err);
    else
        dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
    .error_detected = mlx5_pci_err_detected,
    .slot_reset = mlx5_pci_slot_reset,
    .resume = mlx5_pci_resume
};

static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
    int err;

    if (!MLX5_CAP_GEN(dev, force_teardown)) {
        mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
        return -EOPNOTSUPP;
    }

    if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
        return -EAGAIN;
    }
+
+    /* The panic teardown firmware command stops PCI bus communication
+     * with the HCA, so the health poll is no longer needed.
+     */
+    mlx5_drain_health_wq(dev);
+    mlx5_stop_health_poll(dev, false);

    err = mlx5_cmd_force_teardown_hca(dev);
    if (err) {
        mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
        return err;
    }

    mlx5_enter_error_state(dev, true);
    return 0;
}

static void shutdown_one(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;
    int err;

    set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
    err = mlx5_try_fast_unload(dev);
    if (err)
        mlx5_unload_one(dev, priv, false);
    mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
    { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
    { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
    { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
    { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
    { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
    { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
    { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
    { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
    { PCI_VDEVICE(MELLANOX, 4121) },
    { PCI_VDEVICE(MELLANOX, 4122) },
    { PCI_VDEVICE(MELLANOX, 4123) },
    { PCI_VDEVICE(MELLANOX, 4124) },
    { PCI_VDEVICE(MELLANOX, 4125) },
    { PCI_VDEVICE(MELLANOX, 4126) },
    { PCI_VDEVICE(MELLANOX, 4127) },
    { PCI_VDEVICE(MELLANOX, 4128) },
    { PCI_VDEVICE(MELLANOX, 4129) },
    { PCI_VDEVICE(MELLANOX, 4130) },
    { PCI_VDEVICE(MELLANOX, 4131) },
    { PCI_VDEVICE(MELLANOX, 4132) },
    { PCI_VDEVICE(MELLANOX, 4133) },
    { PCI_VDEVICE(MELLANOX, 4134) },
    { PCI_VDEVICE(MELLANOX, 4135) },
    { PCI_VDEVICE(MELLANOX, 4136) },
    { PCI_VDEVICE(MELLANOX, 4137) },
    { PCI_VDEVICE(MELLANOX, 4138) },
    { PCI_VDEVICE(MELLANOX, 4139) },
    { PCI_VDEVICE(MELLANOX, 4140) },
    { PCI_VDEVICE(MELLANOX, 4141) },
    { PCI_VDEVICE(MELLANOX, 4142) },
    { PCI_VDEVICE(MELLANOX, 4143) },
    { PCI_VDEVICE(MELLANOX, 4144) },
    { 0, }
};
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
    mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
    mlx5_pci_disable_device(dev);
    if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
        mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
    .name = DRIVER_NAME,
    .id_table = mlx5_core_pci_table,
    .shutdown = shutdown_one,
    .probe = init_one,
    .remove = remove_one,
    .err_handler = &mlx5_err_handler
};

static int __init init(void)
{
    int err;

    err = pci_register_driver(&mlx5_core_driver);
    if (err)
        goto err_debug;
    err = mlx5_fwdump_init();
    if (err)
        goto err_fwdump;

    return 0;

err_fwdump:
    pci_unregister_driver(&mlx5_core_driver);
err_debug:
    return err;
}

static void __exit cleanup(void)
{
    mlx5_fwdump_fini();
    pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);
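
Note on the registration path touched above: mlx5_register_interface() adds the interface to intf_list and calls mlx5_add_device() for every probed core device, and it rejects an interface whose add or remove callback is NULL with -EINVAL. The sketch below is a minimal, hypothetical consumer illustrating that contract; the mlx5_example_* names are placeholders for illustration only and are not part of this change.

/* Minimal sketch of an mlx5_interface consumer; mlx5_example_* names are
 * illustrative placeholders, not part of this patch.
 */
struct mlx5_example_ctx {
    struct mlx5_core_dev *mdev;
};

static void *mlx5_example_add(struct mlx5_core_dev *mdev)
{
    struct mlx5_example_ctx *ctx;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (ctx == NULL)
        return NULL;    /* mlx5_add_device() frees its dev_ctx on NULL */
    ctx->mdev = mdev;
    return ctx;
}

static void mlx5_example_remove(struct mlx5_core_dev *mdev, void *context)
{
    kfree(context);
}

static struct mlx5_interface mlx5_example_interface = {
    .add = mlx5_example_add,        /* both callbacks are mandatory */
    .remove = mlx5_example_remove,
};

/* Typically called from the consumer's own module init/exit paths:
 *   mlx5_register_interface(&mlx5_example_interface);
 *   ...
 *   mlx5_unregister_interface(&mlx5_example_interface);
 */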