Index: head/sys/dev/mlx5/driver.h
===================================================================
--- head/sys/dev/mlx5/driver.h	(revision 312875)
+++ head/sys/dev/mlx5/driver.h	(revision 312876)
@@ -1,1023 +1,1023 @@
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define MLX5_QCOUNTER_SETS_NETDEV 64

enum {
    MLX5_BOARD_ID_LEN = 64,
    MLX5_MAX_NAME_LEN = 16,
};

enum {
    MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000,
    MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
    CMD_OWNER_SW = 0x0,
    CMD_OWNER_HW = 0x1,
    CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
    MLX5_SQP_SMI = 0,
    MLX5_SQP_GSI = 1,
    MLX5_SQP_IEEE_1588 = 2,
    MLX5_SQP_SNIFFER = 3,
    MLX5_SQP_SYNC_UMR = 4,
};

enum {
    MLX5_MAX_PORTS = 2,
};

enum {
    MLX5_EQ_VEC_PAGES = 0,
    MLX5_EQ_VEC_CMD = 1,
    MLX5_EQ_VEC_ASYNC = 2,
    MLX5_EQ_VEC_COMP_BASE,
};

enum {
    MLX5_MAX_IRQ_NAME = 32
};

enum {
    MLX5_ATOMIC_MODE_OFF = 16,
    MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF,
    MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF,
};

enum {
    MLX5_ATOMIC_MODE_DCT_OFF = 20,
    MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF,
    MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF,
};

enum {
    MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
    MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
    MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2,
    MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3,
};
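/*
 * Illustrative note: the atomic-mode enums above are pre-shifted field
 * values rather than independent flags; the HCA atomic mode occupies a
 * field starting at bit MLX5_ATOMIC_MODE_OFF (16) and the DCT variant
 * one at bit 20. A standalone sketch of packing and unpacking such a
 * shifted field (the helper below is hypothetical, for illustration
 * only, not part of the driver):
 */
#include <stdio.h>

#define ATOMIC_MODE_OFF 16                      /* MLX5_ATOMIC_MODE_OFF */
#define ATOMIC_MODE_8B  (3 << ATOMIC_MODE_OFF)  /* MLX5_ATOMIC_MODE_8B */

static unsigned int atomic_mode_of(unsigned int cfg)
{
    /* recover the 4-bit mode field from a combined configuration word */
    return (cfg >> ATOMIC_MODE_OFF) & 0xf;
}

int main(void)
{
    unsigned int cfg = ATOMIC_MODE_8B;          /* mode 3 at bit 16 */

    printf("cfg=0x%x mode=%u\n", cfg, atomic_mode_of(cfg));
    return 0;
}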
enum {
    MLX5_REG_QETCR = 0x4005,
    MLX5_REG_QPDP = 0x4007,
    MLX5_REG_QTCT = 0x400A,
    MLX5_REG_QHLL = 0x4016,
    MLX5_REG_DCBX_PARAM = 0x4020,
    MLX5_REG_DCBX_APP = 0x4021,
    MLX5_REG_PCAP = 0x5001,
    MLX5_REG_PMTU = 0x5003,
    MLX5_REG_PTYS = 0x5004,
    MLX5_REG_PAOS = 0x5006,
    MLX5_REG_PFCC = 0x5007,
    MLX5_REG_PPCNT = 0x5008,
    MLX5_REG_PMAOS = 0x5012,
    MLX5_REG_PUDE = 0x5009,
    MLX5_REG_PPTB = 0x500B,
    MLX5_REG_PBMC = 0x500C,
    MLX5_REG_PMPE = 0x5010,
    MLX5_REG_PELC = 0x500e,
    MLX5_REG_PVLC = 0x500f,
    MLX5_REG_PMLP = 0x5002,
    MLX5_REG_NODE_DESC = 0x6001,
    MLX5_REG_HOST_ENDIANNESS = 0x7004,
    MLX5_REG_MCIA = 0x9014,
    MLX5_REG_MPCNT = 0x9051,
};

enum dbg_rsc_type {
    MLX5_DBG_RSC_QP,
    MLX5_DBG_RSC_EQ,
    MLX5_DBG_RSC_CQ,
};

enum {
    MLX5_INTERFACE_PROTOCOL_IB = 0,
    MLX5_INTERFACE_PROTOCOL_ETH = 1,
    MLX5_INTERFACE_NUMBER = 2,
};

struct mlx5_field_desc {
    struct dentry *dent;
    int i;
};

struct mlx5_rsc_debug {
    struct mlx5_core_dev *dev;
    void *object;
    enum dbg_rsc_type type;
    struct dentry *root;
    struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
    MLX5_DEV_EVENT_SYS_ERROR,
    MLX5_DEV_EVENT_PORT_UP,
    MLX5_DEV_EVENT_PORT_DOWN,
    MLX5_DEV_EVENT_PORT_INITIALIZED,
    MLX5_DEV_EVENT_LID_CHANGE,
    MLX5_DEV_EVENT_PKEY_CHANGE,
    MLX5_DEV_EVENT_GUID_CHANGE,
    MLX5_DEV_EVENT_CLIENT_REREG,
    MLX5_DEV_EVENT_VPORT_CHANGE,
    MLX5_DEV_EVENT_ERROR_STATE_DCBX,
    MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
    MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
    MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
};

enum mlx5_port_status {
    MLX5_PORT_UP = 1 << 0,
    MLX5_PORT_DOWN = 1 << 1,
};

enum mlx5_link_mode {
    MLX5_1000BASE_CX_SGMII = 0,
    MLX5_1000BASE_KX = 1,
    MLX5_10GBASE_CX4 = 2,
    MLX5_10GBASE_KX4 = 3,
    MLX5_10GBASE_KR = 4,
    MLX5_20GBASE_KR2 = 5,
    MLX5_40GBASE_CR4 = 6,
    MLX5_40GBASE_KR4 = 7,
    MLX5_56GBASE_R4 = 8,
    MLX5_10GBASE_CR = 12,
    MLX5_10GBASE_SR = 13,
    MLX5_10GBASE_ER = 14,
    MLX5_40GBASE_SR4 = 15,
    MLX5_40GBASE_LR4 = 16,
    MLX5_100GBASE_CR4 = 20,
    MLX5_100GBASE_SR4 = 21,
    MLX5_100GBASE_KR4 = 22,
    MLX5_100GBASE_LR4 = 23,
    MLX5_100BASE_TX = 24,
    MLX5_1000BASE_T = 25,
    MLX5_10GBASE_T = 26,
    MLX5_25GBASE_CR = 27,
    MLX5_25GBASE_KR = 28,
    MLX5_25GBASE_SR = 29,
    MLX5_50GBASE_CR2 = 30,
    MLX5_50GBASE_KR2 = 31,
    MLX5_LINK_MODES_NUMBER,
};

#define MLX5_PROT_MASK(link_mode) (1 << link_mode)

struct mlx5_uuar_info {
    struct mlx5_uar *uars;
    int num_uars;
    int num_low_latency_uuars;
    unsigned long *bitmap;
    unsigned int *count;
    struct mlx5_bf *bfs;
    /*
     * protect uuar allocation data structs
     */
    struct mutex lock;
    u32 ver;
};

struct mlx5_bf {
    void __iomem *reg;
    void __iomem *regreg;
    int buf_size;
    struct mlx5_uar *uar;
    unsigned long offset;
    int need_lock;
    /* protect blue flame buffer selection when needed */
    spinlock_t lock;
    /* serialize 64 bit writes when done as two 32 bit accesses */
    spinlock_t lock32;
    int uuarn;
};

struct mlx5_cmd_first {
    __be32 data[4];
};

struct mlx5_cmd_msg {
    struct list_head list;
    struct cache_ent *cache;
    u32 len;
    struct mlx5_cmd_first first;
    struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
    struct dentry *dbg_root;
    struct dentry *dbg_in;
    struct dentry *dbg_out;
    struct dentry *dbg_outlen;
    struct dentry *dbg_status;
    struct dentry *dbg_run;
    void *in_msg;
    void *out_msg;
    u8 status;
    u16 inlen;
    u16 outlen;
};

struct cache_ent {
    /* protect block chain allocations */
    spinlock_t lock;
    struct list_head head;
};

struct cmd_msg_cache {
    struct cache_ent large;
    struct cache_ent med;
};

struct mlx5_cmd_stats {
    u64 sum;
    u64 n;
    struct dentry *root;
    struct dentry *avg;
    struct dentry *count;
    /* protect command average calculations */
    spinlock_t lock;
};
struct mlx5_cmd {
    void *cmd_alloc_buf;
    dma_addr_t alloc_dma;
    int alloc_size;
    void *cmd_buf;
    dma_addr_t dma;
    u16 cmdif_rev;
    u8 log_sz;
    u8 log_stride;
    int max_reg_cmds;
    int events;
    u32 __iomem *vector;

    /* protect command queue allocations */
    spinlock_t alloc_lock;

    /* protect token allocations */
    spinlock_t token_lock;
    u8 token;
    unsigned long bitmask;
    char wq_name[MLX5_CMD_WQ_MAX_NAME];
    struct workqueue_struct *wq;
    struct semaphore sem;
    struct semaphore pages_sem;
    int mode;
    struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
    struct pci_pool *pool;
    struct mlx5_cmd_debug dbg;
    struct cmd_msg_cache cache;
    int checksum_disabled;
    struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
    int moving_to_polling;
};

struct mlx5_port_caps {
    int gid_table_len;
    int pkey_table_len;
    u8 ext_port_cap;
};

struct mlx5_cmd_mailbox {
    void *buf;
    dma_addr_t dma;
    struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
    void *buf;
    dma_addr_t map;
};

struct mlx5_buf {
    struct mlx5_buf_list direct;
    struct mlx5_buf_list *page_list;
    int nbufs;
    int npages;
    int size;
    u8 page_shift;
};

struct mlx5_eq {
    struct mlx5_core_dev *dev;
    __be32 __iomem *doorbell;
    u32 cons_index;
    struct mlx5_buf buf;
    int size;
    u8 irqn;
    u8 eqn;
    int nent;
    u64 mask;
    struct list_head list;
    int index;
    struct mlx5_rsc_debug *dbg;
};

struct mlx5_core_psv {
    u32 psv_idx;
    struct psv_layout {
        u32 pd;
        u16 syndrome;
        u16 reserved;
        u16 bg;
        u16 app_tag;
        u32 ref_tag;
    } psv;
};

struct mlx5_core_sig_ctx {
    struct mlx5_core_psv psv_memory;
    struct mlx5_core_psv psv_wire;
#if (__FreeBSD_version >= 1100000)
    struct ib_sig_err err_item;
#endif
    bool sig_status_checked;
    bool sig_err_exists;
    u32 sigerr_count;
};

struct mlx5_core_mr {
    u64 iova;
    u64 size;
    u32 key;
    u32 pd;
};

enum mlx5_res_type {
    MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
    MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
    MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
    MLX5_RES_SRQ = 3,
    MLX5_RES_XSRQ = 4,
    MLX5_RES_DCT = 5,
};

struct mlx5_core_rsc_common {
    enum mlx5_res_type res;
    atomic_t refcount;
    struct completion free;
};

struct mlx5_core_srq {
    struct mlx5_core_rsc_common common; /* must be first */
    u32 srqn;
    int max;
    int max_gs;
    int max_avail_gather;
    int wqe_shift;
    void (*event)(struct mlx5_core_srq *, int);
    atomic_t refcount;
    struct completion free;
};

struct mlx5_eq_table {
    void __iomem *update_ci;
    void __iomem *update_arm_ci;
    struct list_head comp_eqs_list;
    struct mlx5_eq pages_eq;
    struct mlx5_eq async_eq;
    struct mlx5_eq cmd_eq;
    int num_comp_vectors;
    /* protect EQs list */
    spinlock_t lock;
};

struct mlx5_uar {
    u32 index;
    void __iomem *bf_map;
    void __iomem *map;
};

struct mlx5_core_health {
    struct mlx5_health_buffer __iomem *health;
    __be32 __iomem *health_counter;
    struct timer_list timer;
    struct list_head list;
    u32 prev;
    int miss_counter;
};

#define MLX5_CQ_LINEAR_ARRAY_SIZE 1024

struct mlx5_cq_linear_array_entry {
    spinlock_t lock;
    struct mlx5_core_cq * volatile cq;
};

struct mlx5_cq_table {
    /* protect radix tree */
    spinlock_t lock;
    struct radix_tree_root tree;
    struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE];
};

struct mlx5_qp_table {
    /* protect radix tree */
    spinlock_t lock;
    struct radix_tree_root tree;
};

struct mlx5_srq_table {
    /* protect radix tree */
    spinlock_t lock;
    struct radix_tree_root tree;
};

struct mlx5_mr_table {
    /* protect radix tree */
    spinlock_t lock;
    struct radix_tree_root tree;
};

struct mlx5_irq_info {
    char name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_priv {
    char name[MLX5_MAX_NAME_LEN];
    struct mlx5_eq_table eq_table;
    struct msix_entry *msix_arr;
    struct mlx5_irq_info *irq_info;
    struct mlx5_uuar_info uuari;
    MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
    struct io_mapping *bf_mapping;

    /* pages stuff */
    struct workqueue_struct *pg_wq;
    struct rb_root page_root;
    s64 fw_pages;
    atomic_t reg_pages;
    struct list_head free_list;

    struct mlx5_core_health health;

    struct mlx5_srq_table srq_table;

    /* start: qp staff */
    struct mlx5_qp_table qp_table;
    struct dentry *qp_debugfs;
    struct dentry *eq_debugfs;
    struct dentry *cq_debugfs;
    struct dentry *cmdif_debugfs;
    /* end: qp staff */

    /* start: cq staff */
    struct mlx5_cq_table cq_table;
    /* end: cq staff */

    /* start: mr staff */
    struct mlx5_mr_table mr_table;
    /* end: mr staff */

    /* start: alloc staff */
    int numa_node;
    struct mutex pgdir_mutex;
    struct list_head pgdir_list;
    /* end: alloc staff */
    struct dentry *dbg_root;

    /* protect mkey key part */
    spinlock_t mkey_lock;
    u8 mkey_key;

    struct list_head dev_list;
    struct list_head ctx_list;
    spinlock_t ctx_lock;
    unsigned long pci_dev_data;
};

enum mlx5_device_state {
    MLX5_DEVICE_STATE_UP,
    MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

struct mlx5_special_contexts {
    int resd_lkey;
};

struct mlx5_core_dev {
    struct pci_dev *pdev;
    char board_id[MLX5_BOARD_ID_LEN];
    struct mlx5_cmd cmd;
    struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
    u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
    u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
    struct mlx5_init_seg __iomem *iseg;
    enum mlx5_device_state state;
    void (*event) (struct mlx5_core_dev *dev,
                   enum mlx5_dev_event event,
                   unsigned long param);
    struct mlx5_priv priv;
    struct mlx5_profile *profile;
    atomic_t num_qps;
    u32 issi;
    struct mlx5_special_contexts special_contexts;
    unsigned int module_status[MLX5_MAX_PORTS];
    u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
};

enum {
    MLX5_WOL_DISABLE = 0,
    MLX5_WOL_SECURED_MAGIC = 1 << 1,
    MLX5_WOL_MAGIC = 1 << 2,
    MLX5_WOL_ARP = 1 << 3,
    MLX5_WOL_BROADCAST = 1 << 4,
    MLX5_WOL_MULTICAST = 1 << 5,
    MLX5_WOL_UNICAST = 1 << 6,
    MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};

struct mlx5_db {
    __be32 *db;
    union {
        struct mlx5_db_pgdir *pgdir;
        struct mlx5_ib_user_db_page *user_page;
    } u;
    dma_addr_t dma;
    int index;
};

struct mlx5_net_counters {
    u64 packets;
    u64 octets;
};

struct mlx5_ptys_reg {
    u8 an_dis_admin;
    u8 an_dis_ap;
    u8 local_port;
    u8 proto_mask;
    u32 eth_proto_cap;
    u16 ib_link_width_cap;
    u16 ib_proto_cap;
    u32 eth_proto_admin;
    u16 ib_link_width_admin;
    u16 ib_proto_admin;
    u32 eth_proto_oper;
    u16 ib_link_width_oper;
    u16 ib_proto_oper;
    u32 eth_proto_lp_advertise;
};

struct mlx5_pvlc_reg {
    u8 local_port;
    u8 vl_hw_cap;
    u8 vl_admin;
    u8 vl_operational;
};

struct mlx5_pmtu_reg {
    u8 local_port;
    u16 max_mtu;
    u16 admin_mtu;
    u16 oper_mtu;
};

struct mlx5_vport_counters {
    struct mlx5_net_counters received_errors;
    struct mlx5_net_counters transmit_errors;
    struct mlx5_net_counters received_ib_unicast;
    struct mlx5_net_counters transmitted_ib_unicast;
    struct mlx5_net_counters received_ib_multicast;
    struct mlx5_net_counters transmitted_ib_multicast;
    struct mlx5_net_counters received_eth_broadcast;
    struct mlx5_net_counters transmitted_eth_broadcast;
    struct mlx5_net_counters received_eth_unicast;
    struct mlx5_net_counters transmitted_eth_unicast;
    struct mlx5_net_counters received_eth_multicast;
    struct mlx5_net_counters transmitted_eth_multicast;
};

enum {
    MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};

struct mlx5_core_dct {
    struct mlx5_core_rsc_common common; /* must be first */
    void (*event)(struct mlx5_core_dct *, int);
    int dctn;
    struct completion drained;
    struct mlx5_rsc_debug *dbg;
    int pid;
};

enum {
    MLX5_COMP_EQ_SIZE = 1024,
};

enum {
    MLX5_PTYS_IB = 1 << 0,
    MLX5_PTYS_EN = 1 << 2,
};

struct mlx5_db_pgdir {
    struct list_head list;
    DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
    __be32 *db_page;
    dma_addr_t db_dma;
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
    struct mlx5_cmd_msg *in;
    struct mlx5_cmd_msg *out;
    void *uout;
    int uout_size;
    mlx5_cmd_cbk_t callback;
    void *context;
    int idx;
    struct completion done;
    struct mlx5_cmd *cmd;
    struct work_struct work;
    struct mlx5_cmd_layout *lay;
    int ret;
    int page_queue;
    u8 status;
    u8 token;
    u64 ts1;
    u64 ts2;
    u16 op;
};

struct mlx5_pas {
    u64 pa;
    u8 log_sz;
};

static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
    if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
        return buf->direct.buf + offset;
    else
        return buf->page_list[offset >> PAGE_SHIFT].buf +
            (offset & (PAGE_SIZE - 1));
}

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
    .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
    .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
    return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
    return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
    return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
    return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev)
{
    return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
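/*
 * Illustrative note: the four accessors above unpack two 32-bit
 * big-endian init-segment words: fw_rev carries the minor revision in
 * its high half-word and the major revision in its low one, and
 * cmdif_rev_fw_sub carries the command-interface revision over the
 * sub-minor revision the same way. A host-side sketch of the same
 * unpacking on made-up values (not real firmware numbers):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t fw_rev = (4u << 16) | 1012u;        /* minor 4, major 1012 */
    uint32_t cmdif_fw_sub = (5u << 16) | 1100u;  /* cmdif rev 5, sub 1100 */

    printf("fw %u.%u.%u, cmdif rev %u\n",
           fw_rev & 0xffff, fw_rev >> 16,
           cmdif_fw_sub & 0xffff, cmdif_fw_sub >> 16);
    return 0;
}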
static inline int mlx5_get_gid_table_len(u16 param)
{
    if (param > 4) {
        printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n");
        return 0;
    }

    return 8 * (1 << param);
}

static inline void *mlx5_vzalloc(unsigned long size)
{
    void *rtn;

    rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
    return rtn;
}

static inline void *mlx5_vmalloc(unsigned long size)
{
    void *rtn;

    rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
    if (!rtn)
        rtn = vmalloc(size);
    return rtn;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
                       enum mlx5_cap_mode cap_mode);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct,
                        struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
                   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_create_srq_mbox_in *in, int inlen,
                         int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_query_srq_mbox_out *out);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                          struct mlx5_create_mkey_mbox_in *in, int inlen,
                          mlx5_cmd_cbk_t callback, void *context,
                          struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                         struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                             u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
                      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
                       struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
                                u64 addr);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
                         int ptys_size, int proto_mask);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap,
                              int proto_mask);
int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
                            u8 *an_disable_cap, u8 *an_disable_status);
int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
                          u32 eth_proto_admin, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin,
                                int proto_mask);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
                        int proto_mask);
int mlx5_set_port_status(struct mlx5_core_dev *dev, enum mlx5_port_status status);
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
                                 enum mlx5_port_status *status);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
                        u32 rx_pause, u32 tx_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
                          u32 *rx_pause, u32 *tx_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
                        u8 *pfc_en_rx);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num);
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num,
                      int device_addr, int size, int module_num, u32 *data,
                      int *size_read);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
                       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs,
                         u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
                          struct mlx5_pvlc_reg *pvlc, int write);
int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
                          struct mlx5_ptys_reg *ptys, int write);
int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
                          struct mlx5_pmtu_reg *pmtu, int write);
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port);
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port);
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
                                int priority, int *is_enable);
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
                                 int priority, int enable);
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
                                void *out, int out_size);
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
                                 void *in, int in_size);
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
                                    void *out, int out_size);
int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
                               int in_size);
int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
                                   u8 num_of_samples, u16 sample_index,
                                   void *out, int out_size);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
    return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
    return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
    return mkey & 0xff;
}

enum {
    MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
    MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
    MAX_MR_CACHE_ENTRIES = 15,
};

struct mlx5_interface {
    void * (*add)(struct mlx5_core_dev *dev);
    void (*remove)(struct mlx5_core_dev *dev, void *context);
    void (*event)(struct mlx5_core_dev *dev, void *context,
                  enum mlx5_dev_event event, unsigned long param);
    void * (*get_dev)(void *context);
    int protocol;
    struct list_head list;
};

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);

struct mlx5_profile {
    u64 mask;
    u8 log_max_qp;
    struct {
        int size;
        int limit;
    } mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
    MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
    return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
#endif /* MLX5_DRIVER_H */
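/*
 * Illustrative note: mlx5_mkey_to_idx(), mlx5_idx_to_mkey() and
 * mlx5_mkey_variant() above treat a 32-bit memory key as a 24-bit
 * table index plus an 8-bit variant in the low byte. A standalone
 * sketch of the round trip with an arbitrary example key:
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t mkey_to_idx(uint32_t mkey)  { return mkey >> 8; }
static uint32_t idx_to_mkey(uint32_t idx)   { return idx << 8; }
static uint8_t  mkey_variant(uint32_t mkey) { return mkey & 0xff; }

int main(void)
{
    uint32_t mkey = idx_to_mkey(0x1234) | 0xab;

    /* prints: mkey 0x1234ab -> idx 0x1234 variant 0xab */
    printf("mkey 0x%x -> idx 0x%x variant 0x%x\n",
           mkey, mkey_to_idx(mkey), mkey_variant(mkey));
    return 0;
}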
Index: head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
===================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c	(revision 312875)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c	(revision 312876)
@@ -1,1583 +1,1586 @@
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "mlx5_core.h"

enum {
    CMD_IF_REV = 5,
};

enum {
    CMD_MODE_POLLING,
    CMD_MODE_EVENTS
};

enum {
    NUM_LONG_LISTS = 2,
    NUM_MED_LISTS = 64,
    LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
        MLX5_CMD_DATA_BLOCK_SIZE,
    MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
    MLX5_CMD_DELIVERY_STAT_OK = 0x0,
    MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
    MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
    MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
    MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
    MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
    MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
    MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
    MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
    MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
    MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
                                           struct mlx5_cmd_msg *in,
                                           struct mlx5_cmd_msg *out,
                                           void *uout, int uout_size,
                                           mlx5_cmd_cbk_t cbk,
                                           void *context, int page_queue)
{
    gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
    struct mlx5_cmd_work_ent *ent;

    ent = kzalloc(sizeof(*ent), alloc_flags);
    if (!ent)
        return ERR_PTR(-ENOMEM);

    ent->in = in;
    ent->out = out;
    ent->uout = uout;
    ent->uout_size = uout_size;
    ent->callback = cbk;
    ent->context = context;
    ent->cmd = cmd;
    ent->page_queue = page_queue;

    return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
    u8 token;

    spin_lock(&cmd->token_lock);
    cmd->token++;
    if (cmd->token == 0)
        cmd->token++;
    token = cmd->token;
    spin_unlock(&cmd->token_lock);

    return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&cmd->alloc_lock, flags);
    ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
    if (ret < cmd->max_reg_cmds)
        clear_bit(ret, &cmd->bitmask);
    spin_unlock_irqrestore(&cmd->alloc_lock, flags);

    return ret < cmd->max_reg_cmds ? ret : -1;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
    unsigned long flags;

    spin_lock_irqsave(&cmd->alloc_lock, flags);
    set_bit(idx, &cmd->bitmask);
    spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
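/*
 * Illustrative note: alloc_ent()/free_ent() above are a tiny slot
 * allocator over cmd->bitmask, where a set bit means "command entry
 * free"; find_first_bit() picks the lowest free slot and clear_bit()
 * claims it under alloc_lock. A single-threaded userland sketch of the
 * same idea (ffs() stands in for find_first_bit(), locking omitted):
 */
#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int bitmask;    /* bit i set => entry i is free */

static int alloc_ent_sketch(void)
{
    int i = ffs(bitmask) - 1;   /* -1 when no entry is free */

    if (i >= 0)
        bitmask &= ~(1u << i);  /* claim the slot */
    return i;
}

static void free_ent_sketch(int idx)
{
    bitmask |= 1u << idx;       /* release the slot */
}

int main(void)
{
    bitmask = (1u << 7) - 1;    /* e.g. max_reg_cmds == 7, all free */

    printf("%d %d\n", alloc_ent_sketch(), alloc_ent_sketch()); /* 0 1 */
    free_ent_sketch(0);
    printf("%d\n", alloc_ent_sketch());                        /* 0 again */
    return 0;
}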
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
    return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, int len)
{
    u8 *ptr = buf;
    u8 sum = 0;
    int i;

    for (i = 0; i < len; i++)
        sum ^= ptr[i];

    return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
    if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
        return -EINVAL;

    if (xor8_buf(block, sizeof(*block)) != 0xff)
        return -EINVAL;

    return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
                           int csum)
{
    block->token = token;
    if (csum) {
        block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
                                    sizeof(block->data) - 2);
        block->sig = ~xor8_buf(block, sizeof(*block) - 1);
    }
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
    struct mlx5_cmd_mailbox *next = msg->next;

    while (next) {
        calc_block_sig(next->buf, token, csum);
        next = next->next;
    }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
    ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
    calc_chain_sig(ent->in, ent->token, csum);
    calc_chain_sig(ent->out, ent->token, csum);
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
    int poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
    u8 own;

    do {
        own = ent->lay->status_own;
        if (!(own & CMD_OWNER_HW)) {
            ent->ret = 0;
            return;
        }
        usleep_range(5000, 10000);
    } while (time_before(jiffies, poll_end));

    ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
    kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
    struct mlx5_cmd_mailbox *next = ent->out->next;
    int err;
    u8 sig;

    sig = xor8_buf(ent->lay, sizeof(*ent->lay));
    if (sig != 0xff)
        return -EINVAL;

    while (next) {
        err = verify_block_sig(next->buf);
        if (err)
            return err;

        next = next->next;
    }

    return 0;
}
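/*
 * Illustrative note: the signature helpers above rely on an XOR-8
 * invariant: calc_block_sig() stores the complement of the running
 * XOR in the signature byte, so XOR-ing the sealed block back over
 * its full length (signature included) must yield 0xff, which is
 * exactly what verify_block_sig()/verify_signature() test. A
 * standalone sketch over an arbitrary buffer:
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t xor8(const void *buf, int len)
{
    const uint8_t *p = buf;
    uint8_t sum = 0;
    int i;

    for (i = 0; i < len; i++)
        sum ^= p[i];
    return sum;
}

int main(void)
{
    uint8_t blk[16] = "command payload";

    blk[15] = ~xor8(blk, 15);   /* seal: complement of XOR of the rest */

    /* verify: XOR over the whole sealed block comes out 0xff */
    printf("check: 0x%02x (expect 0xff)\n", xor8(blk, 16));
    return 0;
}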
static void dump_buf(void *buf, int size, int data_only, int offset)
{
    __be32 *p = buf;
    int i;

    for (i = 0; i < size; i += 16) {
        pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
                 be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3]));
        p += 4;
        offset += 16;
    }
    if (!data_only)
        pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
    switch (command) {
    case MLX5_CMD_OP_QUERY_HCA_CAP: return "QUERY_HCA_CAP";
    case MLX5_CMD_OP_SET_HCA_CAP: return "SET_HCA_CAP";
    case MLX5_CMD_OP_QUERY_ADAPTER: return "QUERY_ADAPTER";
    case MLX5_CMD_OP_INIT_HCA: return "INIT_HCA";
    case MLX5_CMD_OP_TEARDOWN_HCA: return "TEARDOWN_HCA";
    case MLX5_CMD_OP_ENABLE_HCA: return "MLX5_CMD_OP_ENABLE_HCA";
    case MLX5_CMD_OP_DISABLE_HCA: return "MLX5_CMD_OP_DISABLE_HCA";
    case MLX5_CMD_OP_QUERY_PAGES: return "QUERY_PAGES";
    case MLX5_CMD_OP_MANAGE_PAGES: return "MANAGE_PAGES";
    case MLX5_CMD_OP_QUERY_ISSI: return "QUERY_ISSI";
    case MLX5_CMD_OP_SET_ISSI: return "SET_ISSI";
    case MLX5_CMD_OP_CREATE_MKEY: return "CREATE_MKEY";
    case MLX5_CMD_OP_QUERY_MKEY: return "QUERY_MKEY";
    case MLX5_CMD_OP_DESTROY_MKEY: return "DESTROY_MKEY";
    case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: return "QUERY_SPECIAL_CONTEXTS";
    case MLX5_CMD_OP_PAGE_FAULT_RESUME: return "PAGE_FAULT_RESUME";
    case MLX5_CMD_OP_CREATE_EQ: return "CREATE_EQ";
    case MLX5_CMD_OP_DESTROY_EQ: return "DESTROY_EQ";
    case MLX5_CMD_OP_QUERY_EQ: return "QUERY_EQ";
    case MLX5_CMD_OP_GEN_EQE: return "GEN_EQE";
    case MLX5_CMD_OP_CREATE_CQ: return "CREATE_CQ";
    case MLX5_CMD_OP_DESTROY_CQ: return "DESTROY_CQ";
    case MLX5_CMD_OP_QUERY_CQ: return "QUERY_CQ";
    case MLX5_CMD_OP_MODIFY_CQ: return "MODIFY_CQ";
    case MLX5_CMD_OP_CREATE_QP: return "CREATE_QP";
    case MLX5_CMD_OP_DESTROY_QP: return "DESTROY_QP";
    case MLX5_CMD_OP_RST2INIT_QP: return "RST2INIT_QP";
    case MLX5_CMD_OP_INIT2RTR_QP: return "INIT2RTR_QP";
    case MLX5_CMD_OP_RTR2RTS_QP: return "RTR2RTS_QP";
    case MLX5_CMD_OP_RTS2RTS_QP: return "RTS2RTS_QP";
    case MLX5_CMD_OP_SQERR2RTS_QP: return "SQERR2RTS_QP";
    case MLX5_CMD_OP_2ERR_QP: return "2ERR_QP";
    case MLX5_CMD_OP_2RST_QP: return "2RST_QP";
    case MLX5_CMD_OP_QUERY_QP: return "QUERY_QP";
    case MLX5_CMD_OP_SQD_RTS_QP: return "SQD_RTS_QP";
    case MLX5_CMD_OP_MAD_IFC: return "MAD_IFC";
    case MLX5_CMD_OP_INIT2INIT_QP: return "INIT2INIT_QP";
    case MLX5_CMD_OP_CREATE_PSV: return "CREATE_PSV";
    case MLX5_CMD_OP_DESTROY_PSV: return "DESTROY_PSV";
    case MLX5_CMD_OP_CREATE_SRQ: return "CREATE_SRQ";
    case MLX5_CMD_OP_DESTROY_SRQ: return "DESTROY_SRQ";
    case MLX5_CMD_OP_QUERY_SRQ: return "QUERY_SRQ";
    case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ";
    case MLX5_CMD_OP_CREATE_XRC_SRQ: return "CREATE_XRC_SRQ";
    case MLX5_CMD_OP_DESTROY_XRC_SRQ: return "DESTROY_XRC_SRQ";
    case MLX5_CMD_OP_QUERY_XRC_SRQ: return "QUERY_XRC_SRQ";
    case MLX5_CMD_OP_ARM_XRC_SRQ: return "ARM_XRC_SRQ";
    case MLX5_CMD_OP_CREATE_DCT: return "CREATE_DCT";
    case MLX5_CMD_OP_SET_DC_CNAK_TRACE: return "SET_DC_CNAK_TRACE";
    case MLX5_CMD_OP_DESTROY_DCT: return "DESTROY_DCT";
    case MLX5_CMD_OP_DRAIN_DCT: return "DRAIN_DCT";
    case MLX5_CMD_OP_QUERY_DCT: return "QUERY_DCT";
    case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: return "ARM_DCT_FOR_KEY_VIOLATION";
    case MLX5_CMD_OP_QUERY_VPORT_STATE: return "QUERY_VPORT_STATE";
    case MLX5_CMD_OP_MODIFY_VPORT_STATE: return "MODIFY_VPORT_STATE";
    case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: return "QUERY_ESW_VPORT_CONTEXT";
    case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: return "MODIFY_ESW_VPORT_CONTEXT";
    case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: return "QUERY_NIC_VPORT_CONTEXT";
    case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: return "MODIFY_NIC_VPORT_CONTEXT";
    case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: return "QUERY_ROCE_ADDRESS";
    case MLX5_CMD_OP_SET_ROCE_ADDRESS: return "SET_ROCE_ADDRESS";
    case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: return "QUERY_HCA_VPORT_CONTEXT";
    case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: return "MODIFY_HCA_VPORT_CONTEXT";
    case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: return "QUERY_HCA_VPORT_GID";
    case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: return "QUERY_HCA_VPORT_PKEY";
    case MLX5_CMD_OP_QUERY_VPORT_COUNTER: return "QUERY_VPORT_COUNTER";
    case MLX5_CMD_OP_SET_WOL_ROL: return "SET_WOL_ROL";
    case MLX5_CMD_OP_QUERY_WOL_ROL: return "QUERY_WOL_ROL";
    case MLX5_CMD_OP_ALLOC_Q_COUNTER: return "ALLOC_Q_COUNTER";
    case MLX5_CMD_OP_DEALLOC_Q_COUNTER: return "DEALLOC_Q_COUNTER";
    case MLX5_CMD_OP_QUERY_Q_COUNTER: return "QUERY_Q_COUNTER";
    case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD";
    case MLX5_CMD_OP_DEALLOC_PD: return "DEALLOC_PD";
    case MLX5_CMD_OP_ALLOC_UAR: return "ALLOC_UAR";
    case MLX5_CMD_OP_DEALLOC_UAR: return "DEALLOC_UAR";
    case MLX5_CMD_OP_CONFIG_INT_MODERATION: return "CONFIG_INT_MODERATION";
    case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG";
    case MLX5_CMD_OP_DETACH_FROM_MCG: return "DETACH_FROM_MCG";
    case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: return "GET_DROPPED_PACKET_LOG";
    case MLX5_CMD_OP_QUERY_MAD_DEMUX: return "QUERY_MAD_DEMUX";
    case MLX5_CMD_OP_SET_MAD_DEMUX: return "SET_MAD_DEMUX";
    case MLX5_CMD_OP_NOP: return "NOP";
return "DEALLOC_XRCD"; case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: return "ALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: return "DEALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_QUERY_CONG_STATUS: return "QUERY_CONG_STATUS"; case MLX5_CMD_OP_MODIFY_CONG_STATUS: return "MODIFY_CONG_STATUS"; case MLX5_CMD_OP_QUERY_CONG_PARAMS: return "QUERY_CONG_PARAMS"; case MLX5_CMD_OP_MODIFY_CONG_PARAMS: return "MODIFY_CONG_PARAMS"; case MLX5_CMD_OP_QUERY_CONG_STATISTICS: return "QUERY_CONG_STATISTICS"; case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: return "ADD_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: return "DELETE_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: return "SET_L2_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: return "QUERY_L2_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: return "DELETE_L2_TABLE_ENTRY"; case MLX5_CMD_OP_CREATE_RMP: return "CREATE_RMP"; case MLX5_CMD_OP_MODIFY_RMP: return "MODIFY_RMP"; case MLX5_CMD_OP_DESTROY_RMP: return "DESTROY_RMP"; case MLX5_CMD_OP_QUERY_RMP: return "QUERY_RMP"; case MLX5_CMD_OP_CREATE_RQT: return "CREATE_RQT"; case MLX5_CMD_OP_MODIFY_RQT: return "MODIFY_RQT"; case MLX5_CMD_OP_DESTROY_RQT: return "DESTROY_RQT"; case MLX5_CMD_OP_QUERY_RQT: return "QUERY_RQT"; case MLX5_CMD_OP_ACCESS_REG: return "MLX5_CMD_OP_ACCESS_REG"; case MLX5_CMD_OP_CREATE_SQ: return "CREATE_SQ"; case MLX5_CMD_OP_MODIFY_SQ: return "MODIFY_SQ"; case MLX5_CMD_OP_DESTROY_SQ: return "DESTROY_SQ"; case MLX5_CMD_OP_QUERY_SQ: return "QUERY_SQ"; case MLX5_CMD_OP_CREATE_RQ: return "CREATE_RQ"; case MLX5_CMD_OP_MODIFY_RQ: return "MODIFY_RQ"; case MLX5_CMD_OP_DESTROY_RQ: return "DESTROY_RQ"; case MLX5_CMD_OP_QUERY_RQ: return "QUERY_RQ"; case MLX5_CMD_OP_CREATE_TIR: return "CREATE_TIR"; case MLX5_CMD_OP_MODIFY_TIR: return "MODIFY_TIR"; case MLX5_CMD_OP_DESTROY_TIR: return "DESTROY_TIR"; case MLX5_CMD_OP_QUERY_TIR: return "QUERY_TIR"; case MLX5_CMD_OP_CREATE_TIS: return "CREATE_TIS"; case MLX5_CMD_OP_MODIFY_TIS: return "MODIFY_TIS"; case MLX5_CMD_OP_DESTROY_TIS: return "DESTROY_TIS"; case MLX5_CMD_OP_QUERY_TIS: return "QUERY_TIS"; case MLX5_CMD_OP_CREATE_FLOW_TABLE: return "CREATE_FLOW_TABLE"; case MLX5_CMD_OP_DESTROY_FLOW_TABLE: return "DESTROY_FLOW_TABLE"; case MLX5_CMD_OP_QUERY_FLOW_TABLE: return "QUERY_FLOW_TABLE"; case MLX5_CMD_OP_CREATE_FLOW_GROUP: return "CREATE_FLOW_GROUP"; case MLX5_CMD_OP_DESTROY_FLOW_GROUP: return "DESTROY_FLOW_GROUP"; case MLX5_CMD_OP_QUERY_FLOW_GROUP: return "QUERY_FLOW_GROUP"; case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: return "SET_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: return "QUERY_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: return "DELETE_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_SET_DIAGNOSTICS: return "MLX5_CMD_OP_SET_DIAGNOSTICS"; case MLX5_CMD_OP_QUERY_DIAGNOSTICS: return "MLX5_CMD_OP_QUERY_DIAGNOSTICS"; default: return "unknown command opcode"; } } static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; struct mlx5_cmd_mailbox *next = msg->next; int data_only; u32 offset = 0; int dump_len; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); if (data_only) mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, "dump command data %s(0x%x) %s\n", mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); else mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", mlx5_command_str(op), op, input ? 
"INPUT" : "OUTPUT"); if (data_only) { if (input) { dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); offset += sizeof(ent->lay->in); } else { dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); offset += sizeof(ent->lay->out); } } else { dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); offset += sizeof(*ent->lay); } while (next && offset < msg->len) { if (data_only) { dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); dump_buf(next->buf, dump_len, 1, offset); offset += MLX5_CMD_DATA_BLOCK_SIZE; } else { mlx5_core_dbg(dev, "command block:\n"); dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); offset += sizeof(struct mlx5_cmd_prot_block); } next = next->next; } if (data_only) pr_debug("\n"); } static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); struct mlx5_cmd_layout *lay; struct semaphore *sem; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; if (cmd->moving_to_polling) { mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); return; } down(sem); if (!ent->page_queue) { ent->idx = alloc_ent(cmd); if (ent->idx < 0) { mlx5_core_err(dev, "failed to allocate command entry\n"); up(sem); return; } } else { ent->idx = cmd->max_reg_cmds; } ent->token = alloc_token(cmd); cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); if (ent->out->next) lay->out_ptr = cpu_to_be64(ent->out->next->dma); lay->outlen = cpu_to_be32(ent->out->len); lay->type = MLX5_PCI_CMD_XPORT; lay->token = ent->token; lay->status_own = CMD_OWNER_HW; set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point*/ if (cmd->mode == CMD_MODE_POLLING) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1U << ent->idx); } } static const char *deliv_status_to_str(u8 status) { switch (status) { case MLX5_CMD_DELIVERY_STAT_OK: return "no errors"; case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: return "signature error"; case MLX5_CMD_DELIVERY_STAT_TOK_ERR: return "token error"; case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: return "bad block number"; case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: return "output pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: return "input pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_FW_ERR: return "firmware internal error"; case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: return "command input length error"; case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: return "command ouput length error"; case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: return "reserved fields not cleared"; case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: return "bad command descriptor type"; default: return "unknown status code"; } } static u16 msg_to_opcode(struct mlx5_cmd_msg *in) { struct mlx5_inbox_hdr *hdr 
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
    struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

    return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
    int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
    struct mlx5_cmd *cmd = &dev->cmd;
    int err;

    if (cmd->mode == CMD_MODE_POLLING) {
        wait_for_completion(&ent->done);
        err = ent->ret;
    } else {
        if (!wait_for_completion_timeout(&ent->done, timeout))
            err = -ETIMEDOUT;
        else
            err = 0;
    }

    if (err == -ETIMEDOUT) {
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                       mlx5_command_str(msg_to_opcode(ent->in)),
                       msg_to_opcode(ent->in));
    }
    mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
                  deliv_status_to_str(ent->status), ent->status);

    return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchrous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback, void *context,
                           int page_queue, u8 *status)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
    struct mlx5_cmd_stats *stats;
    int err = 0;
    s64 ds;
    u16 op;

    if (callback && page_queue)
        return -EINVAL;

    ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
                    page_queue);
    if (IS_ERR(ent))
        return PTR_ERR(ent);

    if (!callback)
        init_completion(&ent->done);

    INIT_WORK(&ent->work, cmd_work_handler);
    if (page_queue) {
        cmd_work_handler(&ent->work);
    } else if (!queue_work(cmd->wq, &ent->work)) {
        mlx5_core_warn(dev, "failed to queue work\n");
        err = -ENOMEM;
        goto out_free;
    }

    if (!callback) {
        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT)
            goto out;

        ds = ent->ts2 - ent->ts1;
        op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
        if (op < ARRAY_SIZE(cmd->stats)) {
            stats = &cmd->stats[op];
            spin_lock_irq(&stats->lock);
            stats->sum += ds;
            ++stats->n;
            spin_unlock_irq(&stats->lock);
        }
        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                           "fw exec time for %s is %lld nsec\n",
                           mlx5_command_str(op), (long long)ds);
        *status = ent->status;
        free_cmd(ent);
    }

    return err;

out_free:
    free_cmd(ent);
out:
    return err;
}

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(to->first.data));
    memcpy(to->first.data, from, copy);
    size -= copy;
    from += copy;

    next = to->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;
        memcpy(block->data, from, copy);
        from += copy;
        size -= copy;
        next = next->next;
    }

    return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(from->first.data));
    memcpy(to, from->first.data, copy);
    size -= copy;
    to += copy;

    next = from->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;

        memcpy(to, block->data, copy);
        to += copy;
        size -= copy;
        next = next->next;
    }

    return 0;
}
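/*
 * Illustrative note: mlx5_copy_to_msg()/mlx5_copy_from_msg() above
 * scatter and gather a flat buffer across the command message layout:
 * the first sizeof(first.data) bytes travel inline in the descriptor
 * and the rest in the chain of mailbox data blocks. A sketch of the
 * split arithmetic; both sizes below are illustrative stand-ins, not
 * taken from the headers:
 */
#include <stdio.h>

#define FIRST_DATA_SIZE 16      /* sizeof(first.data): 4 x __be32 */
#define DATA_BLOCK_SIZE 512     /* stand-in for MLX5_CMD_DATA_BLOCK_SIZE */

int main(void)
{
    int size = 1500;    /* hypothetical command payload length */
    int inline_part = size < FIRST_DATA_SIZE ? size : FIRST_DATA_SIZE;
    int blen = size - inline_part;
    /* same rounding mlx5_alloc_cmd_msg() uses for its block count */
    int nblocks = (blen + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE;

    printf("%d bytes inline, %d mailbox blocks\n", inline_part, nblocks);
    return 0;
}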
"failed allocation\n"); kfree(mailbox); return ERR_PTR(-ENOMEM); } memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); mailbox->next = NULL; return mailbox; } static void free_cmd_box(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *mailbox) { pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, int size) { struct mlx5_cmd_mailbox *tmp, *head = NULL; struct mlx5_cmd_prot_block *block; struct mlx5_cmd_msg *msg; int blen; int err; int n; int i; msg = kzalloc(sizeof(*msg), flags); if (!msg) return ERR_PTR(-ENOMEM); blen = size - min_t(int, sizeof(msg->first.data), size); n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; for (i = 0; i < n; i++) { tmp = alloc_cmd_box(dev, flags); if (IS_ERR(tmp)) { mlx5_core_warn(dev, "failed allocating block\n"); err = PTR_ERR(tmp); goto err_alloc; } block = tmp->buf; tmp->next = head; block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); block->block_num = cpu_to_be32(n - i - 1); head = tmp; } msg->next = head; msg->len = size; return msg; err_alloc: while (head) { tmp = head->next; free_cmd_box(dev, head); head = tmp; } kfree(msg); return ERR_PTR(err); } static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *head = msg->next; struct mlx5_cmd_mailbox *next; while (head) { next = head->next; free_cmd_box(dev, head); head = next; } kfree(msg); } static void set_wqname(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", dev_name(&dev->pdev->dev)); } static void clean_debug_files(struct mlx5_core_dev *dev) { } void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; int i; for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); down(&cmd->pages_sem); flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_EVENTS; up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; synchronize_irq(dev->priv.eq_table.pages_eq.irqn); flush_workqueue(dev->priv.pg_wq); cmd->moving_to_polling = 1; flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_POLLING; cmd->moving_to_polling = 0; } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { unsigned long flags; if (msg->cache) { spin_lock_irqsave(&msg->cache->lock, flags); list_add_tail(&msg->list, &msg->cache->head); spin_unlock_irqrestore(&msg->cache->lock, flags); } else { mlx5_free_cmd_msg(dev, msg); } } -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; mlx5_cmd_cbk_t callback; void *context; int err; int i; + struct semaphore *sem; s64 ds; struct mlx5_cmd_stats *stats; unsigned long flags; - for (i = 0; i < (1 << cmd->log_sz); i++) { - if (test_bit(i, &vector)) { - struct semaphore *sem; - - ent = cmd->ent_arr[i]; - if (ent->page_queue) - sem = &cmd->pages_sem; + while (vector != 0) { + i = ffs(vector) - 1; + vector &= ~(1U << i); + ent = cmd->ent_arr[i]; + if (ent->page_queue) + sem = &cmd->pages_sem; + else + sem = &cmd->sem; + ent->ts2 = ktime_get_ns(); + memcpy(ent->out->first.data, ent->lay->out, + sizeof(ent->lay->out)); + dump_command(dev, ent, 0); + if (!ent->ret) { + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); else - sem = 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
 {
     struct mlx5_cmd *cmd = &dev->cmd;
     struct mlx5_cmd_work_ent *ent;
     mlx5_cmd_cbk_t callback;
     void *context;
     int err;
     int i;
+    struct semaphore *sem;
     s64 ds;
     struct mlx5_cmd_stats *stats;
     unsigned long flags;

-    for (i = 0; i < (1 << cmd->log_sz); i++) {
-        if (test_bit(i, &vector)) {
-            struct semaphore *sem;
-
-            ent = cmd->ent_arr[i];
-            if (ent->page_queue)
-                sem = &cmd->pages_sem;
+    while (vector != 0) {
+        i = ffs(vector) - 1;
+        vector &= ~(1U << i);
+        ent = cmd->ent_arr[i];
+        if (ent->page_queue)
+            sem = &cmd->pages_sem;
+        else
+            sem = &cmd->sem;
+        ent->ts2 = ktime_get_ns();
+        memcpy(ent->out->first.data, ent->lay->out,
+            sizeof(ent->lay->out));
+        dump_command(dev, ent, 0);
+        if (!ent->ret) {
+            if (!cmd->checksum_disabled)
+                ent->ret = verify_signature(ent);
             else
-                sem = &cmd->sem;
-            ent->ts2 = ktime_get_ns();
-            memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
-            dump_command(dev, ent, 0);
-            if (!ent->ret) {
-                if (!cmd->checksum_disabled)
-                    ent->ret = verify_signature(ent);
-                else
-                    ent->ret = 0;
-                ent->status = ent->lay->status_own >> 1;
-                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
-                    ent->ret, deliv_status_to_str(ent->status), ent->status);
+                ent->ret = 0;
+            ent->status = ent->lay->status_own >> 1;
+            mlx5_core_dbg(dev,
+                "FW command ret 0x%x, status %s(0x%x)\n",
+                ent->ret,
+                deliv_status_to_str(ent->status),
+                ent->status);
+        }
+        free_ent(cmd, ent->idx);
+        if (ent->callback) {
+            ds = ent->ts2 - ent->ts1;
+            if (ent->op < ARRAY_SIZE(cmd->stats)) {
+                stats = &cmd->stats[ent->op];
+                spin_lock_irqsave(&stats->lock, flags);
+                stats->sum += ds;
+                ++stats->n;
+                spin_unlock_irqrestore(&stats->lock, flags);
             }
-            free_ent(cmd, ent->idx);
-            if (ent->callback) {
-                ds = ent->ts2 - ent->ts1;
-                if (ent->op < ARRAY_SIZE(cmd->stats)) {
-                    stats = &cmd->stats[ent->op];
-                    spin_lock_irqsave(&stats->lock, flags);
-                    stats->sum += ds;
-                    ++stats->n;
-                    spin_unlock_irqrestore(&stats->lock, flags);
-                }
-                callback = ent->callback;
-                context = ent->context;
-                err = ent->ret;
-                if (!err)
-                    err = mlx5_copy_from_msg(ent->uout,
-                        ent->out,
-                        ent->uout_size);
+            callback = ent->callback;
+            context = ent->context;
+            err = ent->ret;
+            if (!err)
+                err = mlx5_copy_from_msg(ent->uout,
+                    ent->out,
+                    ent->uout_size);

-                mlx5_free_cmd_msg(dev, ent->out);
-                free_msg(dev, ent->in);
+            mlx5_free_cmd_msg(dev, ent->out);
+            free_msg(dev, ent->in);

-                free_cmd(ent);
-                callback(err, context);
-            } else {
-                complete(&ent->done);
-            }
-            up(sem);
+            free_cmd(ent);
+            callback(err, context);
+        } else {
+            complete(&ent->done);
         }
+        up(sem);
     }
 }
 EXPORT_SYMBOL(mlx5_cmd_comp_handler);
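/*
 * Illustrative note: the rewritten handler above is the substance of
 * this revision. Instead of scanning all (1 << log_sz) entries with
 * test_bit() on an unsigned long, it takes the completion vector as a
 * u32 and visits only the set bits with ffs(), clearing each one
 * before handling it; the 1U << ent->idx change in cmd_work_handler()
 * matches the narrower type. A standalone sketch of that iteration
 * pattern:
 */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>    /* ffs() */

int main(void)
{
    uint32_t vector = (1u << 0) | (1u << 5) | (1u << 31);

    /* visit each set bit exactly once, lowest first */
    while (vector != 0) {
        int i = ffs(vector) - 1;    /* index of lowest set bit */

        vector &= ~(1u << i);       /* clear it before handling */
        printf("completion for command entry %d\n", i);
    }
    return 0;
}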
static int status_to_err(u8 status)
{
    return status ? -1 : 0; /* TBD more meaningful codes */
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
                                      gfp_t gfp)
{
    struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
    struct mlx5_cmd *cmd = &dev->cmd;
    struct cache_ent *ent = NULL;

    if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
        ent = &cmd->cache.large;
    else if (in_size > 16 && in_size <= MED_LIST_SIZE)
        ent = &cmd->cache.med;

    if (ent) {
        spin_lock_irq(&ent->lock);
        if (!list_empty(&ent->head)) {
            msg = list_entry(ent->head.next, struct mlx5_cmd_msg, list);
            /* For cached lists, we must explicitly state what is
             * the real size
             */
            msg->len = in_size;
            list_del(&msg->list);
        }
        spin_unlock_irq(&ent->lock);
    }

    if (IS_ERR(msg))
        msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

    return msg;
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
    return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec_helper(struct mlx5_core_dev *dev, void *in, int in_size,
                           void *out, int out_size, mlx5_cmd_cbk_t callback,
                           void *context)
{
    struct mlx5_cmd_msg *inb;
    struct mlx5_cmd_msg *outb;
    int pages_queue;
    gfp_t gfp;
    int err;
    u8 status = 0;

    pages_queue = is_manage_pages(in);
    gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

    inb = alloc_msg(dev, in_size, gfp);
    if (IS_ERR(inb)) {
        err = PTR_ERR(inb);
        return err;
    }

    err = mlx5_copy_to_msg(inb, in, in_size);
    if (err) {
        mlx5_core_warn(dev, "err %d\n", err);
        goto out_in;
    }

    outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
    if (IS_ERR(outb)) {
        err = PTR_ERR(outb);
        goto out_in;
    }

    err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
                          pages_queue, &status);
    if (err) {
        if (err == -ETIMEDOUT)
            return err;
        goto out_out;
    }

    mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
    if (status) {
        err = status_to_err(status);
        goto out_out;
    }

    if (callback)
        return err;

    err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
    mlx5_free_cmd_msg(dev, outb);

out_in:
    free_msg(dev, inb);
    return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size)
{
    return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context)
{
    return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_msg *msg;
    struct mlx5_cmd_msg *n;

    list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
        list_del(&msg->list);
        mlx5_free_cmd_msg(dev, msg);
    }

    list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
        list_del(&msg->list);
        mlx5_free_cmd_msg(dev, msg);
    }
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_msg *msg;
    int err;
    int i;

    spin_lock_init(&cmd->cache.large.lock);
    INIT_LIST_HEAD(&cmd->cache.large.head);
    spin_lock_init(&cmd->cache.med.lock);
    INIT_LIST_HEAD(&cmd->cache.med.head);

    for (i = 0; i < NUM_LONG_LISTS; i++) {
        msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
        if (IS_ERR(msg)) {
            err = PTR_ERR(msg);
            goto ex_err;
        }
        msg->cache = &cmd->cache.large;
        list_add_tail(&msg->list, &cmd->cache.large.head);
    }

    for (i = 0; i < NUM_MED_LISTS; i++) {
        msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
        if (IS_ERR(msg)) {
            err = PTR_ERR(msg);
            goto ex_err;
        }
        msg->cache = &cmd->cache.med;
        list_add_tail(&msg->list, &cmd->cache.med.head);
    }

    return 0;

ex_err:
    destroy_msg_cache(dev);
    return err;
}

static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
    struct device *ddev = &dev->pdev->dev;

    cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
                                             &cmd->alloc_dma, GFP_KERNEL);
    if (!cmd->cmd_alloc_buf)
        return -ENOMEM;

    /* make sure it is aligned to 4K */
    if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
        cmd->cmd_buf = cmd->cmd_alloc_buf;
        cmd->dma = cmd->alloc_dma;
        cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
        return 0;
    }

    dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
                      cmd->alloc_dma);
    cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
                                             2 * MLX5_ADAPTER_PAGE_SIZE - 1,
                                             &cmd->alloc_dma, GFP_KERNEL);
    if (!cmd->cmd_alloc_buf)
        return -ENOMEM;

    cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
    cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
    cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
    return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
    struct device *ddev = &dev->pdev->dev;

    dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
                      cmd->alloc_dma);
}
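/*
 * Illustrative note: alloc_cmd_page() above wants the command queue
 * page naturally aligned. It first tries a plain coherent allocation
 * and, only if that happens to be misaligned, reallocates
 * 2 * PAGE - 1 bytes, inside which an aligned page always fits, then
 * rounds both the CPU and DMA addresses up. The same
 * over-allocate-and-align arithmetic on a plain pointer:
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE 4096   /* stand-in for MLX5_ADAPTER_PAGE_SIZE */

int main(void)
{
    void *raw = malloc(2 * PAGE - 1);   /* worst case still fits a page */
    uintptr_t aligned = ((uintptr_t)raw + PAGE - 1) &
        ~(uintptr_t)(PAGE - 1);

    printf("raw %p -> aligned %p\n", raw, (void *)aligned);
    free(raw);
    return 0;
}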
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
    int size = sizeof(struct mlx5_cmd_prot_block);
    int align = roundup_pow_of_two(size);
    struct mlx5_cmd *cmd = &dev->cmd;
    u32 cmd_h, cmd_l;
    u16 cmd_if_rev;
    int err;
    int i;

    cmd_if_rev = cmdif_rev_get(dev);
    if (cmd_if_rev != CMD_IF_REV) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
        return -EINVAL;
    }

    cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
    if (!cmd->pool)
        return -ENOMEM;

    err = alloc_cmd_page(dev, cmd);
    if (err)
        goto err_free_pool;

    cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
    cmd->log_sz = cmd_l >> 4 & 0xf;
    cmd->log_stride = cmd_l & 0xf;
    if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
        err = -EINVAL;
        goto err_free_page;
    }

    if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
        err = -EINVAL;
        goto err_free_page;
    }

    cmd->checksum_disabled = 1;
    cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
    cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

    cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
    if (cmd->cmdif_rev > CMD_IF_REV) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
        err = -ENOTSUPP;
        goto err_free_page;
    }

    spin_lock_init(&cmd->alloc_lock);
    spin_lock_init(&cmd->token_lock);
    for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
        spin_lock_init(&cmd->stats[i].lock);

    sema_init(&cmd->sem, cmd->max_reg_cmds);
    sema_init(&cmd->pages_sem, 1);

    cmd_h = (u32)((u64)(cmd->dma) >> 32);
    cmd_l = (u32)(cmd->dma);
    if (cmd_l & 0xfff) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
        err = -ENOMEM;
        goto err_free_page;
    }

    iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
    iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

    /* Make sure firmware sees the complete address before we proceed */
    wmb();

    mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

    cmd->mode = CMD_MODE_POLLING;

    err = create_msg_cache(dev);
    if (err) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
        goto err_free_page;
    }

    set_wqname(dev);
    cmd->wq = create_singlethread_workqueue(cmd->wq_name);
    if (!cmd->wq) {
        device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
        err = -ENOMEM;
        goto err_cache;
    }

    return 0;

err_cache:
    destroy_msg_cache(dev);

err_free_page:
    free_cmd_page(dev, cmd);

err_free_pool:
    pci_pool_destroy(cmd->pool);

    return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;

    clean_debug_files(dev);
    destroy_workqueue(cmd->wq);
    destroy_msg_cache(dev);
    free_cmd_page(dev, cmd);
    pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
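/*
 * Illustrative note: mlx5_cmd_init() above decodes the queue geometry
 * from the low byte of cmdq_addr_l_sz: bits 4..7 are log2 of the entry
 * count, bits 0..3 log2 of the entry stride. One entry is reserved for
 * the page-request queue, so max_reg_cmds is (1 << log_sz) - 1, and
 * bitmask starts with every regular entry marked free. A sketch of the
 * decode with a made-up register value:
 */
#include <stdio.h>

int main(void)
{
    unsigned int cmd_l = 0x56;  /* hypothetical low byte of cmdq_addr_l_sz */
    unsigned int log_sz = cmd_l >> 4 & 0xf;     /* 5 -> 32 entries */
    unsigned int log_stride = cmd_l & 0xf;      /* 6 -> 64-byte stride */
    unsigned int max_reg_cmds = (1 << log_sz) - 1;
    unsigned long bitmask = (1UL << max_reg_cmds) - 1;

    printf("%u entries (%u regular), free mask 0x%lx, queue %u bytes\n",
           1u << log_sz, max_reg_cmds, bitmask,
           1u << (log_sz + log_stride));
    return 0;
}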
static const char *cmd_status_str(u8 status)
{
    switch (status) {
    case MLX5_CMD_STAT_OK: return "OK";
    case MLX5_CMD_STAT_INT_ERR: return "internal error";
    case MLX5_CMD_STAT_BAD_OP_ERR: return "bad operation";
    case MLX5_CMD_STAT_BAD_PARAM_ERR: return "bad parameter";
    case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return "bad system state";
    case MLX5_CMD_STAT_BAD_RES_ERR: return "bad resource";
    case MLX5_CMD_STAT_RES_BUSY: return "resource busy";
    case MLX5_CMD_STAT_LIM_ERR: return "limits exceeded";
    case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return "bad resource state";
    case MLX5_CMD_STAT_IX_ERR: return "bad index";
    case MLX5_CMD_STAT_NO_RES_ERR: return "no resources";
    case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return "bad input length";
    case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return "bad output length";
    case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return "bad QP state";
    case MLX5_CMD_STAT_BAD_PKT_ERR: return "bad packet (discarded)";
    case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return "bad size too many outstanding CQEs";
    default: return "unknown status";
    }
}

static int cmd_status_to_err_helper(u8 status)
{
    switch (status) {
    case MLX5_CMD_STAT_OK: return 0;
    case MLX5_CMD_STAT_INT_ERR: return -EIO;
    case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
    case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
    case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
    case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
    case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
    case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
    case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
    case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
    case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
    case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
    case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
    case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
    case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
    case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
    default: return -EIO;
    }
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
    if (!hdr->status)
        return 0;

    printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));

    return cmd_status_to_err_helper(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
    u32 syndrome;
    u8 status;

    status = be32_to_cpu(*(__be32 *)ptr) >> 24;
    if (!status)
        return 0;

    syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

    printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);

    return cmd_status_to_err_helper(status);
}