Index: stable/10/sys/ofed/drivers/net/mlx4/eq.c =================================================================== --- stable/10/sys/ofed/drivers/net/mlx4/eq.c (revision 318535) +++ stable/10/sys/ofed/drivers/net/mlx4/eq.c (revision 318536) @@ -1,1406 +1,1410 @@ /* * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include #include #include "mlx4.h" #include "fw.h" enum { MLX4_IRQNAME_SIZE = 32 }; enum { MLX4_NUM_ASYNC_EQE = 0x100, MLX4_NUM_SPARE_EQE = 0x80, MLX4_EQ_ENTRY_SIZE = 0x20 }; #define MLX4_EQ_STATUS_OK ( 0 << 28) #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) #define MLX4_EQ_OWNER_SW ( 0 << 24) #define MLX4_EQ_OWNER_HW ( 1 << 24) #define MLX4_EQ_FLAG_EC ( 1 << 18) #define MLX4_EQ_FLAG_OI ( 1 << 17) #define MLX4_EQ_STATE_ARMED ( 9 << 8) #define MLX4_EQ_STATE_FIRED (10 << 8) #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD) | \ (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) static u64 get_async_ev_mask(struct mlx4_dev *dev) { u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); return async_ev_mask; } static void eq_set_ci(struct mlx4_eq *eq, int req_not) { __raw_writel((__force 
u32) cpu_to_be32((eq->cons_index & 0xffffff) | req_not << 31), eq->doorbell); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) { /* (entry & (eq->nent - 1)) gives us a cyclic array */ unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); /* CX3 is capable of extending the EQE from 32 to 64 bytes. * When this feature is enabled, the first (in the lower addresses) * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes * contain the legacy EQE information. */ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; } static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) { struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) { struct mlx4_eqe *eqe = &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; return (!!(eqe->owner & 0x80) ^ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? eqe : NULL; } void mlx4_gen_slave_eqe(struct work_struct *work) { struct mlx4_mfunc_master_ctx *master = container_of(work, struct mlx4_mfunc_master_ctx, slave_event_work); struct mlx4_mfunc *mfunc = container_of(master, struct mlx4_mfunc, master); struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); struct mlx4_dev *dev = &priv->dev; struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; struct mlx4_eqe *eqe; u8 slave; int i; for (eqe = next_slave_event_eqe(slave_eq); eqe; eqe = next_slave_event_eqe(slave_eq)) { slave = eqe->slave_id; /* All active slaves need to receive the event */ if (slave == ALL_SLAVES) { for (i = 0; i < dev->num_slaves; i++) { if (mlx4_GEN_EQE(dev, i, eqe)) mlx4_warn(dev, "Failed to generate " "event for slave %d\n", i); } } else { if (mlx4_GEN_EQE(dev, slave, eqe)) mlx4_warn(dev, "Failed to generate event " "for slave %d\n", slave); } ++slave_eq->cons; } } static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; struct mlx4_eqe *s_eqe; unsigned long flags; spin_lock_irqsave(&slave_eq->event_lock, flags); s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; if ((!!(s_eqe->owner & 0x80)) ^ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " "No free EQE on slave events queue\n", slave); spin_unlock_irqrestore(&slave_eq->event_lock, flags); return; } memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); s_eqe->slave_id = slave; /* ensure all information is written before setting the ownersip bit */ wmb(); s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 
0x0 : 0x80; ++slave_eq->prod; queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_event_work); spin_unlock_irqrestore(&slave_eq->event_lock, flags); } static void mlx4_slave_event(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) { struct mlx4_priv *priv = mlx4_priv(dev); if (slave < 0 || slave >= dev->num_slaves || slave == dev->caps.function) return; if (!priv->mfunc.master.slave_state[slave].active) return; slave_event(dev, slave, eqe); } int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_eqe eqe; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; if (!s_slave->active) return 0; memset(&eqe, 0, sizeof eqe); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; eqe.event.port_mgmt_change.port = port; return mlx4_GEN_EQE(dev, slave, &eqe); } EXPORT_SYMBOL(mlx4_gen_pkey_eqe); int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_eqe eqe; /*don't send if we don't have the that slave */ if (dev->num_vfs < slave) return 0; memset(&eqe, 0, sizeof eqe); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; eqe.event.port_mgmt_change.port = port; return mlx4_GEN_EQE(dev, slave, &eqe); } EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change) { struct mlx4_eqe eqe; /*don't send if we don't have the that slave */ if (dev->num_vfs < slave) return 0; memset(&eqe, 0, sizeof eqe); eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; eqe.subtype = port_subtype_change; eqe.event.port_change.port = cpu_to_be32(port << 28); mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, port_subtype_change, slave, port); return mlx4_GEN_EQE(dev, slave, &eqe); } EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return SLAVE_PORT_DOWN; } return s_state[slave].port_state[port]; } EXPORT_SYMBOL(mlx4_get_slave_port_state); static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, enum slave_port_state state) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return -1; } s_state[slave].port_state[port] = state; return 0; } static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) { int i; enum slave_port_gen_event gen_event; for (i = 0; i < dev->num_slaves; i++) set_and_calc_slave_port_state(dev, i, port, event, &gen_event); } /************************************************************************** The function get as input the new event to that port, and according to the prev state change the slave's port state. 
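In summary, as implemented by the switch statement below: a PORT_DOWN/PORT_UP device event on a SLAVE_PORT_DOWN port moves it to SLAVE_PENDING_UP only on PORT_UP; from SLAVE_PENDING_UP, a PORT_DOWN event returns the port to SLAVE_PORT_DOWN, while a GID_VALID event promotes it to SLAVE_PORT_UP and reports SLAVE_PORT_GEN_EVENT_UP; from SLAVE_PORT_UP, a PORT_DOWN event demotes the port to SLAVE_PORT_DOWN and a GID_INVALID event demotes it to SLAVE_PENDING_UP, both reporting SLAVE_PORT_GEN_EVENT_DOWN.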
The events are: MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, MLX4_PORT_STATE_DEV_EVENT_PORT_UP MLX4_PORT_STATE_IB_EVENT_GID_VALID MLX4_PORT_STATE_IB_EVENT_GID_INVALID ***************************************************************************/ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *ctx = NULL; unsigned long flags; int ret = -1; enum slave_port_state cur_state = mlx4_get_slave_port_state(dev, slave, port); *gen_event = SLAVE_PORT_GEN_EVENT_NONE; if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return ret; } ctx = &priv->mfunc.master.slave_state[slave]; spin_lock_irqsave(&ctx->lock, flags); switch (cur_state) { case SLAVE_PORT_DOWN: if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) mlx4_set_slave_port_state(dev, slave, port, SLAVE_PENDING_UP); break; case SLAVE_PENDING_UP: if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) mlx4_set_slave_port_state(dev, slave, port, SLAVE_PORT_DOWN); else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { mlx4_set_slave_port_state(dev, slave, port, SLAVE_PORT_UP); *gen_event = SLAVE_PORT_GEN_EVENT_UP; } break; case SLAVE_PORT_UP: if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { mlx4_set_slave_port_state(dev, slave, port, SLAVE_PORT_DOWN); *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == event) { mlx4_set_slave_port_state(dev, slave, port, SLAVE_PENDING_UP); *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; } break; default: pr_err("%s: BUG!!! UNKNOWN state: " "slave:%d, port:%d\n", __func__, slave, port); goto out; } ret = mlx4_get_slave_port_state(dev, slave, port); out: spin_unlock_irqrestore(&ctx->lock, flags); return ret; } EXPORT_SYMBOL(set_and_calc_slave_port_state); int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr, u16 sm_lid, u8 sm_sl) { struct mlx4_eqe eqe; memset(&eqe, 0, sizeof eqe); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; eqe.event.port_mgmt_change.port = port; eqe.event.port_mgmt_change.params.port_info.changed_attr = cpu_to_be32((u32) attr); if (attr & MSTR_SM_CHANGE_MASK) { eqe.event.port_mgmt_change.params.port_info.mstr_sm_lid = cpu_to_be16(sm_lid); eqe.event.port_mgmt_change.params.port_info.mstr_sm_sl = sm_sl; } slave_event(dev, ALL_SLAVES, &eqe); return 0; } EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); void mlx4_master_handle_slave_flr(struct work_struct *work) { struct mlx4_mfunc_master_ctx *master = container_of(work, struct mlx4_mfunc_master_ctx, slave_flr_event_work); struct mlx4_mfunc *mfunc = container_of(master, struct mlx4_mfunc, master); struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); struct mlx4_dev *dev = &priv->dev; struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; int i; int err; unsigned long flags; mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); for (i = 0 ; i < dev->num_slaves; i++) { if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { mlx4_dbg(dev, "mlx4_handle_slave_flr: " "clean slave: %d\n", i); mlx4_delete_all_resources_for_slave(dev, i); /*return the slave to running mode*/ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; slave_state[i].is_slave_going_down = 0; spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); /*notify the FW:*/ err = 
mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) mlx4_warn(dev, "Failed to notify FW on " "FLR done (slave:%d)\n", i); } } } static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; int cqn; int eqes_found = 0; int set_ci = 0; int port; int slave = 0; int ret; u32 flr_slave; u8 update_slave_state; int i; enum slave_port_gen_event gen_event; unsigned long flags; struct mlx4_vport_state *s_info; while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. */ rmb(); switch (eqe->type) { case MLX4_EVENT_TYPE_COMP: cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; mlx4_cq_completion(dev, cqn); break; case MLX4_EVENT_TYPE_PATH_MIG: case MLX4_EVENT_TYPE_COMM_EST: case MLX4_EVENT_TYPE_SQ_DRAINED: case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: case MLX4_EVENT_TYPE_PATH_MIG_FAILED: case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: mlx4_dbg(dev, "event %d arrived\n", eqe->type); if (mlx4_is_master(dev)) { /* forward only to slave owning the QP */ ret = mlx4_get_slave_from_resource_id(dev, RES_QP, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, &slave); if (ret && ret != -ENOENT) { mlx4_dbg(dev, "QP event %02x(%02x) on " "EQ %d at index %u: could " "not get slave id (%d)\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, ret); break; } if (!ret && slave != dev->caps.function) { mlx4_slave_event(dev, slave, eqe); break; } } mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, eqe->type); break; case MLX4_EVENT_TYPE_SRQ_LIMIT: mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", __func__); /* fall through */ case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ ret = mlx4_get_slave_from_resource_id(dev, RES_SRQ, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, &slave); if (ret && ret != -ENOENT) { mlx4_warn(dev, "SRQ event %02x(%02x) " "on EQ %d at index %u: could" " not get slave id (%d)\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, ret); break; } mlx4_dbg(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", __func__, slave, be32_to_cpu(eqe->event.srq.srqn), eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { mlx4_dbg(dev, "%s: sending event %02x(%02x) to slave:%d\n", __func__, eqe->type, eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } } mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, eqe->type); break; case MLX4_EVENT_TYPE_CMD: mlx4_cmd_event(dev, be16_to_cpu(eqe->event.cmd.token), eqe->event.cmd.status, be64_to_cpu(eqe->event.cmd.out_param)); break; case MLX4_EVENT_TYPE_PORT_CHANGE: port = be32_to_cpu(eqe->event.port_change.port) >> 28; if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; if (!mlx4_is_master(dev)) break; for (i = 0; i < dev->num_slaves; i++) { if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (i == mlx4_master_func_num(dev)) continue; mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" " to slave: %d, port:%d\n", __func__, i, port); s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) mlx4_slave_event(dev, i, eqe); } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, &gen_event); /*we can 
be in pending state, then do not send port_down event*/ if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { if (i == mlx4_master_func_num(dev)) continue; mlx4_slave_event(dev, i, eqe); } } } } else { mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); mlx4_priv(dev)->sense.do_sense_port[port] = 0; if (!mlx4_is_master(dev)) break; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) mlx4_slave_event(dev, i, eqe); } else /* IB port */ /* port-up event will be sent to a slave when the * slave's alias-guid is set. This is done in alias_GUID.c */ set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); } break; case MLX4_EVENT_TYPE_CQ_ERROR: mlx4_warn(dev, "CQ %s on CQN %06x\n", eqe->event.cq_err.syndrome == 1 ? "overrun" : "access violation", be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); if (mlx4_is_master(dev)) { ret = mlx4_get_slave_from_resource_id(dev, RES_CQ, be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff, &slave); if (ret && ret != -ENOENT) { mlx4_dbg(dev, "CQ event %02x(%02x) on " "EQ %d at index %u: could " "not get slave id (%d)\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, ret); break; } if (!ret && slave != dev->caps.function) { mlx4_slave_event(dev, slave, eqe); break; } } mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff, eqe->type); break; case MLX4_EVENT_TYPE_EQ_OVERFLOW: mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; case MLX4_EVENT_TYPE_OP_REQUIRED: atomic_inc(&priv->opreq_count); /* FW commands can't be executed from interrupt context working in deferred task */ queue_work(mlx4_wq, &priv->opreq_task); break; case MLX4_EVENT_TYPE_COMM_CHANNEL: if (!mlx4_is_master(dev)) { mlx4_warn(dev, "Received comm channel event " "for non master device\n"); break; } memcpy(&priv->mfunc.master.comm_arm_bit_vector, eqe->event.comm_channel_arm.bit_vec, sizeof eqe->event.comm_channel_arm.bit_vec); if (!queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.comm_work)) mlx4_warn(dev, "Failed to queue comm channel work\n"); if (!queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.arm_comm_work)) mlx4_warn(dev, "Failed to queue arm comm channel work\n"); break; case MLX4_EVENT_TYPE_FLR_EVENT: flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); if (!mlx4_is_master(dev)) { mlx4_warn(dev, "Non-master function received" "FLR event\n"); break; } mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); if (flr_slave >= dev->num_slaves) { mlx4_warn(dev, "Got FLR for unknown function: %d\n", flr_slave); update_slave_state = 0; } else update_slave_state = 1; spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (update_slave_state) { priv->mfunc.master.slave_state[flr_slave].active = false; priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; } spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_flr_event_work); break; case MLX4_EVENT_TYPE_FATAL_WARNING: if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { if (mlx4_is_master(dev)) for (i = 0; i < dev->num_slaves; i++) { mlx4_dbg(dev, "%s: Sending " "MLX4_FATAL_WARNING_SUBTYPE_WARMING" " to slave: %d\n", __func__, i); if (i == dev->caps.function) continue; mlx4_slave_event(dev, i, eqe); } mlx4_err(dev, "Temperature Threshold was 
reached! " "Threshold: %d celsius degrees; " "Current Temperature: %d\n", be16_to_cpu(eqe->event.warming.warning_threshold), be16_to_cpu(eqe->event.warming.current_temperature)); } else mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " "subtype %02x on EQ %d at index %u. owner=%x, " "nent=0x%x, slave=%x, ownership=%s\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, eqe->owner, eq->nent, eqe->slave_id, !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? "HW" : "SW"); break; case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, (unsigned long) eqe); break; case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: switch (eqe->subtype) { case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: mlx4_warn(dev, "Bad cable detected on port %u\n", eqe->event.bad_cable.port); break; case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: mlx4_warn(dev, "Unsupported cable detected\n"); break; default: mlx4_dbg(dev, "Unhandled recoverable error event " "detected: %02x(%02x) on EQ %d at index %u. " "owner=%x, nent=0x%x, ownership=%s\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, eqe->owner, eq->nent, !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? "HW" : "SW"); break; } break; case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " "index %u. owner=%x, nent=0x%x, slave=%x, " "ownership=%s\n", eqe->type, eqe->subtype, eq->eqn, eq->cons_index, eqe->owner, eq->nent, eqe->slave_id, !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? "HW" : "SW"); break; }; ++eq->cons_index; eqes_found = 1; ++set_ci; /* * The HCA will think the queue has overflowed if we * don't tell it we've been processing events. We * create our EQs with MLX4_NUM_SPARE_EQE extra * entries, so we must update our consumer index at * least that often. */ if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { eq_set_ci(eq, 0); set_ci = 0; } } eq_set_ci(eq, 1); return eqes_found; } static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) { struct mlx4_dev *dev = dev_ptr; struct mlx4_priv *priv = mlx4_priv(dev); int work = 0; int i; writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); return IRQ_RETVAL(work); } static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) { struct mlx4_eq *eq = eq_ptr; struct mlx4_dev *dev = eq->dev; mlx4_eq_int(dev, eq); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_event_eq_info *event_eq = priv->mfunc.master.slave_state[slave].event_eq; u32 in_modifier = vhcr->in_modifier; u32 eqn = in_modifier & 0x3FF; u64 in_param = vhcr->in_param; int err = 0; int i; if (slave == dev->caps.function) err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (!err) for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) if (in_param & (1LL << i)) event_eq[i].eqn = in_modifier >> 31 ? 
-1 : eqn; return err; } static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, int eq_num) { return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_num_eq_uar(struct mlx4_dev *dev) { /* * Each UAR holds 4 EQ doorbells. To figure out how many UARs * we need to map, take the difference of highest index and * the lowest index we'll use and add 1. */ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; } static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); int index; index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; if (!priv->eq_table.uar_map[index]) { priv->eq_table.uar_map[index] = ioremap(pci_resource_start(dev->pdev, 2) + ((eq->eqn / 4) << PAGE_SHIFT), PAGE_SIZE); if (!priv->eq_table.uar_map[index]) { mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", eq->eqn); return NULL; } } return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); } static void mlx4_unmap_uar(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; for (i = 0; i < mlx4_num_eq_uar(dev); ++i) if (priv->eq_table.uar_map[i]) { iounmap(priv->eq_table.uar_map[i]); priv->eq_table.uar_map[i] = NULL; } } static int mlx4_create_eq(struct mlx4_dev *dev, int nent, u8 intr, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; struct mlx4_eq_context *eq_context; int npages; u64 *dma_list = NULL; dma_addr_t t; u64 mtt_addr; int err = -ENOMEM; int i; eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); /* CX3 is capable of extending the CQE\EQE from 32 to 64 bytes */ npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); if (!eq->page_list) goto err_out; for (i = 0; i < npages; ++i) eq->page_list[i].buf = NULL; dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); if (!dma_list) goto err_out_free; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) goto err_out_free; eq_context = mailbox->buf; for (i = 0; i < npages; ++i) { eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL); if (!eq->page_list[i].buf) goto err_out_free_pages; dma_list[i] = t; eq->page_list[i].map = t; memset(eq->page_list[i].buf, 0, PAGE_SIZE); } eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); if (eq->eqn == -1) goto err_out_free_pages; eq->doorbell = mlx4_get_eq_uar(dev, eq); if (!eq->doorbell) { err = -ENOMEM; goto err_out_free_eq; } err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); if (err) goto err_out_free_eq; err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); if (err) goto err_out_free_mtt; memset(eq_context, 0, sizeof *eq_context); eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED); eq_context->log_eq_size = ilog2(eq->nent); eq_context->intr = intr; eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); 
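/* The 64-bit MTT base address computed above is split across two EQ context
 * fields just below: the upper 32 bits go into mtt_base_addr_h and the lower
 * 32 bits are byte-swapped into mtt_base_addr_l before the context is handed
 * to firmware via SW2HW_EQ. (This description assumes the upstream layout of
 * struct mlx4_eq_context, where mtt_base_addr_h is a single byte, so only the
 * low 8 bits of the upper word are significant.)
 */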
eq_context->mtt_base_addr_h = mtt_addr >> 32; eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); if (err) { mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); goto err_out_free_mtt; } kfree(dma_list); mlx4_free_cmd_mailbox(dev, mailbox); eq->cons_index = 0; return err; err_out_free_mtt: mlx4_mtt_cleanup(dev, &eq->mtt); err_out_free_eq: mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); err_out_free_pages: for (i = 0; i < npages; ++i) if (eq->page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, eq->page_list[i].map); mlx4_free_cmd_mailbox(dev, mailbox); err_out_free: kfree(eq->page_list); kfree(dma_list); err_out: return err; } static void mlx4_free_eq(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; int err; int i; /* CX3 is capable of extending the CQE\EQE from 32 to 64 bytes */ int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return; err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn); if (err) mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); if (0) { mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { if (i % 4 == 0) pr_cont("[%02x] ", i * 4); pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4)); if ((i + 1) % 4 == 0) pr_cont("\n"); } } mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, eq->page_list[i].map); kfree(eq->page_list); mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); mlx4_free_cmd_mailbox(dev, mailbox); } static void mlx4_free_irqs(struct mlx4_dev *dev) { struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; struct mlx4_priv *priv = mlx4_priv(dev); int i, vec; if (eq_table->have_irq) free_irq(dev->pdev->irq, dev); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { free_irq(eq_table->eq[i].irq, eq_table->eq + i); eq_table->eq[i].have_irq = 0; } for (i = 0; i < dev->caps.comp_pool; i++) { /* * Freeing the assigned irq's * all bits should be 0, but we need to validate */ if (priv->msix_ctl.pool_bm & 1ULL << i) { /* NO need protecting*/ vec = dev->caps.num_comp_vectors + 1 + i; free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]); } } kfree(eq_table->irq_names); } static int mlx4_map_clr_int(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); if (!priv->clr_base) { mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); return -ENOMEM; } return 0; } static void mlx4_unmap_clr_int(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); iounmap(priv->clr_base); } int mlx4_alloc_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, sizeof *priv->eq_table.eq, GFP_KERNEL); if (!priv->eq_table.eq) return -ENOMEM; return 0; } void mlx4_free_eq_table(struct mlx4_dev *dev) { kfree(mlx4_priv(dev)->eq_table.eq); } int mlx4_init_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; int i; priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), sizeof *priv->eq_table.uar_map, GFP_KERNEL); if (!priv->eq_table.uar_map) { err = -ENOMEM; goto err_out_free; } - 
err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, - dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); + err = mlx4_bitmap_init(&priv->eq_table.bitmap, + roundup_pow_of_two(dev->caps.num_eqs), + dev->caps.num_eqs - 1, + dev->caps.reserved_eqs, + roundup_pow_of_two(dev->caps.num_eqs) - + dev->caps.num_eqs); if (err) goto err_out_free; for (i = 0; i < mlx4_num_eq_uar(dev); ++i) priv->eq_table.uar_map[i] = NULL; if (!mlx4_is_slave(dev)) { err = mlx4_map_clr_int(dev); if (err) goto err_out_bitmap; priv->eq_table.clr_mask = swab32(1 << (priv->eq_table.inta_pin & 31)); priv->eq_table.clr_int = priv->clr_base + (priv->eq_table.inta_pin < 32 ? 4 : 0); } priv->eq_table.irq_names = kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + dev->caps.comp_pool), GFP_KERNEL); if (!priv->eq_table.irq_names) { err = -ENOMEM; goto err_out_clr_int; } for (i = 0; i < dev->caps.num_comp_vectors; ++i) { err = mlx4_create_eq(dev, dev->caps.num_cqs - dev->caps.reserved_cqs + MLX4_NUM_SPARE_EQE, (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, &priv->eq_table.eq[i]); if (err) { --i; goto err_out_unmap; } } err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, &priv->eq_table.eq[dev->caps.num_comp_vectors]); if (err) goto err_out_comp; /*if additional completion vectors poolsize is 0 this loop will not run*/ for (i = dev->caps.num_comp_vectors + 1; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { err = mlx4_create_eq(dev, dev->caps.num_cqs - dev->caps.reserved_cqs + MLX4_NUM_SPARE_EQE, (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, &priv->eq_table.eq[i]); if (err) { --i; goto err_out_unmap; } } if (dev->flags & MLX4_FLAG_MSI_X) { const char *eq_name; for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { if (i < dev->caps.num_comp_vectors) { snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE, "mlx4-comp-%d@pci:%s", i, pci_name(dev->pdev)); } else { snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE, "mlx4-async@pci:%s", pci_name(dev->pdev)); } eq_name = priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE; err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0, eq_name, priv->eq_table.eq + i); if (err) goto err_out_async; priv->eq_table.eq[i].have_irq = 1; } } else { snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE, DRV_NAME "@pci:%s", pci_name(dev->pdev)); err = request_irq(dev->pdev->irq, mlx4_interrupt, IRQF_SHARED, priv->eq_table.irq_names, dev); if (err) goto err_out_async; priv->eq_table.have_irq = 1; } err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) eq_set_ci(&priv->eq_table.eq[i], 1); return 0; err_out_async: mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); err_out_comp: i = dev->caps.num_comp_vectors - 1; err_out_unmap: while (i >= 0) { mlx4_free_eq(dev, &priv->eq_table.eq[i]); --i; } mlx4_free_irqs(dev); err_out_clr_int: if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); err_out_bitmap: mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); err_out_free: kfree(priv->eq_table.uar_map); return err; } void mlx4_cleanup_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); 
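/* Teardown mirrors mlx4_init_eq_table in reverse: the async event mask is
 * unmapped from its EQ above, then the IRQs are released, each EQ is returned
 * to software ownership and freed, and finally the interrupt-clear register,
 * the EQ doorbell UARs and the EQ-number bitmap are cleaned up below.
 */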
mlx4_free_irqs(dev); for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); kfree(priv->eq_table.uar_map); } /* A test that verifies that we can accept interrupts on all * the irq vectors of the device. * Interrupts are checked using the NOP command. */ int mlx4_test_interrupts(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; int err; err = mlx4_NOP(dev); /* When not in MSI_X, there is only one irq to check */ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) return err; /* A loop over all completion vectors, for each vector we will check * whether it works by mapping command completions to that vector * and performing a NOP command */ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { /* Temporary use polling for command completions */ mlx4_cmd_use_polling(dev); /* Map the new eq to handle all asyncronous events */ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[i].eqn); if (err) { mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); mlx4_cmd_use_events(dev); break; } /* Go back to using events */ mlx4_cmd_use_events(dev); err = mlx4_NOP(dev); } /* Return to default */ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); return err; } EXPORT_SYMBOL(mlx4_test_interrupts); int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector) { struct mlx4_priv *priv = mlx4_priv(dev); int vec = 0, err = 0, i; mutex_lock(&priv->msix_ctl.pool_lock); for (i = 0; !vec && i < dev->caps.comp_pool; i++) { if (~priv->msix_ctl.pool_bm & 1ULL << i) { priv->msix_ctl.pool_bm |= 1ULL << i; vec = dev->caps.num_comp_vectors + 1 + i; snprintf(priv->eq_table.irq_names + vec * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE, "%s", name); err = request_irq(priv->eq_table.eq[vec].irq, mlx4_msi_x_interrupt, 0, &priv->eq_table.irq_names[vec<<5], priv->eq_table.eq + vec); if (err) { /*zero out bit by fliping it*/ priv->msix_ctl.pool_bm ^= 1 << i; vec = 0; continue; /*we dont want to break here*/ } eq_set_ci(&priv->eq_table.eq[vec], 1); } } mutex_unlock(&priv->msix_ctl.pool_lock); if (vec) { *vector = vec; } else { *vector = 0; err = (i == dev->caps.comp_pool) ? -ENOSPC : err; } return err; } EXPORT_SYMBOL(mlx4_assign_eq); void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); /*bm index*/ int i = vec - dev->caps.num_comp_vectors - 1; if (likely(i >= 0)) { /*sanity check , making sure were not trying to free irq's Belonging to a legacy EQ*/ mutex_lock(&priv->msix_ctl.pool_lock); if (priv->msix_ctl.pool_bm & 1ULL << i) { free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]); priv->msix_ctl.pool_bm &= ~(1ULL << i); } mutex_unlock(&priv->msix_ctl.pool_lock); } } EXPORT_SYMBOL(mlx4_release_eq); Index: stable/10/sys/ofed/drivers/net/mlx4/fw.c =================================================================== --- stable/10/sys/ofed/drivers/net/mlx4/fw.c (revision 318535) +++ stable/10/sys/ofed/drivers/net/mlx4/fw.c (revision 318536) @@ -1,1972 +1,2054 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include "fw.h" #include "icm.h" enum { MLX4_COMMAND_INTERFACE_MIN_REV = 2, MLX4_COMMAND_INTERFACE_MAX_REV = 3, MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, }; extern void __buggy_use_of_MLX4_GET(void); extern void __buggy_use_of_MLX4_PUT(void); static bool enable_qos; module_param(enable_qos, bool, 0444); MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); #define MLX4_GET(dest, source, offset) \ do { \ void *__p = (char *) (source) + (offset); \ switch (sizeof (dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = be32_to_cpup(__p); break; \ case 8: (dest) = be64_to_cpup(__p); break; \ default: __buggy_use_of_MLX4_GET(); \ } \ } while (0) #define MLX4_PUT(dest, source, offset) \ do { \ void *__d = ((char *) (dest) + (offset)); \ switch (sizeof(source)) { \ case 1: *(u8 *) __d = (source); break; \ case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ default: __buggy_use_of_MLX4_PUT(); \ } \ } while (0) static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) { static const char *fname[] = { [ 0] = "RC transport", [ 1] = "UC transport", [ 2] = "UD transport", [ 3] = "XRC transport", [ 4] = "reliable multicast", [ 5] = "FCoIB support", [ 6] = "SRQ support", [ 7] = "IPoIB checksum offload", [ 8] = "P_Key violation counter", [ 9] = "Q_Key violation counter", [10] = "VMM", [12] = "DPDP", [15] = "Big LSO headers", [16] = "MW support", [17] = "APM support", [18] = "Atomic ops support", [19] = "Raw multicast support", [20] = "Address vector port checking support", [21] = "UD multicast support", [24] = "Demand paging support", [25] = "Router support", [30] = "IBoE support", [32] = "Unicast loopback support", [34] = "FCS header control", [38] = "Wake On LAN support", [40] = "UDP RSS support", [41] = "Unicast VEP steering support", [42] = "Multicast VEP steering support", [44] = "Cross-channel (sync_qp) operations support", [48] = "Counters support", [59] = "Port management change event support", [60] = "eSwitch support", [61] = "64 byte EQE support", [62] = "64 byte CQE support", }; int i; mlx4_dbg(dev, "DEV_CAP flags:\n"); for (i = 0; i < ARRAY_SIZE(fname); ++i) if 
(fname[i] && (flags & (1LL << i))) mlx4_dbg(dev, " %s\n", fname[i]); } static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) { static const char * const fname[] = { [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", [4] = "FSM (MAC unti-spoofing) support", [5] = "VST (control vlan insertion/stripping) support", [6] = "Dynamic QP updates support", [7] = "Loopback source checks support", [8] = "Device managed flow steering IPoIB support", [9] = "ETS configuration support", [10] = "ETH backplane autoneg report", [11] = "Ethernet Flow control statistics support", [12] = "Recoverable error events support", [13] = "Time stamping support", [14] = "Report driver version to FW support" }; int i; for (i = 0; i < ARRAY_SIZE(fname); ++i) if (fname[i] && (flags & (1LL << i))) mlx4_dbg(dev, " %s\n", fname[i]); } int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) { struct mlx4_cmd_mailbox *mailbox; u32 *inbox; int err = 0; #define MOD_STAT_CFG_IN_SIZE 0x100 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); return err; } +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 in_modifier; + u8 field; + u16 field16; + int err; + +#define QUERY_FUNC_BUS_OFFSET 0x00 +#define QUERY_FUNC_DEVICE_OFFSET 0x01 +#define QUERY_FUNC_FUNCTION_OFFSET 0x01 +#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 +#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 +#define QUERY_FUNC_MAX_EQ_OFFSET 0x06 +#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + in_modifier = slave; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, + MLX4_CMD_QUERY_FUNC, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); + func->bus = field & 0xf; + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); + func->device = field & 0xf1; + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); + func->function = field & 0x7; + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); + func->physical_function = field & 0xf; + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); + func->rsvd_eqs = field16 & 0xffff; + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); + func->max_eq = field16 & 0xffff; + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); + func->rsvd_uars = field & 0x0f; + + mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", + func->bus, func->device, func->function, func->physical_function, + func->max_eq, func->rsvd_eqs, func->rsvd_uars); +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); u8 field, port; 
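/* This wrapper answers QUERY_FUNC_CAP on behalf of a slave: op_modifier == 0
 * reports per-function quotas and limits, op_modifier == 1 reports per-port
 * details. When the device advertises MLX4_DEV_CAP_FLAG2_SYS_EQS, the max and
 * reserved EQ numbers are taken from QUERY_FUNC for that slave; in either
 * case the EQ count is rounded down to a power of two unless the requester
 * set QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS in the in_modifier (see
 * the branch further down).
 */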
u32 size; int err = 0; + struct mlx4_func func; #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 #define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c #define QUERY_FUNC_CAP_FMR_FLAG 0x80 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) /* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc #define QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET 0xd #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 #define QUERY_FUNC_CAP_QP0_PROXY 0x14 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 #define QUERY_FUNC_CAP_PROPS_DEF_COUNTER 0x20 #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) if (vhcr->op_modifier == 1) { port = vhcr->in_modifier; /* phys-port = logical-port */ MLX4_PUT(outbox->buf, port, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); field = 0; /* ensure that phy_wqe_gid bit is not set */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET); /* ensure force vlan and force mac bits are not set * and that default counter bit is set */ field = QUERY_FUNC_CAP_PROPS_DEF_COUNTER; /* def counter */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); /* There is always default counter legal or sink counter */ field = mlx4_get_default_counter_index(dev, slave, vhcr->in_modifier); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET); /* size is now the QP number */ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); size += 2; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); size = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY); size += 2; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); } else if (vhcr->op_modifier == 0) { /* enable rdma and ethernet interfaces, and new quota locations */ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); field = dev->caps.num_ports; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); size = dev->caps.function_caps; /* set PF behaviours */ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); field = 0; /* protected FMR support not available as yet */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); size = 
priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); size = dev->caps.num_qps; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); size = dev->caps.num_srqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); size = dev->caps.num_cqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); - size = dev->caps.num_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || + mlx4_QUERY_FUNC(dev, &func, slave)) { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + dev->caps.num_eqs : + rounddown_pow_of_two(dev->caps.num_eqs); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } else { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + func.max_eq : + rounddown_pow_of_two(func.max_eq); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = func.rsvd_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } - size = dev->caps.reserved_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); - size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); size = dev->caps.num_mpts; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); size = dev->caps.num_mtts; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); size = dev->caps.num_mgms + dev->caps.num_amgms; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); } else err = -EINVAL; return err; } int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, struct mlx4_func_cap *func_cap) { struct mlx4_cmd_mailbox *mailbox; u32 *outbox; u8 field, op_modifier; u32 size; int err = 0, quotas = 0; + u32 in_modifier; op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ + in_modifier = op_modifier ? 
gen_or_port : + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier, + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, MLX4_CMD_QUERY_FUNC_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) goto out; outbox = mailbox->buf; if (!op_modifier) { MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); err = -EPROTONOSUPPORT; goto out; } func_cap->flags = field; quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); func_cap->num_ports = field; MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); func_cap->pf_context_behaviour = size; if (quotas) { MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); func_cap->qp_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); func_cap->srq_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); func_cap->cq_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); func_cap->mpt_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); func_cap->mtt_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); func_cap->mcg_quota = size & 0xFFFFFF; } else { MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); func_cap->qp_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); func_cap->srq_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); func_cap->cq_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); func_cap->mpt_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); func_cap->mtt_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); func_cap->mcg_quota = size & 0xFFFFFF; } MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); func_cap->max_eq = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); func_cap->reserved_eq = size & 0xFFFFFF; func_cap->extra_flags = 0; /* Mailbox data from 0x6c and onward should only be treated if * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags */ if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; } goto out; } /* logical port query */ if (gen_or_port > dev->caps.num_ports) { err = -EINVAL; goto out; } if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { mlx4_err(dev, "VLAN is enforced on this port\n"); err = -EPROTONOSUPPORT; goto out; } if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { mlx4_err(dev, "Force mac is enabled on this port\n"); err = -EPROTONOSUPPORT; goto out; } } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { mlx4_err(dev, "phy_wqe_gid is " "enforced on this ib port\n"); err = -EPROTONOSUPPORT; goto out; } } MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); func_cap->physical_port = 
field; if (func_cap->physical_port != gen_or_port) { err = -ENOSYS; goto out; } MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); if (field & QUERY_FUNC_CAP_PROPS_DEF_COUNTER) { MLX4_GET(field, outbox, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET); func_cap->def_counter_index = field; } else { func_cap->def_counter_index = MLX4_SINK_COUNTER_INDEX; } MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); func_cap->qp0_proxy_qpn = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); func_cap->qp1_proxy_qpn = size & 0xFFFFFF; /* All other resources are allocated by the master, but we still report * 'num' and 'reserved' capabilities as follows: * - num remains the maximum resource index * - 'num - reserved' is the total available objects of a resource, but * resource indices may be less than 'reserved' * TODO: set per-resource quotas */ out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { struct mlx4_cmd_mailbox *mailbox; u32 *outbox; u8 field; u32 field32, flags, ext_flags; u16 size; u16 stat_rate; int err; int i; #define QUERY_DEV_CAP_OUT_SIZE 0x100 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d #define QUERY_DEV_CAP_RSS_OFFSET 0x2e #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 #define QUERY_DEV_CAP_SYNC_QP_OFFSET 0x42 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b #define QUERY_DEV_CAP_BF_OFFSET 0x4c #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 #define 
QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET 0x68 #define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET 0x6c #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_ETS_CFG_OFFSET 0x9c #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); dev_cap->reserved_qps = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); dev_cap->max_qps = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); dev_cap->reserved_srqs = 1 << (field >> 4); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); dev_cap->max_srqs = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); dev_cap->max_cq_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); dev_cap->reserved_cqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); dev_cap->max_cqs = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); dev_cap->max_mpts = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); dev_cap->reserved_eqs = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); dev_cap->max_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); dev_cap->reserved_mtts = 1 << (field >> 4); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); dev_cap->max_mrw_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); dev_cap->reserved_mrws = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); + dev_cap->num_sys_eqs = size & 0xfff; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); dev_cap->max_requester_per_qp = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); dev_cap->max_responder_per_qp = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); field &= 0x1f; if (!field) dev_cap->max_gso_sz = 0; else dev_cap->max_gso_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); if (field & 0x20) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; if (field & 0x10) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; field &= 0xf; if (field) { dev_cap->flags2 |= 
MLX4_DEV_CAP_FLAG2_RSS; dev_cap->max_rss_tbl_sz = 1 << field; } else dev_cap->max_rss_tbl_sz = 0; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); dev_cap->max_rdma_global = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); dev_cap->local_ca_ack_delay = field & 0x1f; MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); dev_cap->num_ports = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); dev_cap->max_msg_sz = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); if (field & 0x10) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); dev_cap->fs_max_num_qp_per_entry = field; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); dev_cap->flags = flags | (u64)ext_flags << 32; MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET); dev_cap->sync_qp = field & 0x10; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); dev_cap->reserved_uars = field >> 4; MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); dev_cap->uar_size = 1 << ((field & 0x3f) + 20); MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); dev_cap->min_page_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); if (field & 0x80) { MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); dev_cap->bf_reg_size = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) field = 3; dev_cap->bf_regs_per_page = 1 << (field & 0x3f); mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); } else { dev_cap->bf_reg_size = 0; mlx4_dbg(dev, "BlueFlame not available\n"); } MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); dev_cap->max_sq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); dev_cap->max_sq_desc_sz = size; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); dev_cap->max_qp_per_mcg = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); dev_cap->reserved_mgms = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); dev_cap->max_mcgs = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); dev_cap->reserved_pds = field >> 4; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); dev_cap->max_pds = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); dev_cap->reserved_xrcds = field >> 4; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); dev_cap->max_xrcds = 1 << (field & 0x1f); MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); dev_cap->rdmarc_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); dev_cap->qpc_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); dev_cap->aux_entry_sz = size; MLX4_GET(size, outbox, 
QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); dev_cap->altc_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); dev_cap->eqc_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); dev_cap->cqc_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); dev_cap->srq_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); dev_cap->cmpt_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); dev_cap->mtt_entry_sz = size; MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); dev_cap->dmpt_entry_sz = size; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); dev_cap->max_srq_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); dev_cap->max_qp_sz = 1 << field; MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); dev_cap->resize_srq = field & 1; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); dev_cap->max_rq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETS_CFG_OFFSET); if (field32 & (1 << 0)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; if (field32 & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; if (field32 & (1 << 8)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW; if (field32 & (1 << 13)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) MLX4_GET(dev_cap->max_basic_counters, outbox, QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET); /* FW reports 256 however real value is 255 */ dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT) MLX4_GET(dev_cap->max_extended_counters, outbox, QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; if (field32 & (1 << 19)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK; if (field32 & (1 << 20)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; if (field32 & (1 << 26)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { for (i = 1; i <= dev_cap->num_ports; ++i) { MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); dev_cap->max_vl[i] = field >> 4; MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); dev_cap->ib_mtu[i] = field >> 4; dev_cap->max_port_width[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); dev_cap->max_gids[i] = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); dev_cap->max_pkeys[i] = 1 << (field & 0xf); } } else { #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 #define QUERY_PORT_MTU_OFFSET 0x01 #define QUERY_PORT_ETH_MTU_OFFSET 0x02 #define QUERY_PORT_WIDTH_OFFSET 0x06 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a #define QUERY_PORT_MAX_VL_OFFSET 0x0b #define QUERY_PORT_MAC_OFFSET 0x10 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 for (i = 1; i <= dev_cap->num_ports; ++i) { err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; 
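/*
 * The QUERY_PORT mailbox, like every command mailbox in this file, comes
 * back from firmware as a big-endian byte array; each MLX4_GET() below
 * pulls a field out of it by byte offset and converts it to host order
 * before any masking or shifting.  A minimal sketch of that idea for a
 * 32-bit field, assuming plain memcpy loads (the real MLX4_GET macro also
 * handles 8-, 16- and 64-bit fields; mbox_get_u32() is a hypothetical
 * helper used for illustration only, not part of this driver):
 *
 *	static inline u32 mbox_get_u32(const void *outbox, int off)
 *	{
 *		__be32 raw;
 *
 *		memcpy(&raw, (const u8 *)outbox + off, sizeof(raw));
 *		return be32_to_cpu(raw);
 *	}
 *
 *	field32    = mbox_get_u32(outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
 *	trans_type = field32 >> 24;
 *	vendor_oui = field32 & 0xffffff;
 */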
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); dev_cap->supported_port_types[i] = field & 3; dev_cap->suggested_type[i] = (field >> 3) & 1; dev_cap->default_sense[i] = (field >> 4) & 1; MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); dev_cap->ib_mtu[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); dev_cap->max_port_width[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); dev_cap->max_gids[i] = 1 << (field >> 4); dev_cap->max_pkeys[i] = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); dev_cap->max_vl[i] = field & 0xf; MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); dev_cap->log_max_macs[i] = field & 0xf; dev_cap->log_max_vlans[i] = field >> 4; MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); dev_cap->trans_type[i] = field32 >> 24; dev_cap->vendor_oui[i] = field32 & 0xffffff; MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); } } mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", dev_cap->bmme_flags, dev_cap->reserved_lkey); /* * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then * we can't use any EQs whose doorbell falls on that page, * even if the EQ itself isn't reserved. */ - dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, - dev_cap->reserved_eqs); + if (dev_cap->num_sys_eqs == 0) + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + else + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; mlx4_dbg(dev, "Max ICM size %lld MB\n", (unsigned long long) dev_cap->max_icm_sz >> 20); mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); - mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", - dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, + dev_cap->eqc_entry_sz); mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", dev_cap->reserved_mrws, dev_cap->reserved_mtts); mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", dev_cap->max_pds, dev_cap->reserved_mgms); mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], dev_cap->max_port_width[1]); mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters); mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters); mlx4_dbg(dev, "Max RSS Table size: 
%d\n", dev_cap->max_rss_tbl_sz); dump_dev_cap_flags(dev, dev_cap->flags); dump_dev_cap_flags2(dev, dev_cap->flags2); out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { u64 flags; int err = 0; u8 field; err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; /* add port mng change event capability unconditionally to slaves */ MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); /* For guests, report Blueflame disabled */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); /* turn off device-managed steering capability if not enabled */ if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); } return 0; } int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); u64 def_mac; u8 port_type; u16 short_field; int err; int admin_link_state; #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 #define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (!err && dev->caps.function != slave) { /* set slave default_mac address to be zero MAC */ def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); /* get port type - currently only eth is enabled */ MLX4_GET(port_type, outbox->buf, QUERY_PORT_SUPPORTED_TYPE_OFFSET); /* No link sensing allowed */ port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; /* set port type to currently operating port type */ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) port_type |= MLX4_PORT_LINK_UP_MASK; else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) port_type &= ~MLX4_PORT_LINK_UP_MASK; MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) short_field = mlx4_get_slave_num_gids(dev, slave); else short_field = 1; /* slave max gids */ MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_GID_OFFSET); short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_PKEY_OFFSET); } return err; } int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, int *gid_tbl_len, int *pkey_tbl_len) { struct mlx4_cmd_mailbox *mailbox; u32 *outbox; u16 field; int err; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (err) goto out; outbox = mailbox->buf; MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); 
*gid_tbl_len = field; MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); *pkey_tbl_len = field; out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_icm_iter iter; __be64 *pages; int lg; int nent = 0; int i; int err = 0; int ts = 0, tc = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); pages = mailbox->buf; for (mlx4_icm_first(icm, &iter); !mlx4_icm_last(&iter); mlx4_icm_next(&iter)) { /* * We have to pass pages that are aligned to their * size, so find the least significant 1 in the * address or size and use that as our log2 size. */ lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; if (lg < MLX4_ICM_PAGE_SHIFT) { mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", MLX4_ICM_PAGE_SIZE, (unsigned long long) mlx4_icm_addr(&iter), mlx4_icm_size(&iter)); err = -EINVAL; goto out; } for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { if (virt != -1) { pages[nent * 2] = cpu_to_be64(virt); virt += 1 << lg; } pages[nent * 2 + 1] = cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | (lg - MLX4_ICM_PAGE_SHIFT)); ts += 1 << (lg - 10); ++tc; if (++nent == MLX4_MAILBOX_SIZE / 16) { err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; nent = 0; } } } if (nent) err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; switch (op) { case MLX4_CMD_MAP_FA: mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); break; case MLX4_CMD_MAP_ICM_AUX: mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); break; case MLX4_CMD_MAP_ICM: mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", tc, ts, (unsigned long long) virt - (ts << 10)); break; } out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) { return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); } int mlx4_UNMAP_FA(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_RUN_FW(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } int mlx4_QUERY_FW(struct mlx4_dev *dev) { struct mlx4_fw *fw = &mlx4_priv(dev)->fw; struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; struct mlx4_cmd_mailbox *mailbox; u32 *outbox; int err = 0; u64 fw_ver; u16 cmd_if_rev; u8 lg; #define QUERY_FW_OUT_SIZE 0x100 #define QUERY_FW_VER_OFFSET 0x00 #define QUERY_FW_PPF_ID 0x09 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a #define QUERY_FW_MAX_CMD_OFFSET 0x0f #define QUERY_FW_ERR_START_OFFSET 0x30 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 #define QUERY_FW_ERR_BAR_OFFSET 0x3c #define QUERY_FW_SIZE_OFFSET 0x00 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 #define QUERY_FW_COMM_BASE_OFFSET 0x40 #define QUERY_FW_COMM_BAR_OFFSET 0x48 #define QUERY_FW_CLOCK_OFFSET 0x50 #define QUERY_FW_CLOCK_BAR 0x58 mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); /* * FW subminor version is at more significant bits than minor * version, so swap here. 
*/ dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); dev->caps.function = lg; if (mlx4_is_slave(dev)) goto out; MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { mlx4_err(dev, "Installed FW has unsupported " "command interface revision %d.\n", cmd_if_rev); mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", (int) (dev->caps.fw_ver >> 32), (int) (dev->caps.fw_ver >> 16) & 0xffff, (int) dev->caps.fw_ver & 0xffff); mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); err = -ENODEV; goto out; } if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); cmd->max_cmds = 1 << lg; mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", (int) (dev->caps.fw_ver >> 32), (int) (dev->caps.fw_ver >> 16) & 0xffff, (int) dev->caps.fw_ver & 0xffff, cmd_if_rev, cmd->max_cmds); MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); fw->catas_bar = (fw->catas_bar >> 6) * 2; mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); fw->comm_bar = (fw->comm_bar >> 6) * 2; mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", fw->comm_bar, (unsigned long long)fw->comm_base); mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); fw->clock_bar = (fw->clock_bar >> 6) * 2; mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", fw->comm_bar, (unsigned long long)fw->comm_base); /* * Round up number of system pages needed in case * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
*/ fw->fw_pages = ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", (unsigned long long) fw->clr_int_base, fw->clr_int_bar); out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { u8 *outbuf; int err; outbuf = outbox->buf; err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; /* for slaves, set pci PPF ID to invalid and zero out everything * else except FW version */ outbuf[0] = outbuf[1] = 0; memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; return 0; } static void get_board_id(void *vsd, char *board_id, char *vsdstr) { int i; #define VSD_OFFSET_SIG1 0x00 #define VSD_OFFSET_SIG2 0xde #define VSD_OFFSET_MLX_BOARD_ID 0xd0 #define VSD_OFFSET_TS_BOARD_ID 0x20 #define VSD_LEN 0xd0 #define VSD_SIGNATURE_TOPSPIN 0x5ad memset(vsdstr, 0, MLX4_VSD_LEN); for (i = 0; i < VSD_LEN / 4; i++) ((u32 *)vsdstr)[i] = swab32(*(u32 *)(vsd + i * 4)); memset(board_id, 0, MLX4_BOARD_ID_LEN); if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); } else { /* * The board ID is a string but the firmware byte * swaps each 4-byte word before passing it back to * us. Therefore we need to swab it before printing. */ for (i = 0; i < 4; ++i) ((u32 *) board_id)[i] = swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); } } int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) { struct mlx4_cmd_mailbox *mailbox; u32 *outbox; int err; #define QUERY_ADAPTER_OUT_SIZE 0x100 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 #define QUERY_ADAPTER_VSD_OFFSET 0x20 #define QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET 0x1e mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); adapter->vsd_vendor_id = be16_to_cpup((u16 *)outbox + QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET / 2); get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, adapter->board_id, adapter->vsd); out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) { struct mlx4_cmd_mailbox *mailbox; __be32 *inbox; u32 mw_enable; int err; #define INIT_HCA_IN_SIZE 0x200 #define INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE 64 #define INIT_HCA_VERSION_OFFSET 0x000 #define INIT_HCA_VERSION 2 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e #define INIT_HCA_FLAGS_OFFSET 0x014 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 #define INIT_HCA_QPC_OFFSET 0x020 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define 
INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) #define INIT_HCA_MCAST_OFFSET 0x0c0 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 #define INIT_HCA_DRIVER_VERSION_OFFSET 0x140 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) #define INIT_HCA_TPT_OFFSET 0x0f0 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) #define INIT_HCA_TPT_MW_ENABLE (1 << 31) #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) #define INIT_HCA_UAR_OFFSET 0x120 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, INIT_HCA_IN_SIZE); *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); #elif defined(__BIG_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); #else #error Host endianness not defined #endif /* Check port for UD address vector: */ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); /* Enable IPoIB checksumming if we can: */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); /* Enable QoS support if module parameter set */ if (enable_qos) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); /* Enable fast drop performance optimization */ if (dev->caps.fast_drop) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7); /* enable counters */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); /* CX3 is capable of extending CQEs\EQEs from 32 to 64 bytes */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); dev->caps.eqe_size = 64; dev->caps.eqe_factor = 1; } else { dev->caps.eqe_size = 32; dev->caps.eqe_factor = 0; } if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); dev->caps.cqe_size 
= 64; dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; } else { dev->caps.cqe_size = 32; } if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { strncpy((u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET, DRV_NAME_FOR_FW, INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1); mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", (u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET); } /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); /* steering attributes */ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); /* Enable Ethernet flow steering * with udp unicast and tcp unicast */ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), INIT_HCA_FS_ETH_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); /* Enable IPoIB flow steering * with udp unicast and tcp unicast */ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), INIT_HCA_FS_IB_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); } else { MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); } /* TPT attributes */ MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); mw_enable = param->mw_enable ? 
INIT_HCA_TPT_MW_ENABLE : 0; MLX4_PUT(inbox, mw_enable, INIT_HCA_TPT_MW_OFFSET); MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, MLX4_CMD_NATIVE); if (err) mlx4_err(dev, "INIT_HCA returns %d\n", err); mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) { struct mlx4_cmd_mailbox *mailbox; __be32 *outbox; u32 dword_field; u32 mw_enable; int err; u8 byte_field; #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_HCA, MLX4_CMD_TIME_CLASS_B, !mlx4_is_slave(dev)); if (err) goto out; MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; } else { MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); if (byte_field & 0x8) param->steering_mode = MLX4_STEERING_MODE_B0; else param->steering_mode = MLX4_STEERING_MODE_A0; } /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); MLX4_GET(param->log_mc_entry_sz, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); MLX4_GET(param->log_mc_table_sz, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); MLX4_GET(param->log_mc_entry_sz, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); MLX4_GET(param->log_mc_hash_sz, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); MLX4_GET(param->log_mc_table_sz, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); } /* CX3 is capable of extending CQEs\EQEs from 32 to 64 bytes */ MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); if (byte_field & 0x20) /* 64-bytes eqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; if (byte_field & 0x40) /* 64-bytes cqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); MLX4_GET(mw_enable, outbox, INIT_HCA_TPT_MW_OFFSET); 
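/*
 * QUERY_HCA reads the HCA context back through the same INIT_HCA_* byte
 * offsets that mlx4_INIT_HCA() used to program it, which is how the
 * num_sys_eqs value written above reappears in param->num_sys_eqs here.
 * A rough round-trip sketch under that assumption (illustration only,
 * with the remaining parameters assumed to be populated as usual):
 *
 *	struct mlx4_init_hca_param in  = { .num_sys_eqs = 64 };
 *	struct mlx4_init_hca_param out = { 0 };
 *
 *	err = mlx4_INIT_HCA(dev, &in);
 *	if (!err)
 *		err = mlx4_QUERY_HCA(dev, &out);
 *
 * On success, out.num_sys_eqs is expected to read back as the value that
 * was programmed (64 in this sketch).
 */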
param->mw_enable = (mw_enable & INIT_HCA_TPT_MW_ENABLE) == INIT_HCA_TPT_MW_ENABLE; MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 * and real QP0 are active, so that the paravirtualized QP0 is ready * to operate */ static int check_qp0_state(struct mlx4_dev *dev, int function, int port) { struct mlx4_priv *priv = mlx4_priv(dev); /* irrelevant if not infiniband */ if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && priv->mfunc.master.qp0_state[port].qp0_active) return 1; return 0; } int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); int port = vhcr->in_modifier; int err; if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) return 0; if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { /* Enable port only if it was previously disabled */ if (!priv->mfunc.master.init_port_ref[port]) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; } priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); } else { if (slave == mlx4_master_func_num(dev)) { if (check_qp0_state(dev, slave, port) && !priv->mfunc.master.qp0_state[port].port_active) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; priv->mfunc.master.qp0_state[port].port_active = 1; priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); } } else priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); } ++priv->mfunc.master.init_port_ref[port]; return 0; } int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) { struct mlx4_cmd_mailbox *mailbox; u32 *inbox; int err; u32 flags; u16 field; if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { #define INIT_PORT_IN_SIZE 256 #define INIT_PORT_FLAGS_OFFSET 0x00 #define INIT_PORT_FLAG_SIG (1 << 18) #define INIT_PORT_FLAG_NG (1 << 17) #define INIT_PORT_FLAG_G0 (1 << 16) #define INIT_PORT_VL_SHIFT 4 #define INIT_PORT_PORT_WIDTH_SHIFT 8 #define INIT_PORT_MTU_OFFSET 0x04 #define INIT_PORT_MAX_GID_OFFSET 0x06 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a #define INIT_PORT_GUID0_OFFSET 0x10 #define INIT_PORT_NODE_GUID_OFFSET 0x18 #define INIT_PORT_SI_GUID_OFFSET 0x20 mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, INIT_PORT_IN_SIZE); flags = 0; flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); field = 128 << dev->caps.ib_mtu_cap[port]; MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); field = dev->caps.gid_table_len[port]; MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); field = dev->caps.pkey_table_len[port]; MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); } else err = mlx4_cmd(dev, 0, port, 0, 
MLX4_CMD_INIT_PORT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); return err; } EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); int port = vhcr->in_modifier; int err; if (!(priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))) return 0; if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { if (priv->mfunc.master.init_port_ref[port] == 1) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, MLX4_CMD_NATIVE); if (err) return err; } priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); } else { /* infiniband port */ if (slave == mlx4_master_func_num(dev)) { if (!priv->mfunc.master.qp0_state[port].qp0_active && priv->mfunc.master.qp0_state[port].port_active) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, MLX4_CMD_NATIVE); if (err) return err; priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); priv->mfunc.master.qp0_state[port].port_active = 0; } } else priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); } --priv->mfunc.master.init_port_ref[port]; return 0; } int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) { return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, MLX4_CMD_WRAPPED); } EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) { return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, MLX4_CMD_NATIVE); } int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, MLX4_CMD_SET_ICM_SIZE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (ret) return ret; /* * Round up number of system pages needed in case * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. */ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); return 0; } int mlx4_NOP(struct mlx4_dev *dev) { /* Input modifier of 0x1f means "finish as soon as possible." 
*/ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length, u8 op_modifier, u32 in_offset[], u32 counter_out[]) { struct mlx4_cmd_mailbox *mailbox; u32 *outbox; int ret; int i; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier, MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (ret) goto out; for (i = 0; i < array_length; i++) { if (in_offset[i] > MLX4_MAILBOX_SIZE) { ret = -EINVAL; goto out; } MLX4_GET(counter_out[i], outbox, in_offset[i]); } out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; } EXPORT_SYMBOL_GPL(mlx4_query_diag_counters); int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { return -EPERM; } #define MLX4_WOL_SETUP_MODE (5 << 28) int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) { u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_read); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) { u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_write); enum { ADD_TO_MCG = 0x26, }; void mlx4_opreq_action(struct work_struct *work) { struct mlx4_priv *priv = container_of(work, struct mlx4_priv, opreq_task); struct mlx4_dev *dev = &priv->dev; int num_tasks = atomic_read(&priv->opreq_count); struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 *outbox; u32 modifier; u16 token; u16 type_m; u16 type; int err; u32 num_qps; struct mlx4_qp qp; int i; u8 rem_mcg; u8 prot; #define GET_OP_REQ_MODIFIER_OFFSET 0x08 #define GET_OP_REQ_TOKEN_OFFSET 0x14 #define GET_OP_REQ_TYPE_OFFSET 0x1a #define GET_OP_REQ_DATA_OFFSET 0x20 mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); return; } outbox = mailbox->buf; while (num_tasks) { err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); return; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); type_m = type >> 12; type &= 0xfff; switch (type) { case ADD_TO_MCG: if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { mlx4_warn(dev, "ADD MCG operation is not supported in " "DEVICE_MANAGED steering mode\n"); err = EPERM; break; } mgm = (struct mlx4_mgm *) ((u8 *) (outbox) + GET_OP_REQ_DATA_OFFSET); num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK; rem_mcg = ((u8 *) (&mgm->members_count))[0] & 1; prot = ((u8 *) (&mgm->members_count))[0] >> 6; for (i = 0; i < num_qps; i++) { qp.qpn = be32_to_cpu(mgm->qp[i]); if (rem_mcg) err = mlx4_multicast_detach(dev, &qp, mgm->gid, prot, 0); else err = mlx4_multicast_attach(dev, &qp, mgm->gid, mgm->gid[5], 0, prot, NULL); if (err) break; } break; default: mlx4_warn(dev, "Bad type for required operation\n"); err = EINVAL; break; } err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); if (err) { mlx4_err(dev, "Failed to acknowledge required request: %d\n", err); goto out; } memset(outbox, 0, 0xffc); num_tasks = atomic_dec_return(&priv->opreq_count); } out: mlx4_free_cmd_mailbox(dev, mailbox); } Index: stable/10/sys/ofed/drivers/net/mlx4/fw.h =================================================================== --- stable/10/sys/ofed/drivers/net/mlx4/fw.h (revision 318535) +++ stable/10/sys/ofed/drivers/net/mlx4/fw.h (revision 318536) @@ -1,231 +1,244 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef MLX4_FW_H #define MLX4_FW_H #include "mlx4.h" #include "icm.h" struct mlx4_mod_stat_cfg { u8 log_pg_sz; u8 log_pg_sz_m; }; struct mlx4_dev_cap { int max_srq_sz; int max_qp_sz; int reserved_qps; int max_qps; int reserved_srqs; int max_srqs; int max_cq_sz; int reserved_cqs; int max_cqs; int max_mpts; int reserved_eqs; int max_eqs; + int num_sys_eqs; int reserved_mtts; int max_mrw_sz; int reserved_mrws; int max_mtt_seg; int max_requester_per_qp; int max_responder_per_qp; int max_rdma_global; int local_ca_ack_delay; int num_ports; u32 max_msg_sz; int ib_mtu[MLX4_MAX_PORTS + 1]; int max_port_width[MLX4_MAX_PORTS + 1]; int max_vl[MLX4_MAX_PORTS + 1]; int max_gids[MLX4_MAX_PORTS + 1]; int max_pkeys[MLX4_MAX_PORTS + 1]; u64 def_mac[MLX4_MAX_PORTS + 1]; u16 eth_mtu[MLX4_MAX_PORTS + 1]; int trans_type[MLX4_MAX_PORTS + 1]; int vendor_oui[MLX4_MAX_PORTS + 1]; u16 wavelength[MLX4_MAX_PORTS + 1]; u64 trans_code[MLX4_MAX_PORTS + 1]; u16 stat_rate_support; int fs_log_max_ucast_qp_range_size; int fs_max_num_qp_per_entry; u64 flags; u64 flags2; int reserved_uars; int uar_size; int min_page_sz; int bf_reg_size; int bf_regs_per_page; int max_sq_sg; int max_sq_desc_sz; int max_rq_sg; int max_rq_desc_sz; int max_qp_per_mcg; int reserved_mgms; int max_mcgs; int reserved_pds; int max_pds; int reserved_xrcds; int max_xrcds; int qpc_entry_sz; int rdmarc_entry_sz; int altc_entry_sz; int aux_entry_sz; int srq_entry_sz; int cqc_entry_sz; int eqc_entry_sz; int dmpt_entry_sz; int cmpt_entry_sz; int mtt_entry_sz; int resize_srq; u32 bmme_flags; u32 reserved_lkey; u64 max_icm_sz; int max_gso_sz; int max_rss_tbl_sz; u8 supported_port_types[MLX4_MAX_PORTS + 1]; u8 suggested_type[MLX4_MAX_PORTS + 1]; u8 default_sense[MLX4_MAX_PORTS + 1]; u8 log_max_macs[MLX4_MAX_PORTS + 1]; u8 log_max_vlans[MLX4_MAX_PORTS + 1]; u32 max_basic_counters; u32 sync_qp; u8 timestamp_support; u32 max_extended_counters; }; struct mlx4_func_cap { u8 num_ports; u8 flags; u32 pf_context_behaviour; int qp_quota; int cq_quota; int srq_quota; int mpt_quota; int mtt_quota; int max_eq; int reserved_eq; int mcg_quota; u32 qp0_tunnel_qpn; u32 qp0_proxy_qpn; u32 qp1_tunnel_qpn; u32 qp1_proxy_qpn; u8 physical_port; u8 port_flags; u8 def_counter_index; u8 extra_flags; }; +struct mlx4_func { + int bus; + int device; + int function; + int physical_function; + int rsvd_eqs; + int max_eq; + int rsvd_uars; +}; + struct mlx4_adapter { u16 vsd_vendor_id; char board_id[MLX4_BOARD_ID_LEN]; char vsd[MLX4_VSD_LEN]; u8 inta_pin; }; struct mlx4_init_hca_param { u64 qpc_base; u64 rdmarc_base; u64 auxc_base; u64 altc_base; u64 srqc_base; u64 cqc_base; u64 eqc_base; u64 mc_base; u64 dmpt_base; u64 cmpt_base; u64 mtt_base; u64 global_caps; u16 log_mc_entry_sz; u16 log_mc_hash_sz; u16 hca_core_clock; u8 log_num_qps; u8 log_num_srqs; u8 log_num_cqs; u8 log_num_eqs; + u16 num_sys_eqs; u8 log_rd_per_qp; u8 log_mc_table_sz; u8 log_mpt_sz; u8 log_uar_sz; u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 mw_enable; /* Enable memory windows */ u8 fs_hash_enable_bits; u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; }; struct mlx4_init_ib_param { int port_width; int vl_cap; int mtu_cap; u16 gid_cap; u16 pkey_cap; int set_guid0; u64 guid0; int set_node_guid; u64 node_guid; int set_si_guid; u64 si_guid; }; struct mlx4_set_ib_param { int set_si_guid; int reset_qkey_viol; u64 si_guid; u32 cap_mask; }; int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, struct mlx4_func_cap *func_cap); 
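/*
 * gen_or_port selects between the two QUERY_FUNC_CAP flavours: 0 asks for
 * the function-wide quotas and flags, while a value in 1..num_ports asks
 * for that logical port's attributes (the command's op_modifier is derived
 * from it).  A slave therefore typically queries once for the general
 * capabilities and then once per port.  A hedged usage sketch, not code
 * from this tree:
 *
 *	struct mlx4_func_cap func_cap;
 *	int port, err;
 *
 *	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
 *	for (port = 1; !err && port <= func_cap.num_ports; port++)
 *		err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
 */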
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); int mlx4_QUERY_FW(struct mlx4_dev *dev); int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); int mlx4_NOP(struct mlx4_dev *dev); int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); void mlx4_opreq_action(struct work_struct *work); #endif /* MLX4_FW_H */ Index: stable/10/sys/ofed/drivers/net/mlx4/main.c =================================================================== --- stable/10/sys/ofed/drivers/net/mlx4/main.c (revision 318535) +++ stable/10/sys/ofed/drivers/net/mlx4/main.c (revision 318536) @@ -1,3907 +1,3942 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include /* * kmod.h must be included before module.h since it includes (indirectly) sys/module.h * To use the FBSD macro sys/module.h should define MODULE_VERSION before linux/module does. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "mlx4.h" #include "fw.h" #include "icm.h" #include "mlx4_stats.h" /* Mellanox ConnectX HCA low-level driver */ struct workqueue_struct *mlx4_wq; #ifdef CONFIG_MLX4_DEBUG int mlx4_debug_level = 0; module_param_named(debug_level, mlx4_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); #endif /* CONFIG_MLX4_DEBUG */ #ifdef CONFIG_PCI_MSI static int msi_x = 1; module_param(msi_x, int, 0444); MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x (non-SRIOV only)"); #else /* CONFIG_PCI_MSI */ #define msi_x (0) #endif /* CONFIG_PCI_MSI */ static int enable_sys_tune = 0; module_param(enable_sys_tune, int, 0444); MODULE_PARM_DESC(enable_sys_tune, "Tune the cpu's for better performance (default 0)"); int mlx4_blck_lb = 1; module_param_named(block_loopback, mlx4_blck_lb, int, 0644); MODULE_PARM_DESC(block_loopback, "Block multicast loopback packets if > 0 " "(default: 1)"); enum { DEFAULT_DOMAIN = 0, BDF_STR_SIZE = 8, /* bb:dd.f- */ DBDF_STR_SIZE = 13 /* mmmm:bb:dd.f- */ }; enum { NUM_VFS, PROBE_VF, PORT_TYPE_ARRAY }; enum { VALID_DATA, INVALID_DATA, INVALID_STR }; struct param_data { int id; struct mlx4_dbdf2val_lst dbdf2val; }; static struct param_data num_vfs = { .id = NUM_VFS, .dbdf2val = { .name = "num_vfs param", .num_vals = 1, .def_val = {0}, .range = {0, MLX4_MAX_NUM_VF} } }; module_param_string(num_vfs, num_vfs.dbdf2val.str, sizeof(num_vfs.dbdf2val.str), 0444); MODULE_PARM_DESC(num_vfs, "Either single value (e.g. '5') to define uniform num_vfs value for all devices functions\n" "\t\tor a string to map device function numbers to their num_vfs values (e.g. '0000:04:00.0-5,002b:1c:0b.a-15').\n" "\t\tHexadecimal digits for the device function (e.g. 002b:1c:0b.a) and decimal for num_vfs value (e.g. 15)."); static struct param_data probe_vf = { .id = PROBE_VF, .dbdf2val = { .name = "probe_vf param", .num_vals = 1, .def_val = {0}, .range = {0, MLX4_MAX_NUM_VF} } }; module_param_string(probe_vf, probe_vf.dbdf2val.str, sizeof(probe_vf.dbdf2val.str), 0444); MODULE_PARM_DESC(probe_vf, "Either single value (e.g. '3') to define uniform number of VFs to probe by the pf driver for all devices functions\n" "\t\tor a string to map device function numbers to their probe_vf values (e.g. '0000:04:00.0-3,002b:1c:0b.a-13').\n" "\t\tHexadecimal digits for the device function (e.g. 002b:1c:0b.a) and decimal for probe_vf value (e.g. 13)."); int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; module_param_named(log_num_mgm_entry_size, mlx4_log_num_mgm_entry_size, int, 0444); MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" " of qp per mcg, for example:" " 10 gives 248.range: 7 <=" " log_num_mgm_entry_size <= 12." 
" To activate device managed" " flow steering when available, set to -1"); static int high_rate_steer; module_param(high_rate_steer, int, 0444); MODULE_PARM_DESC(high_rate_steer, "Enable steering mode for higher packet rate" " (default off)"); static int fast_drop; module_param_named(fast_drop, fast_drop, int, 0444); MODULE_PARM_DESC(fast_drop, "Enable fast packet drop when no recieve WQEs are posted"); int mlx4_enable_64b_cqe_eqe = 1; module_param_named(enable_64b_cqe_eqe, mlx4_enable_64b_cqe_eqe, int, 0644); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the the FW supports this if non-zero (default: 1)"); #define HCA_GLOBAL_CAP_MASK 0 #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE static char mlx4_version[] __devinitdata = DRV_NAME ": Mellanox ConnectX VPI driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static int log_num_mac = 7; module_param_named(log_num_mac, log_num_mac, int, 0444); MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); static int log_num_vlan; module_param_named(log_num_vlan, log_num_vlan, int, 0444); MODULE_PARM_DESC(log_num_vlan, "(Obsolete) Log2 max number of VLANs per ETH port (0-7)"); /* Log2 max number of VLANs per ETH port (0-7) */ #define MLX4_LOG_NUM_VLANS 7 int log_mtts_per_seg = ilog2(1); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment " "(0-7) (default: 0)"); static struct param_data port_type_array = { .id = PORT_TYPE_ARRAY, .dbdf2val = { .name = "port_type_array param", .num_vals = 2, .def_val = {MLX4_PORT_TYPE_ETH, MLX4_PORT_TYPE_ETH}, .range = {MLX4_PORT_TYPE_IB, MLX4_PORT_TYPE_NA} } }; module_param_string(port_type_array, port_type_array.dbdf2val.str, sizeof(port_type_array.dbdf2val.str), 0444); MODULE_PARM_DESC(port_type_array, "Either pair of values (e.g. '1,2') to define uniform port1/port2 types configuration for all devices functions\n" "\t\tor a string to map device function numbers to their pair of port types values (e.g. '0000:04:00.0-1;2,002b:1c:0b.a-1;1').\n" "\t\tValid port types: 1-ib, 2-eth, 3-auto, 4-N/A\n" "\t\tIn case that only one port is available use the N/A port type for port2 (e.g '1,4')."); struct mlx4_port_config { struct list_head list; enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; struct pci_dev *pdev; }; #define MLX4_LOG_NUM_MTT 20 /* We limit to 30 as of a bit map issue which uses int and not uint. see mlx4_buddy_init -> bitmap_zero which gets int. 
*/ #define MLX4_MAX_LOG_NUM_MTT 30 static struct mlx4_profile mod_param_profile = { .num_qp = 19, .num_srq = 16, .rdmarc_per_qp = 4, .num_cq = 16, .num_mcg = 13, .num_mpt = 19, .num_mtt_segs = 0, /* max(20, 2*MTTs for host memory)) */ }; module_param_named(log_num_qp, mod_param_profile.num_qp, int, 0444); MODULE_PARM_DESC(log_num_qp, "log maximum number of QPs per HCA (default: 19)"); module_param_named(log_num_srq, mod_param_profile.num_srq, int, 0444); MODULE_PARM_DESC(log_num_srq, "log maximum number of SRQs per HCA " "(default: 16)"); module_param_named(log_rdmarc_per_qp, mod_param_profile.rdmarc_per_qp, int, 0444); MODULE_PARM_DESC(log_rdmarc_per_qp, "log number of RDMARC buffers per QP " "(default: 4)"); module_param_named(log_num_cq, mod_param_profile.num_cq, int, 0444); MODULE_PARM_DESC(log_num_cq, "log maximum number of CQs per HCA (default: 16)"); module_param_named(log_num_mcg, mod_param_profile.num_mcg, int, 0444); MODULE_PARM_DESC(log_num_mcg, "log maximum number of multicast groups per HCA " "(default: 13)"); module_param_named(log_num_mpt, mod_param_profile.num_mpt, int, 0444); MODULE_PARM_DESC(log_num_mpt, "log maximum number of memory protection table entries per " "HCA (default: 19)"); module_param_named(log_num_mtt, mod_param_profile.num_mtt_segs, int, 0444); MODULE_PARM_DESC(log_num_mtt, "log maximum number of memory translation table segments per " "HCA (default: max(20, 2*MTTs for register all of the host memory limited to 30))"); enum { MLX4_IF_STATE_BASIC, MLX4_IF_STATE_EXTENDED }; static inline u64 dbdf_to_u64(int domain, int bus, int dev, int fn) { return (domain << 20) | (bus << 12) | (dev << 4) | fn; } static inline void pr_bdf_err(const char *dbdf, const char *pname) { pr_warn("mlx4_core: '%s' is not valid bdf in '%s'\n", dbdf, pname); } static inline void pr_val_err(const char *dbdf, const char *pname, const char *val) { pr_warn("mlx4_core: value '%s' of bdf '%s' in '%s' is not valid\n" , val, dbdf, pname); } static inline void pr_out_of_range_bdf(const char *dbdf, int val, struct mlx4_dbdf2val_lst *dbdf2val) { pr_warn("mlx4_core: value %d in bdf '%s' of '%s' is out of its valid range (%d,%d)\n" , val, dbdf, dbdf2val->name , dbdf2val->range.min, dbdf2val->range.max); } static inline void pr_out_of_range(struct mlx4_dbdf2val_lst *dbdf2val) { pr_warn("mlx4_core: value of '%s' is out of its valid range (%d,%d)\n" , dbdf2val->name , dbdf2val->range.min, dbdf2val->range.max); } static inline int is_in_range(int val, struct mlx4_range *r) { return (val >= r->min && val <= r->max); } static int update_defaults(struct param_data *pdata) { long int val[MLX4_MAX_BDF_VALS]; int ret; char *t, *p = pdata->dbdf2val.str; char sval[32]; int val_len; if (!strlen(p) || strchr(p, ':') || strchr(p, '.') || strchr(p, ';')) return INVALID_STR; switch (pdata->id) { case PORT_TYPE_ARRAY: t = strchr(p, ','); if (!t || t == p || (t - p) > sizeof(sval)) return INVALID_STR; val_len = t - p; strncpy(sval, p, val_len); sval[val_len] = 0; ret = kstrtol(sval, 0, &val[0]); if (ret == -EINVAL) return INVALID_STR; if (ret || !is_in_range(val[0], &pdata->dbdf2val.range)) { pr_out_of_range(&pdata->dbdf2val); return INVALID_DATA; } ret = kstrtol(t + 1, 0, &val[1]); if (ret == -EINVAL) return INVALID_STR; if (ret || !is_in_range(val[1], &pdata->dbdf2val.range)) { pr_out_of_range(&pdata->dbdf2val); return INVALID_DATA; } pdata->dbdf2val.tbl[0].val[0] = val[0]; pdata->dbdf2val.tbl[0].val[1] = val[1]; break; case NUM_VFS: case PROBE_VF: ret = kstrtol(p, 0, &val[0]); if (ret == -EINVAL) return 
INVALID_STR; if (ret || !is_in_range(val[0], &pdata->dbdf2val.range)) { pr_out_of_range(&pdata->dbdf2val); return INVALID_DATA; } pdata->dbdf2val.tbl[0].val[0] = val[0]; break; } pdata->dbdf2val.tbl[1].dbdf = MLX4_ENDOF_TBL; return VALID_DATA; } int mlx4_fill_dbdf2val_tbl(struct mlx4_dbdf2val_lst *dbdf2val_lst) { int domain, bus, dev, fn; u64 dbdf; char *p, *t, *v; char tmp[32]; char sbdf[32]; char sep = ','; int j, k, str_size, i = 1; int prfx_size; p = dbdf2val_lst->str; for (j = 0; j < dbdf2val_lst->num_vals; j++) dbdf2val_lst->tbl[0].val[j] = dbdf2val_lst->def_val[j]; dbdf2val_lst->tbl[1].dbdf = MLX4_ENDOF_TBL; str_size = strlen(dbdf2val_lst->str); if (str_size == 0) return 0; while (strlen(p)) { prfx_size = BDF_STR_SIZE; sbdf[prfx_size] = 0; strncpy(sbdf, p, prfx_size); domain = DEFAULT_DOMAIN; if (sscanf(sbdf, "%02x:%02x.%x-", &bus, &dev, &fn) != 3) { prfx_size = DBDF_STR_SIZE; sbdf[prfx_size] = 0; strncpy(sbdf, p, prfx_size); if (sscanf(sbdf, "%04x:%02x:%02x.%x-", &domain, &bus, &dev, &fn) != 4) { pr_bdf_err(sbdf, dbdf2val_lst->name); goto err; } sprintf(tmp, "%04x:%02x:%02x.%x-", domain, bus, dev, fn); } else { sprintf(tmp, "%02x:%02x.%x-", bus, dev, fn); } if (strnicmp(sbdf, tmp, sizeof(tmp))) { pr_bdf_err(sbdf, dbdf2val_lst->name); goto err; } dbdf = dbdf_to_u64(domain, bus, dev, fn); for (j = 1; j < i; j++) if (dbdf2val_lst->tbl[j].dbdf == dbdf) { pr_warn("mlx4_core: in '%s', %s appears multiple times\n" , dbdf2val_lst->name, sbdf); goto err; } if (i >= MLX4_DEVS_TBL_SIZE) { pr_warn("mlx4_core: Too many devices in '%s'\n" , dbdf2val_lst->name); goto err; } p += prfx_size; t = strchr(p, sep); t = t ? t : p + strlen(p); if (p >= t) { pr_val_err(sbdf, dbdf2val_lst->name, ""); goto err; } for (k = 0; k < dbdf2val_lst->num_vals; k++) { char sval[32]; long int val; int ret, val_len; char vsep = ';'; v = (k == dbdf2val_lst->num_vals - 1) ? t : strchr(p, vsep); if (!v || v > t || v == p || (v - p) > sizeof(sval)) { pr_val_err(sbdf, dbdf2val_lst->name, p); goto err; } val_len = v - p; strncpy(sval, p, val_len); sval[val_len] = 0; ret = kstrtol(sval, 0, &val); if (ret) { if (strchr(p, vsep)) pr_warn("mlx4_core: too many vals in bdf '%s' of '%s'\n" , sbdf, dbdf2val_lst->name); else pr_val_err(sbdf, dbdf2val_lst->name, sval); goto err; } if (!is_in_range(val, &dbdf2val_lst->range)) { pr_out_of_range_bdf(sbdf, val, dbdf2val_lst); goto err; } dbdf2val_lst->tbl[i].val[k] = val; p = v; if (p[0] == vsep) p++; } dbdf2val_lst->tbl[i].dbdf = dbdf; if (strlen(p)) { if (p[0] != sep) { pr_warn("mlx4_core: expect separator '%c' before '%s' in '%s'\n" , sep, p, dbdf2val_lst->name); goto err; } p++; } i++; if (i < MLX4_DEVS_TBL_SIZE) dbdf2val_lst->tbl[i].dbdf = MLX4_ENDOF_TBL; } return 0; err: dbdf2val_lst->tbl[1].dbdf = MLX4_ENDOF_TBL; pr_warn("mlx4_core: The value of '%s' is incorrect. 
The value is discarded!\n" , dbdf2val_lst->name); return -EINVAL; } EXPORT_SYMBOL(mlx4_fill_dbdf2val_tbl); int mlx4_get_val(struct mlx4_dbdf2val *tbl, struct pci_dev *pdev, int idx, int *val) { u64 dbdf; int i = 1; *val = tbl[0].val[idx]; if (!pdev) return -EINVAL; dbdf = dbdf_to_u64(pci_get_domain(pdev->dev.bsddev), pci_get_bus(pdev->dev.bsddev), PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); while ((i < MLX4_DEVS_TBL_SIZE) && (tbl[i].dbdf != MLX4_ENDOF_TBL)) { if (tbl[i].dbdf == dbdf) { *val = tbl[i].val[idx]; return 0; } i++; } return 0; } EXPORT_SYMBOL(mlx4_get_val); static void process_mod_param_profile(struct mlx4_profile *profile) { vm_size_t hwphyssz; hwphyssz = 0; TUNABLE_ULONG_FETCH("hw.realmem", (u_long *) &hwphyssz); profile->num_qp = 1 << mod_param_profile.num_qp; profile->num_srq = 1 << mod_param_profile.num_srq; profile->rdmarc_per_qp = 1 << mod_param_profile.rdmarc_per_qp; profile->num_cq = 1 << mod_param_profile.num_cq; profile->num_mcg = 1 << mod_param_profile.num_mcg; profile->num_mpt = 1 << mod_param_profile.num_mpt; /* * We want to scale the number of MTTs with the size of the * system memory, since it makes sense to register a lot of * memory on a system with a lot of memory. As a heuristic, * make sure we have enough MTTs to register twice the system * memory (with PAGE_SIZE entries). * * This number has to be a power of two and fit into 32 bits * due to device limitations. We cap this at 2^30 as of bit map * limitation to work with int instead of uint (mlx4_buddy_init -> bitmap_zero) * That limits us to 4TB of memory registration per HCA with * 4KB pages, which is probably OK for the next few months. */ if (mod_param_profile.num_mtt_segs) profile->num_mtt_segs = 1 << mod_param_profile.num_mtt_segs; else { profile->num_mtt_segs = roundup_pow_of_two(max_t(unsigned, 1 << (MLX4_LOG_NUM_MTT - log_mtts_per_seg), min(1UL << (MLX4_MAX_LOG_NUM_MTT - log_mtts_per_seg), (hwphyssz << 1) >> log_mtts_per_seg))); /* set the actual value, so it will be reflected to the user using the sysfs */ mod_param_profile.num_mtt_segs = ilog2(profile->num_mtt_segs); } } int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { int i; for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { mlx4_err(dev, "Only same port types supported " "on this HCA, aborting.\n"); return -EINVAL; } } } for (i = 0; i < dev->caps.num_ports; i++) { if (!(port_type[i] & dev->caps.supported_type[i+1])) { mlx4_err(dev, "Requested port type for port %d is not " "supported on this HCA\n", i + 1); return -EINVAL; } } return 0; } static void mlx4_set_port_mask(struct mlx4_dev *dev) { int i; for (i = 1; i <= dev->caps.num_ports; ++i) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +enum { + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, +}; + +static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err = 0; + struct mlx4_func func; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_QUERY_FUNC(dev, &func, 0); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + dev_cap->max_eqs = func.max_eq; + dev_cap->reserved_eqs = func.rsvd_eqs; + dev_cap->reserved_uars = func.rsvd_uars; + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; + } + return err; +} + static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; int i; err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, 
aborting.\n"); return err; } if (dev_cap->min_page_sz > PAGE_SIZE) { mlx4_err(dev, "HCA minimum page size of %d bigger than " "kernel PAGE_SIZE of %d, aborting.\n", dev_cap->min_page_sz, (int)PAGE_SIZE); return -ENODEV; } if (dev_cap->num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, " "aborting.\n", dev_cap->num_ports, MLX4_MAX_PORTS); return -ENODEV; } if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " "PCI resource 2 size of 0x%llx, aborting.\n", dev_cap->uar_size, (unsigned long long) pci_resource_len(dev->pdev, 2)); return -ENODEV; } dev->caps.num_ports = dev_cap->num_ports; - dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? + dev->caps.num_sys_eqs : + MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; /* set gid and pkey table operating lengths by default * to non-sriov values */ dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; dev->caps.def_mac[i] = dev_cap->def_mac[i]; dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; dev->caps.default_sense[i] = dev_cap->default_sense[i]; dev->caps.trans_type[i] = dev_cap->trans_type[i]; dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; dev->caps.wavelength[i] = dev_cap->wavelength[i]; dev->caps.trans_code[i] = dev_cap->trans_code[i]; } dev->caps.uar_page_size = PAGE_SIZE; dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; dev->caps.bf_reg_size = dev_cap->bf_reg_size; dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; dev->caps.max_sq_sg = dev_cap->max_sq_sg; dev->caps.max_rq_sg = dev_cap->max_rq_sg; dev->caps.max_wqes = dev_cap->max_qp_sz; dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; dev->caps.max_srq_wqes = dev_cap->max_srq_sz; dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; dev->caps.reserved_srqs = dev_cap->reserved_srqs; dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; /* * Subtract 1 from the limit because we need to allocate a * spare CQE to enable resizing the CQ */ dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; dev->caps.reserved_mtts = dev_cap->reserved_mtts; dev->caps.reserved_mrws = dev_cap->reserved_mrws; /* The first 128 UARs are used for EQ doorbells */ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->reserved_xrcds : 0; dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
dev_cap->max_xrcds : 0; dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; dev->caps.flags2 = dev_cap->flags2; dev->caps.bmme_flags = dev_cap->bmme_flags; dev->caps.reserved_lkey = dev_cap->reserved_lkey; dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.cq_timestamp = dev_cap->timestamp_support; dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; /* Don't do sense port on multifunction devices (for now at least) */ if (mlx4_is_mfunc(dev)) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; dev->caps.fast_drop = fast_drop ? !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FAST_DROP) : 0; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; if (dev->caps.supported_type[i]) { /* if only ETH is supported - assign ETH */ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; /* if only IB is supported, assign IB */ else if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_IB) dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; else { /* * if IB and ETH are supported, we set the port * type according to user selection of port type; * if there is no user selection, take the FW hint */ int pta; mlx4_get_val(port_type_array.dbdf2val.tbl, pci_physfn(dev->pdev), i - 1, &pta); if (pta == MLX4_PORT_TYPE_NONE) { dev->caps.port_type[i] = dev->caps.suggested_type[i] ? MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; } else if (pta == MLX4_PORT_TYPE_NA) { mlx4_err(dev, "Port %d is valid port. " "It is not allowed to configure its type to N/A(%d)\n", i, MLX4_PORT_TYPE_NA); return -EINVAL; } else { dev->caps.port_type[i] = pta; } } } /* * Link sensing is allowed on the port if 3 conditions are true: * 1. Both protocols are supported on the port. * 2. Different types are supported on the port * 3. 
FW declared that it supports link sensing */ mlx4_priv(dev)->sense.sense_allowed[i] = ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); /* Disablling auto sense for default Eth ports support */ mlx4_priv(dev)->sense.sense_allowed[i] = 0; /* * If "default_sense" bit is set, we move the port to "AUTO" mode * and perform sense_port FW command to try and set the correct * port type from beginning */ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; mlx4_SENSE_PORT(dev, i, &sensed_port); if (sensed_port != MLX4_PORT_TYPE_NONE) dev->caps.port_type[i] = sensed_port; } else { dev->caps.possible_type[i] = dev->caps.port_type[i]; } if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; mlx4_warn(dev, "Requested number of MACs is too much " "for port %d, reducing to %d.\n", i, 1 << dev->caps.log_num_macs); } if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; mlx4_warn(dev, "Requested number of VLANs is too much " "for port %d, reducing to %d.\n", i, 1 << dev->caps.log_num_vlans); } } dev->caps.max_basic_counters = dev_cap->max_basic_counters; dev->caps.max_extended_counters = dev_cap->max_extended_counters; /* support extended counters if available */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT) dev->caps.max_counters = dev->caps.max_extended_counters; else dev->caps.max_counters = dev->caps.max_basic_counters; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = (1 << dev->caps.log_num_macs) * (1 << dev->caps.log_num_vlans) * dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; dev->caps.sync_qp = dev_cap->sync_qp; if (dev->pdev->device == 0x1003) dev->caps.cq_flags |= MLX4_DEV_CAP_CQ_FLAG_IO; dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; if (!mlx4_enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { if (dev_cap->flags & (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } } if ((dev->caps.flags & (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; if (!mlx4_is_slave(dev)) { for (i = 0; i < dev->caps.num_ports; ++i) dev->caps.def_counter_index[i] = i << 1; dev->caps.alloc_res_qp_mask = (dev->caps.bf_reg_size ? 
MLX4_RESERVE_ETH_BF_QP : 0); } else { dev->caps.alloc_res_qp_mask = 0; } return 0; } /*The function checks if there are live vf, return the num of them*/ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state; int i; int ret = 0; for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { s_state = &priv->mfunc.master.slave_state[i]; if (s_state->active && s_state->last_cmd != MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "%s: slave: %d is still active\n", __func__, i); ret++; } } return ret; } int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) { u32 qk = MLX4_RESERVED_QKEY_BASE; if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || qpn < dev->phys_caps.base_proxy_sqpn) return -EINVAL; if (qpn >= dev->phys_caps.base_tunnel_sqpn) /* tunnel qp */ qk += qpn - dev->phys_caps.base_tunnel_sqpn; else qk += qpn - dev->phys_caps.base_proxy_sqpn; *qkey = qk; return 0; } EXPORT_SYMBOL(mlx4_get_parav_qkey); void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return; priv->virt2phys_pkey[slave][port - 1][i] = val; } EXPORT_SYMBOL(mlx4_sync_pkey_table); void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return; priv->slave_node_guids[slave] = guid; } EXPORT_SYMBOL(mlx4_put_slave_node_guid); __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return 0; return priv->slave_node_guids[slave]; } EXPORT_SYMBOL(mlx4_get_slave_node_guid); int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_slave; if (!mlx4_is_master(dev)) return 0; s_slave = &priv->mfunc.master.slave_state[slave]; return !!s_slave->active; } EXPORT_SYMBOL(mlx4_is_slave_active); static void slave_adjust_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *hca_param) { dev->caps.steering_mode = hca_param->steering_mode; if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; else dev->caps.num_qp_per_mgm = 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); mlx4_dbg(dev, "Steering mode is: %s\n", mlx4_steering_mode_str(dev->caps.steering_mode)); } static int mlx4_slave_cap(struct mlx4_dev *dev) { int err; u32 page_size; struct mlx4_dev_cap dev_cap; struct mlx4_func_cap func_cap; struct mlx4_init_hca_param hca_param; int i; memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); if (err) { mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); return err; } /*fail if the hca has an unknown capability */ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != HCA_GLOBAL_CAP_MASK) { mlx4_err(dev, "Unknown hca global capabilities\n"); return -ENOSYS; } mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; dev->caps.hca_core_clock = hca_param.hca_core_clock; memset(&dev_cap, 0, sizeof(dev_cap)); dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); return err; } err = mlx4_QUERY_FW(dev); if (err) mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); if (!hca_param.mw_enable) { 
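/* Editorial note (added for clarity): hca_param.mw_enable == 0 means the master did not enable memory windows for this function, so the slave masks out the memory-window capability and type-2 MW support below. */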
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; dev->caps.bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; } page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { mlx4_err(dev, "HCA minimum page size of %d bigger than " "kernel PAGE_SIZE of %d, aborting.\n", page_size, (int)PAGE_SIZE); return -ENODEV; } /* slave gets uar page size from QUERY_HCA fw command */ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); /* TODO: relax this assumption */ if (dev->caps.uar_page_size != PAGE_SIZE) { mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %d\n", dev->caps.uar_page_size, (int)PAGE_SIZE); return -ENODEV; } memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", err); return err; } if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour\n"); return -ENOSYS; } dev->caps.num_ports = func_cap.num_ports; dev->quotas.qp = func_cap.qp_quota; dev->quotas.srq = func_cap.srq_quota; dev->quotas.cq = func_cap.cq_quota; dev->quotas.mpt = func_cap.mpt_quota; dev->quotas.mtt = func_cap.mtt_quota; dev->caps.num_qps = 1 << hca_param.log_num_qps; dev->caps.num_srqs = 1 << hca_param.log_num_srqs; dev->caps.num_cqs = 1 << hca_param.log_num_cqs; dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; dev->caps.num_eqs = func_cap.max_eq; dev->caps.reserved_eqs = func_cap.reserved_eq; dev->caps.num_pds = MLX4_NUM_PDS; dev->caps.num_mgms = 0; dev->caps.num_amgms = 0; if (dev->caps.num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, " "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; } dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { err = -ENOMEM; goto err_mem; } for (i = 1; i <= dev->caps.num_ports; ++i) { err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" " port %d, aborting (%d).\n", i, err); goto err_mem; } dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; dev->caps.def_counter_index[i - 1] = func_cap.def_counter_index; dev->caps.port_mask[i] = dev->caps.port_type[i]; err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, &dev->caps.gid_table_len[i], &dev->caps.pkey_table_len[i]); if (err) goto err_mem; } if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " "PCI resource 2 size of 0x%llx, aborting.\n", dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->pdev, 2)); err = -ENOMEM; goto err_mem; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { dev->caps.eqe_size = 64; dev->caps.eqe_factor = 1; } else { dev->caps.eqe_size = 32; dev->caps.eqe_factor = 0; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; 
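/* Editorial note (added for clarity): when the master granted 64-byte CQEs, the slave also advertises MLX4_USER_DEV_CAP_64B_CQE to userspace; otherwise it falls back to 32-byte CQEs. */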
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; } else { dev->caps.cqe_size = 32; } dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); slave_adjust_steering_mode(dev, &dev_cap, &hca_param); if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && dev->caps.bf_reg_size) dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; return 0; err_mem: kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); dev->caps.qp0_tunnel = dev->caps.qp0_proxy = dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; return err; } static void mlx4_request_modules(struct mlx4_dev *dev) { int port; int has_ib_port = false; int has_eth_port = false; #define EN_DRV_NAME "mlx4_en" #define IB_DRV_NAME "mlx4_ib" for (port = 1; port <= dev->caps.num_ports; port++) { if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) has_ib_port = true; else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) has_eth_port = true; } if (has_ib_port) request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); } /* * Change the port configuration of the device. * Every user of this function must hold the port mutex. */ int mlx4_change_port_types(struct mlx4_dev *dev, enum mlx4_port_type *port_types) { int err = 0; int change = 0; int port; for (port = 0; port < dev->caps.num_ports; port++) { /* Change the port type only if the new type is different * from the current, and not set to Auto */ if (port_types[port] != dev->caps.port_type[port + 1]) change = 1; } if (change) { mlx4_unregister_device(dev); for (port = 1; port <= dev->caps.num_ports; port++) { mlx4_CLOSE_PORT(dev, port); dev->caps.port_type[port] = port_types[port - 1]; err = mlx4_SET_PORT(dev, port, -1); if (err) { mlx4_err(dev, "Failed to set port %d, " "aborting\n", port); goto out; } } mlx4_set_port_mask(dev); err = mlx4_register_device(dev); if (err) { mlx4_err(dev, "Failed to register device\n"); goto out; } mlx4_request_modules(dev); } out: return err; } static ssize_t show_port_type(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_attr); struct mlx4_dev *mdev = info->dev; char type[8]; sprintf(type, "%s", (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 
"ib" : "eth"); if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) sprintf(buf, "auto (%s)\n", type); else sprintf(buf, "%s\n", type); return strlen(buf); } static ssize_t set_port_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_attr); struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; int i; int err = 0; if (!strcmp(buf, "ib\n")) info->tmp_type = MLX4_PORT_TYPE_IB; else if (!strcmp(buf, "eth\n")) info->tmp_type = MLX4_PORT_TYPE_ETH; else if (!strcmp(buf, "auto\n")) info->tmp_type = MLX4_PORT_TYPE_AUTO; else { mlx4_err(mdev, "%s is not supported port type\n", buf); return -EINVAL; } if ((info->tmp_type & mdev->caps.supported_type[info->port]) != info->tmp_type) { mlx4_err(mdev, "Requested port type for port %d is not supported on this HCA\n", info->port); return -EINVAL; } mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); /* Possible type is always the one that was delivered */ mdev->caps.possible_type[info->port] = info->tmp_type; for (i = 0; i < mdev->caps.num_ports; i++) { types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : mdev->caps.possible_type[i+1]; if (types[i] == MLX4_PORT_TYPE_AUTO) types[i] = mdev->caps.port_type[i+1]; } if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; err = -EINVAL; } } } if (err) { mlx4_err(mdev, "Auto sensing is not supported on this HCA. " "Set only 'eth' or 'ib' for both ports " "(should be the same)\n"); goto out; } mlx4_do_sense_ports(mdev, new_types, types); err = mlx4_check_port_params(mdev, new_types); if (err) goto out; /* We are about to apply the changes after the configuration * was verified, no need to remember the temporary types * any more */ for (i = 0; i < mdev->caps.num_ports; i++) priv->port[i + 1].tmp_type = 0; err = mlx4_change_port_types(mdev, new_types); out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); return err ? 
err : count; } enum ibta_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5 }; static inline int int_to_ibta_mtu(int mtu) { switch (mtu) { case 256: return IB_MTU_256; case 512: return IB_MTU_512; case 1024: return IB_MTU_1024; case 2048: return IB_MTU_2048; case 4096: return IB_MTU_4096; default: return -1; } } static inline int ibta_mtu_to_int(enum ibta_mtu mtu) { switch (mtu) { case IB_MTU_256: return 256; case IB_MTU_512: return 512; case IB_MTU_1024: return 1024; case IB_MTU_2048: return 2048; case IB_MTU_4096: return 4096; default: return -1; } } static ssize_t show_board(struct device *device, struct device_attribute *attr, char *buf) { struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info, board_attr); struct mlx4_dev *mdev = info->dev; return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN, mdev->board_id); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info, hca_attr); struct mlx4_dev *mdev = info->dev; return sprintf(buf, "MT%d\n", mdev->pdev->device); } static ssize_t show_firmware_version(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info, firmware_attr); struct mlx4_dev *mdev = info->dev; return sprintf(buf, "%d.%d.%d\n", (int)(mdev->caps.fw_ver >> 32), (int)(mdev->caps.fw_ver >> 16) & 0xffff, (int)mdev->caps.fw_ver & 0xffff); } static ssize_t show_port_ib_mtu(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_mtu_attr); struct mlx4_dev *mdev = info->dev; /* When port type is eth, port mtu value isn't used. */ if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) return -EINVAL; sprintf(buf, "%d\n", ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); return strlen(buf); } static ssize_t set_port_ib_mtu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_mtu_attr); struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); int err, port, mtu, ibta_mtu = -1; if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); return -EINVAL; } mtu = (int) simple_strtol(buf, NULL, 0); ibta_mtu = int_to_ibta_mtu(mtu); if (ibta_mtu < 0) { mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); return -EINVAL; } mdev->caps.port_ib_mtu[info->port] = ibta_mtu; mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); mlx4_unregister_device(mdev); for (port = 1; port <= mdev->caps.num_ports; port++) { mlx4_CLOSE_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1); if (err) { mlx4_err(mdev, "Failed to set port %d, " "aborting\n", port); goto err_set_port; } } err = mlx4_register_device(mdev); err_set_port: mutex_unlock(&priv->port_mutex); mlx4_start_sense(mdev); return err ? 
err : count; } static int mlx4_load_fw(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err, unmap_flag = 0; priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.fw_icm) { mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); return -ENOMEM; } err = mlx4_MAP_FA(dev, priv->fw.fw_icm); if (err) { mlx4_err(dev, "MAP_FA command failed, aborting.\n"); goto err_free; } err = mlx4_RUN_FW(dev); if (err) { mlx4_err(dev, "RUN_FW command failed, aborting.\n"); goto err_unmap_fa; } return 0; err_unmap_fa: unmap_flag = mlx4_UNMAP_FA(dev); if (unmap_flag) pr_warn("mlx4_core: mlx4_UNMAP_FA failed.\n"); err_free: if (!unmap_flag) mlx4_free_icm(dev, priv->fw.fw_icm, 0); return err; } static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, int cmpt_entry_sz) { struct mlx4_priv *priv = mlx4_priv(dev); int err; int num_eqs; err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_QP * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) goto err; err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_SRQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) goto err_qp; err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_CQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, num_eqs, num_eqs, 0, 0); if (err) goto err_cq; return 0; err_cq: mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); err_srq: mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); err_qp: mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); err: return err; } static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *init_hca, u64 icm_size) { struct mlx4_priv *priv = mlx4_priv(dev); u64 aux_pages; int num_eqs; int err, unmap_flag = 0; err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); if (err) { mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); return err; } mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", (unsigned long long) icm_size >> 10, (unsigned long long) aux_pages << 2); priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.aux_icm) { mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); return -ENOMEM; } err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); if (err) { mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); goto err_free_aux; } err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); if (err) { mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); goto err_unmap_aux; } - num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); goto err_unmap_cmpt; } /* * Reserved MTT entries must be aligned up to a cacheline * boundary, since the FW will write to them, while the driver * writes to all other MTT entries. (The variable * dev->caps.mtt_entry_sz below is really the MTT segment * size, not the raw entry size) */ dev->caps.reserved_mtts = ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, init_hca->mtt_base, dev->caps.mtt_entry_sz, dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); goto err_unmap_eq; } err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, init_hca->dmpt_base, dev_cap->dmpt_entry_sz, dev->caps.num_mpts, dev->caps.reserved_mrws, 1, 1); if (err) { mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); goto err_unmap_mtt; } err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, init_hca->qpc_base, dev_cap->qpc_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); goto err_unmap_dmpt; } err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, init_hca->auxc_base, dev_cap->aux_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); goto err_unmap_qp; } err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, init_hca->altc_base, dev_cap->altc_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); goto err_unmap_auxc; } err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, init_hca->rdmarc_base, dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); goto err_unmap_altc; } err = mlx4_init_icm_table(dev, &priv->cq_table.table, init_hca->cqc_base, dev_cap->cqc_entry_sz, dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); goto err_unmap_rdmarc; } err = mlx4_init_icm_table(dev, &priv->srq_table.table, init_hca->srqc_base, dev_cap->srq_entry_sz, dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); goto err_unmap_cq; } /* * For flow steering device managed mode it is required to use * mlx4_init_icm_table. For B0 steering mode it's not strictly * required, but for simplicity just map the whole multicast * group table now. The table isn't very big and it's a lot * easier than trying to track ref counts. 
*/ err = mlx4_init_icm_table(dev, &priv->mcg_table.table, init_hca->mc_base, mlx4_get_mgm_entry_size(dev), dev->caps.num_mgms + dev->caps.num_amgms, dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); if (err) { mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); goto err_unmap_srq; } return 0; err_unmap_srq: mlx4_cleanup_icm_table(dev, &priv->srq_table.table); err_unmap_cq: mlx4_cleanup_icm_table(dev, &priv->cq_table.table); err_unmap_rdmarc: mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); err_unmap_altc: mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); err_unmap_auxc: mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); err_unmap_qp: mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); err_unmap_dmpt: mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); err_unmap_mtt: mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); err_unmap_eq: mlx4_cleanup_icm_table(dev, &priv->eq_table.table); err_unmap_cmpt: mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); err_unmap_aux: unmap_flag = mlx4_UNMAP_ICM_AUX(dev); if (unmap_flag) pr_warn("mlx4_core: mlx4_UNMAP_ICM_AUX failed.\n"); err_free_aux: if (!unmap_flag) mlx4_free_icm(dev, priv->fw.aux_icm, 0); return err; } static void mlx4_free_icms(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); mlx4_cleanup_icm_table(dev, &priv->srq_table.table); mlx4_cleanup_icm_table(dev, &priv->cq_table.table); mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); mlx4_cleanup_icm_table(dev, &priv->eq_table.table); mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); if (!mlx4_UNMAP_ICM_AUX(dev)) mlx4_free_icm(dev, priv->fw.aux_icm, 0); else pr_warn("mlx4_core: mlx4_UNMAP_ICM_AUX failed.\n"); } static void mlx4_slave_exit(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) mlx4_warn(dev, "Failed to close slave function.\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); } static int map_bf_area(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); resource_size_t bf_start; resource_size_t bf_len; int err = 0; if (!dev->caps.bf_reg_size) return -ENXIO; bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); if (!priv->bf_mapping) err = -ENOMEM; return err; } static void unmap_bf_area(struct mlx4_dev *dev) { if (mlx4_priv(dev)->bf_mapping) io_mapping_free(mlx4_priv(dev)->bf_mapping); } int mlx4_read_clock(struct mlx4_dev *dev) { u32 clockhi, clocklo, clockhi1; cycle_t cycles; int i; struct mlx4_priv *priv = mlx4_priv(dev); if (!priv->clock_mapping) return -ENOTSUPP; for (i = 0; i < 10; i++) { clockhi = swab32(readl(priv->clock_mapping)); clocklo = 
swab32(readl(priv->clock_mapping + 4)); clockhi1 = swab32(readl(priv->clock_mapping)); if (clockhi == clockhi1) break; } cycles = (u64) clockhi << 32 | (u64) clocklo; return cycles; } EXPORT_SYMBOL_GPL(mlx4_read_clock); static int map_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); priv->clock_mapping = ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + priv->fw.clock_offset, MLX4_CLOCK_SIZE); if (!priv->clock_mapping) return -ENOMEM; return 0; } int mlx4_get_internal_clock_params(struct mlx4_dev *dev, struct mlx4_clock_params *params) { struct mlx4_priv *priv = mlx4_priv(dev); if (mlx4_is_slave(dev)) return -ENOTSUPP; if (!params) return -EINVAL; params->bar = priv->fw.clock_bar; params->offset = priv->fw.clock_offset; params->size = MLX4_CLOCK_SIZE; return 0; } EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); static void unmap_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); if (priv->clock_mapping) iounmap(priv->clock_mapping); } static void mlx4_close_hca(struct mlx4_dev *dev) { unmap_internal_clock(dev); unmap_bf_area(dev); if (mlx4_is_slave(dev)) { mlx4_slave_exit(dev); } else { mlx4_CLOSE_HCA(dev, 0); mlx4_free_icms(dev); if (!mlx4_UNMAP_FA(dev)) mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); else pr_warn("mlx4_core: mlx4_UNMAP_FA failed.\n"); } } static int mlx4_init_slave(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); u64 dma = (u64) priv->mfunc.vhcr_dma; int num_of_reset_retries = NUM_OF_RESET_RETRIES; int ret_from_reset = 0; u32 slave_read; u32 cmd_channel_ver; mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; mlx4_warn(dev, "Sending reset\n"); ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME); /* if we are in the middle of flr the slave will try * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { msleep(SLEEP_TIME_IN_RESET); while (ret_from_reset && num_of_reset_retries) { mlx4_warn(dev, "slave is currently in the" "middle of FLR. retrying..." 
"(try num:%d)\n", (NUM_OF_RESET_RETRIES - num_of_reset_retries + 1)); ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME); num_of_reset_retries = num_of_reset_retries - 1; } } else goto err; } /* check the driver version - the slave I/F revision * must match the master's */ slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); cmd_channel_ver = mlx4_comm_get_version(); if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != MLX4_COMM_GET_IF_REV(slave_read)) { mlx4_err(dev, "slave driver version is not supported" " by the master\n"); goto err; } mlx4_warn(dev, "Sending vhcr0\n"); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) goto err; mutex_unlock(&priv->cmd.slave_cmd_mutex); return 0; err: mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EIO; } static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) { int i; for (i = 1; i <= dev->caps.num_ports; i++) { if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.gid_table_len[i] = mlx4_get_slave_num_gids(dev, 0); else dev->caps.gid_table_len[i] = 1; dev->caps.pkey_table_len[i] = dev->phys_caps.pkey_phys_table_len[i] - 1; } } static int choose_log_fs_mgm_entry_size(int qp_per_entry) { int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++) { if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) break; } return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; } static void choose_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int nvfs; mlx4_get_val(num_vfs.dbdf2val.tbl, pci_physfn(dev->pdev), 0, &nvfs); if (high_rate_steer && !mlx4_is_mfunc(dev)) { dev->caps.flags &= ~(MLX4_DEV_CAP_FLAG_VEP_MC_STEER | MLX4_DEV_CAP_FLAG_VEP_UC_STEER); dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_FS_EN; } if (mlx4_log_num_mgm_entry_size == -1 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || (dev_cap->fs_max_num_qp_per_entry >= (nvfs + 1))) && choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= MLX4_MIN_MGM_LOG_ENTRY_SIZE) { dev->oper_log_mgm_entry_size = choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; } else { if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) dev->caps.steering_mode = MLX4_STEERING_MODE_B0; else { dev->caps.steering_mode = MLX4_STEERING_MODE_A0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " "set to use B0 steering. Falling back to A0 steering mode.\n"); } dev->oper_log_mgm_entry_size = mlx4_log_num_mgm_entry_size > 0 ? 
mlx4_log_num_mgm_entry_size : MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " "log_num_mgm_entry_size = %d\n", mlx4_steering_mode_str(dev->caps.steering_mode), dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size); } static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_dev_cap *dev_cap = NULL; struct mlx4_adapter adapter; struct mlx4_mod_stat_cfg mlx4_cfg; struct mlx4_profile profile; struct mlx4_init_hca_param init_hca; u64 icm_size; int err; if (!mlx4_is_slave(dev)) { err = mlx4_QUERY_FW(dev); if (err) { if (err == -EACCES) mlx4_info(dev, "non-primary physical function, skipping.\n"); else mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); return err; } err = mlx4_load_fw(dev); if (err) { mlx4_err(dev, "Failed to start FW, aborting.\n"); return err; } mlx4_cfg.log_pg_sz_m = 1; mlx4_cfg.log_pg_sz = 0; err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); if (err) mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); dev_cap = kzalloc(sizeof *dev_cap, GFP_KERNEL); if (!dev_cap) { mlx4_err(dev, "Failed to allocate memory for dev_cap\n"); err = -ENOMEM; goto err_stop_fw; } err = mlx4_dev_cap(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); goto err_stop_fw; } choose_steering_mode(dev, dev_cap); if (mlx4_is_master(dev)) mlx4_parav_master_pf_caps(dev); process_mod_param_profile(&profile); if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG; icm_size = mlx4_make_profile(dev, &profile, dev_cap, &init_hca); if ((long long) icm_size < 0) { err = icm_size; goto err_stop_fw; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; init_hca.log_uar_sz = ilog2(dev->caps.num_uars); init_hca.uar_page_sz = PAGE_SHIFT - 12; err = mlx4_init_icm(dev, dev_cap, &init_hca, icm_size); if (err) goto err_stop_fw; init_hca.mw_enable = 1; err = mlx4_INIT_HCA(dev, &init_hca); if (err) { mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); goto err_free_icm; } + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, dev_cap); + if (err < 0) { + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); + goto err_stop_fw; + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { + dev->caps.num_eqs = dev_cap->max_eqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_uars = dev_cap->reserved_uars; + } + } + /* * Read HCA frequency by QUERY_HCA command */ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { memset(&init_hca, 0, sizeof(init_hca)); err = mlx4_QUERY_HCA(dev, &init_hca); if (err) { mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = init_hca.hca_core_clock; } /* In case we got HCA frequency 0 - disable timestamping * to avoid dividing by zero */ if (!dev->caps.hca_core_clock) { dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, "HCA frequency is 0. Timestamping is not supported."); } else if (map_internal_clock(dev)) { /* Map internal clock, * in case of failure disable timestamping */ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported.\n"); } } } else { err = mlx4_init_slave(dev); if (err) { mlx4_err(dev, "Failed to initialize slave\n"); return err; } err = mlx4_slave_cap(dev); if (err) { mlx4_err(dev, "Failed to obtain slave caps\n"); goto err_close; } } if (map_bf_area(dev)) mlx4_dbg(dev, "Failed to map blue flame area\n"); /* Only the master set the ports, all the rest got it from it.*/ if (!mlx4_is_slave(dev)) mlx4_set_port_mask(dev); err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); goto unmap_bf; } priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); memcpy(dev->vsd, adapter.vsd, sizeof(dev->vsd)); dev->vsd_vendor_id = adapter.vsd_vendor_id; if (!mlx4_is_slave(dev)) kfree(dev_cap); return 0; unmap_bf: if (!mlx4_is_slave(dev)) unmap_internal_clock(dev); unmap_bf_area(dev); if (mlx4_is_slave(dev)) { kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); } err_close: if (mlx4_is_slave(dev)) mlx4_slave_exit(dev); else mlx4_CLOSE_HCA(dev, 0); err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); err_stop_fw: if (!mlx4_is_slave(dev)) { if (!mlx4_UNMAP_FA(dev)) mlx4_free_icm(dev, priv->fw.fw_icm, 0); else pr_warn("mlx4_core: mlx4_UNMAP_FA failed.\n"); kfree(dev_cap); } return err; } static int mlx4_init_counters_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int nent_pow2, port_indx, vf_index, num_counters; int res, index = 0; struct counter_index *new_counter_index; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return -ENOENT; if (!mlx4_is_slave(dev) && dev->caps.max_counters == dev->caps.max_extended_counters) { res = mlx4_cmd(dev, MLX4_IF_STATE_EXTENDED, 0, 0, MLX4_CMD_SET_IF_STAT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (res) { mlx4_err(dev, "Failed to set extended counters (err=%d)\n", res); return res; } } mutex_init(&priv->counters_table.mutex); if (mlx4_is_slave(dev)) { for (port_indx = 0; port_indx < dev->caps.num_ports; port_indx++) { INIT_LIST_HEAD(&priv->counters_table.global_port_list[port_indx]); if (dev->caps.def_counter_index[port_indx] != 0xFF) { new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) return -ENOMEM; new_counter_index->index = dev->caps.def_counter_index[port_indx]; list_add_tail(&new_counter_index->list, &priv->counters_table.global_port_list[port_indx]); } } mlx4_dbg(dev, "%s: slave allocated %d counters for %d ports\n", __func__, dev->caps.num_ports, dev->caps.num_ports); return 0; } nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); for (port_indx = 0; port_indx < dev->caps.num_ports; port_indx++) { INIT_LIST_HEAD(&priv->counters_table.global_port_list[port_indx]); /* allocating 2 counters per port for PFs */ /* For the PF, the ETH default counters are 0,2; */ /* and the RoCE default counters are 1,3 */ for (num_counters = 0; num_counters < 2; num_counters++, index++) { new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) return -ENOMEM; new_counter_index->index = index; list_add_tail(&new_counter_index->list, &priv->counters_table.global_port_list[port_indx]); } } if (mlx4_is_master(dev)) { for (vf_index = 0; vf_index < dev->num_vfs; vf_index++) { for (port_indx = 0; port_indx < dev->caps.num_ports; port_indx++) { INIT_LIST_HEAD(&priv->counters_table.vf_list[vf_index][port_indx]); new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if 
(!new_counter_index) return -ENOMEM; if (index < nent_pow2 - 2) { new_counter_index->index = index; index++; } else { new_counter_index->index = MLX4_SINK_COUNTER_INDEX; } list_add_tail(&new_counter_index->list, &priv->counters_table.vf_list[vf_index][port_indx]); } } res = mlx4_bitmap_init(&priv->counters_table.bitmap, nent_pow2, nent_pow2 - 1, index, 1); mlx4_dbg(dev, "%s: master allocated %d counters for %d VFs\n", __func__, index, dev->num_vfs); } else { res = mlx4_bitmap_init(&priv->counters_table.bitmap, nent_pow2, nent_pow2 - 1, index, 1); mlx4_dbg(dev, "%s: native allocated %d counters for %d ports\n", __func__, index, dev->caps.num_ports); } return 0; } static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i, j; struct counter_index *port, *tmp_port; struct counter_index *vf, *tmp_vf; mutex_lock(&priv->counters_table.mutex); if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) { for (i = 0; i < dev->caps.num_ports; i++) { list_for_each_entry_safe(port, tmp_port, &priv->counters_table.global_port_list[i], list) { list_del(&port->list); kfree(port); } } if (!mlx4_is_slave(dev)) { for (i = 0; i < dev->num_vfs; i++) { for (j = 0; j < dev->caps.num_ports; j++) { list_for_each_entry_safe(vf, tmp_vf, &priv->counters_table.vf_list[i][j], list) { /* clear the counter statistic */ if (__mlx4_clear_if_stat(dev, vf->index)) mlx4_dbg(dev, "%s: reset counter %d failed\n", __func__, vf->index); list_del(&vf->list); kfree(vf); } } } mlx4_bitmap_cleanup(&priv->counters_table.bitmap); } } mutex_unlock(&priv->counters_table.mutex); } int __mlx4_slave_counters_free(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); int i, first; struct counter_index *vf, *tmp_vf; /* clean VF's counters for the next useg */ if (slave > 0 && slave <= dev->num_vfs) { mlx4_dbg(dev, "%s: free counters of slave(%d)\n" , __func__, slave); mutex_lock(&priv->counters_table.mutex); for (i = 0; i < dev->caps.num_ports; i++) { first = 0; list_for_each_entry_safe(vf, tmp_vf, &priv->counters_table.vf_list[slave - 1][i], list) { /* clear the counter statistic */ if (__mlx4_clear_if_stat(dev, vf->index)) mlx4_dbg(dev, "%s: reset counter %d failed\n", __func__, vf->index); if (first++ && vf->index != MLX4_SINK_COUNTER_INDEX) { mlx4_dbg(dev, "%s: delete counter index %d for slave %d and port %d\n" , __func__, vf->index, slave, i + 1); mlx4_bitmap_free(&priv->counters_table.bitmap, vf->index, MLX4_USE_RR); list_del(&vf->list); kfree(vf); } else { mlx4_dbg(dev, "%s: can't delete default counter index %d for slave %d and port %d\n" , __func__, vf->index, slave, i + 1); } } } mutex_unlock(&priv->counters_table.mutex); } return 0; } int __mlx4_counter_alloc(struct mlx4_dev *dev, int slave, int port, u32 *idx) { struct mlx4_priv *priv = mlx4_priv(dev); struct counter_index *new_counter_index; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return -ENOENT; if ((slave > MLX4_MAX_NUM_VF) || (slave < 0) || (port < 0) || (port > MLX4_MAX_PORTS)) { mlx4_dbg(dev, "%s: invalid slave(%d) or port(%d) index\n", __func__, slave, port); return -EINVAL; } /* handle old guest request does not support request by port index */ if (port == 0) { *idx = MLX4_SINK_COUNTER_INDEX; mlx4_dbg(dev, "%s: allocated default counter index %d for slave %d port %d\n" , __func__, *idx, slave, port); return 0; } mutex_lock(&priv->counters_table.mutex); *idx = mlx4_bitmap_alloc(&priv->counters_table.bitmap); /* if no resources return the default counter of the slave and port */ if (*idx == -1) { 
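/*
 * Bitmap exhausted: rather than failing, fall back to the default
 * (first) counter already on the per-port list for this slave/port and
 * report success.  The caller therefore cannot tell a dedicated counter
 * from the shared default by the return value alone; the shared index
 * is simply used by more than one consumer.
 */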
if (slave == 0) { /* its the ethernet counter ?????? */ new_counter_index = list_entry(priv->counters_table.global_port_list[port - 1].next, struct counter_index, list); } else { new_counter_index = list_entry(priv->counters_table.vf_list[slave - 1][port - 1].next, struct counter_index, list); } *idx = new_counter_index->index; mlx4_dbg(dev, "%s: allocated defualt counter index %d for slave %d port %d\n" , __func__, *idx, slave, port); goto out; } if (slave == 0) { /* native or master */ new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) goto no_mem; new_counter_index->index = *idx; list_add_tail(&new_counter_index->list, &priv->counters_table.global_port_list[port - 1]); } else { new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) goto no_mem; new_counter_index->index = *idx; list_add_tail(&new_counter_index->list, &priv->counters_table.vf_list[slave - 1][port - 1]); } mlx4_dbg(dev, "%s: allocated counter index %d for slave %d port %d\n" , __func__, *idx, slave, port); out: mutex_unlock(&priv->counters_table.mutex); return 0; no_mem: mlx4_bitmap_free(&priv->counters_table.bitmap, *idx, MLX4_USE_RR); mutex_unlock(&priv->counters_table.mutex); *idx = MLX4_SINK_COUNTER_INDEX; mlx4_dbg(dev, "%s: failed err (%d)\n" , __func__, -ENOMEM); return -ENOMEM; } int mlx4_counter_alloc(struct mlx4_dev *dev, u8 port, u32 *idx) { u64 out_param; int err; struct mlx4_priv *priv = mlx4_priv(dev); struct counter_index *new_counter_index, *c_index; if (mlx4_is_mfunc(dev)) { err = mlx4_cmd_imm(dev, 0, &out_param, ((u32) port) << 8 | (u32) RES_COUNTER, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) { *idx = get_param_l(&out_param); if (*idx == MLX4_SINK_COUNTER_INDEX) return -ENOSPC; mutex_lock(&priv->counters_table.mutex); c_index = list_entry(priv->counters_table.global_port_list[port - 1].next, struct counter_index, list); mutex_unlock(&priv->counters_table.mutex); if (c_index->index == *idx) return -EEXIST; if (mlx4_is_slave(dev)) { new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); if (!new_counter_index) { mlx4_counter_free(dev, port, *idx); return -ENOMEM; } new_counter_index->index = *idx; mutex_lock(&priv->counters_table.mutex); list_add_tail(&new_counter_index->list, &priv->counters_table.global_port_list[port - 1]); mutex_unlock(&priv->counters_table.mutex); mlx4_dbg(dev, "%s: allocated counter index %d for port %d\n" , __func__, *idx, port); } } return err; } return __mlx4_counter_alloc(dev, 0, port, idx); } EXPORT_SYMBOL_GPL(mlx4_counter_alloc); void __mlx4_counter_free(struct mlx4_dev *dev, int slave, int port, u32 idx) { /* check if native or slave and deletes acordingly */ struct mlx4_priv *priv = mlx4_priv(dev); struct counter_index *pf, *tmp_pf; struct counter_index *vf, *tmp_vf; int first; if (idx == MLX4_SINK_COUNTER_INDEX) { mlx4_dbg(dev, "%s: try to delete default counter index %d for port %d\n" , __func__, idx, port); return; } if ((slave > MLX4_MAX_NUM_VF) || (slave < 0) || (port < 0) || (port > MLX4_MAX_PORTS)) { mlx4_warn(dev, "%s: deletion failed due to invalid slave(%d) or port(%d) index\n" , __func__, slave, idx); return; } mutex_lock(&priv->counters_table.mutex); if (slave == 0) { first = 0; list_for_each_entry_safe(pf, tmp_pf, &priv->counters_table.global_port_list[port - 1], list) { /* the first 2 counters are reserved */ if (pf->index == idx) { /* clear the counter statistic */ if (__mlx4_clear_if_stat(dev, pf->index)) mlx4_dbg(dev, 
"%s: reset counter %d failed\n", __func__, pf->index); if (1 < first && idx != MLX4_SINK_COUNTER_INDEX) { list_del(&pf->list); kfree(pf); mlx4_dbg(dev, "%s: delete counter index %d for native device (%d) port %d\n" , __func__, idx, slave, port); mlx4_bitmap_free(&priv->counters_table.bitmap, idx, MLX4_USE_RR); goto out; } else { mlx4_dbg(dev, "%s: can't delete default counter index %d for native device (%d) port %d\n" , __func__, idx, slave, port); goto out; } } first++; } mlx4_dbg(dev, "%s: can't delete counter index %d for native device (%d) port %d\n" , __func__, idx, slave, port); } else { first = 0; list_for_each_entry_safe(vf, tmp_vf, &priv->counters_table.vf_list[slave - 1][port - 1], list) { /* the first element is reserved */ if (vf->index == idx) { /* clear the counter statistic */ if (__mlx4_clear_if_stat(dev, vf->index)) mlx4_dbg(dev, "%s: reset counter %d failed\n", __func__, vf->index); if (first) { list_del(&vf->list); kfree(vf); mlx4_dbg(dev, "%s: delete counter index %d for slave %d port %d\n", __func__, idx, slave, port); mlx4_bitmap_free(&priv->counters_table.bitmap, idx, MLX4_USE_RR); goto out; } else { mlx4_dbg(dev, "%s: can't delete default slave (%d) counter index %d for port %d\n" , __func__, slave, idx, port); goto out; } } first++; } mlx4_dbg(dev, "%s: can't delete slave (%d) counter index %d for port %d\n" , __func__, slave, idx, port); } out: mutex_unlock(&priv->counters_table.mutex); } void mlx4_counter_free(struct mlx4_dev *dev, u8 port, u32 idx) { u64 in_param = 0; struct mlx4_priv *priv = mlx4_priv(dev); struct counter_index *counter, *tmp_counter; int first = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, idx); mlx4_cmd(dev, in_param, ((u32) port) << 8 | (u32) RES_COUNTER, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (mlx4_is_slave(dev) && idx != MLX4_SINK_COUNTER_INDEX) { mutex_lock(&priv->counters_table.mutex); list_for_each_entry_safe(counter, tmp_counter, &priv->counters_table.global_port_list[port - 1], list) { if (counter->index == idx && first++) { list_del(&counter->list); kfree(counter); mlx4_dbg(dev, "%s: delete counter index %d for port %d\n" , __func__, idx, port); mutex_unlock(&priv->counters_table.mutex); return; } } mutex_unlock(&priv->counters_table.mutex); } return; } __mlx4_counter_free(dev, 0, port, idx); } EXPORT_SYMBOL_GPL(mlx4_counter_free); int __mlx4_clear_if_stat(struct mlx4_dev *dev, u8 counter_index) { struct mlx4_cmd_mailbox *if_stat_mailbox = NULL; int err = 0; u32 if_stat_in_mod = (counter_index & 0xff) | (1 << 31); if (counter_index == MLX4_SINK_COUNTER_INDEX) return -EINVAL; if (mlx4_is_slave(dev)) return 0; if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(if_stat_mailbox)) { err = PTR_ERR(if_stat_mailbox); return err; } err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, if_stat_mailbox); return err; } u8 mlx4_get_default_counter_index(struct mlx4_dev *dev, int slave, int port) { struct mlx4_priv *priv = mlx4_priv(dev); struct counter_index *new_counter_index; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) { mlx4_dbg(dev, "%s: return counter index %d for slave %d port (MLX4_PORT_TYPE_IB) %d\n", __func__, MLX4_SINK_COUNTER_INDEX, slave, port); return (u8)MLX4_SINK_COUNTER_INDEX; } mutex_lock(&priv->counters_table.mutex); if (slave == 0) { new_counter_index = list_entry(priv->counters_table.global_port_list[port - 1].next, struct counter_index, list); } else { 
new_counter_index = list_entry(priv->counters_table.vf_list[slave - 1][port - 1].next, struct counter_index, list); } mutex_unlock(&priv->counters_table.mutex); mlx4_dbg(dev, "%s: return counter index %d for slave %d port %d\n", __func__, new_counter_index->index, slave, port); return (u8)new_counter_index->index; } int mlx4_get_vport_ethtool_stats(struct mlx4_dev *dev, int port, struct mlx4_en_vport_stats *vport_stats, int reset) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *if_stat_mailbox = NULL; union mlx4_counter *counter; int err = 0; u32 if_stat_in_mod; struct counter_index *vport, *tmp_vport; if (!vport_stats) return -EINVAL; if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(if_stat_mailbox)) { err = PTR_ERR(if_stat_mailbox); return err; } mutex_lock(&priv->counters_table.mutex); list_for_each_entry_safe(vport, tmp_vport, &priv->counters_table.global_port_list[port - 1], list) { if (vport->index == MLX4_SINK_COUNTER_INDEX) continue; memset(if_stat_mailbox->buf, 0, sizeof(union mlx4_counter)); if_stat_in_mod = (vport->index & 0xff) | ((reset & 1) << 31); err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) { mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n", __func__, vport->index); goto if_stat_out; } counter = (union mlx4_counter *)if_stat_mailbox->buf; if ((counter->control.cnt_mode & 0xf) == 1) { vport_stats->rx_broadcast_packets += be64_to_cpu(counter->ext.counters[0].IfRxBroadcastFrames); vport_stats->rx_unicast_packets += be64_to_cpu(counter->ext.counters[0].IfRxUnicastFrames); vport_stats->rx_multicast_packets += be64_to_cpu(counter->ext.counters[0].IfRxMulticastFrames); vport_stats->tx_broadcast_packets += be64_to_cpu(counter->ext.counters[0].IfTxBroadcastFrames); vport_stats->tx_unicast_packets += be64_to_cpu(counter->ext.counters[0].IfTxUnicastFrames); vport_stats->tx_multicast_packets += be64_to_cpu(counter->ext.counters[0].IfTxMulticastFrames); vport_stats->rx_broadcast_bytes += be64_to_cpu(counter->ext.counters[0].IfRxBroadcastOctets); vport_stats->rx_unicast_bytes += be64_to_cpu(counter->ext.counters[0].IfRxUnicastOctets); vport_stats->rx_multicast_bytes += be64_to_cpu(counter->ext.counters[0].IfRxMulticastOctets); vport_stats->tx_broadcast_bytes += be64_to_cpu(counter->ext.counters[0].IfTxBroadcastOctets); vport_stats->tx_unicast_bytes += be64_to_cpu(counter->ext.counters[0].IfTxUnicastOctets); vport_stats->tx_multicast_bytes += be64_to_cpu(counter->ext.counters[0].IfTxMulticastOctets); vport_stats->rx_errors += be64_to_cpu(counter->ext.counters[0].IfRxErrorFrames); vport_stats->rx_dropped += be64_to_cpu(counter->ext.counters[0].IfRxNoBufferFrames); vport_stats->tx_errors += be64_to_cpu(counter->ext.counters[0].IfTxDroppedFrames); } } if_stat_out: mutex_unlock(&priv->counters_table.mutex); mlx4_free_cmd_mailbox(dev, if_stat_mailbox); return err; } EXPORT_SYMBOL_GPL(mlx4_get_vport_ethtool_stats); static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; int port; __be32 ib_port_default_caps; err = mlx4_init_uar_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "user access region table (err=%d), aborting.\n", err); return err; } err = mlx4_uar_alloc(dev, &priv->driver_uar); if (err) { mlx4_err(dev, "Failed to allocate driver access region " "(err=%d), aborting.\n", err); goto err_uar_table_free; } priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if 
(!priv->kar) { mlx4_err(dev, "Couldn't map kernel access region, " "aborting.\n"); err = -ENOMEM; goto err_uar_free; } err = mlx4_init_pd_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "protection domain table (err=%d), aborting.\n", err); goto err_kar_unmap; } err = mlx4_init_xrcd_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "reliable connection domain table (err=%d), " "aborting.\n", err); goto err_pd_table_free; } err = mlx4_init_mr_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "memory region table (err=%d), aborting.\n", err); goto err_xrcd_table_free; } if (!mlx4_is_slave(dev)) { err = mlx4_init_mcg_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "multicast group table (err=%d), aborting.\n", err); goto err_mr_table_free; } } err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "event queue table (err=%d), aborting.\n", err); goto err_mcg_table_free; } err = mlx4_cmd_use_events(dev); if (err) { mlx4_err(dev, "Failed to switch to event-driven " "firmware commands (err=%d), aborting.\n", err); goto err_eq_table_free; } err = mlx4_NOP(dev); if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { mlx4_warn(dev, "NOP command failed to generate MSI-X " "interrupt IRQ %d).\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_warn(dev, "Trying again without MSI-X.\n"); } else { mlx4_err(dev, "NOP command failed to generate interrupt " "(IRQ %d), aborting.\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } goto err_cmd_poll; } mlx4_dbg(dev, "NOP command IRQ test passed\n"); err = mlx4_init_cq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "completion queue table (err=%d), aborting.\n", err); goto err_cmd_poll; } err = mlx4_init_srq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "shared receive queue table (err=%d), aborting.\n", err); goto err_cq_table_free; } err = mlx4_init_qp_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "queue pair table (err=%d), aborting.\n", err); goto err_srq_table_free; } err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table (err=%d), " "aborting.\n", err); goto err_qp_table_free; } if (!mlx4_is_slave(dev)) { for (port = 1; port <= dev->caps.num_ports; port++) { ib_port_default_caps = 0; err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); if (err) mlx4_warn(dev, "failed to get port %d default " "ib capabilities (%d). Continuing " "with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; /* initialize per-slave default ib port capabilities */ if (mlx4_is_master(dev)) { int i; for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; priv->mfunc.master.slave_state[i].ib_cap_mask[port] = ib_port_default_caps; } } dev->caps.port_ib_mtu[port] = IB_MTU_4096; err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 
dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d (err=%d), " "aborting\n", port, err); goto err_counters_table_free; } } } return 0; err_counters_table_free: mlx4_cleanup_counters_table(dev); err_qp_table_free: mlx4_cleanup_qp_table(dev); err_srq_table_free: mlx4_cleanup_srq_table(dev); err_cq_table_free: mlx4_cleanup_cq_table(dev); err_cmd_poll: mlx4_cmd_use_polling(dev); err_eq_table_free: mlx4_cleanup_eq_table(dev); err_mcg_table_free: if (!mlx4_is_slave(dev)) mlx4_cleanup_mcg_table(dev); err_mr_table_free: mlx4_cleanup_mr_table(dev); err_xrcd_table_free: mlx4_cleanup_xrcd_table(dev); err_pd_table_free: mlx4_cleanup_pd_table(dev); err_kar_unmap: iounmap(priv->kar); err_uar_free: mlx4_uar_free(dev, &priv->driver_uar); err_uar_table_free: mlx4_cleanup_uar_table(dev); return err; } static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; - int nreq = min_t(int, dev->caps.num_ports * - min_t(int, num_possible_cpus() + 1, MAX_MSIX_P_PORT) - + MSIX_LEGACY_SZ, MAX_MSIX); int err; int i; if (msi_x) { + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); if (msi_x > 1 && !mlx4_is_mfunc(dev)) nreq = min_t(int, nreq, msi_x); entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) goto no_msi; for (i = 0; i < nreq; ++i) entries[i].entry = i; retry: err = pci_enable_msix(dev->pdev, entries, nreq); if (err) { /* Try again if at least 2 vectors are available */ if (err > 1) { mlx4_info(dev, "Requested %d vectors, " "but only %d MSI-X vectors available, " "trying again\n", nreq, err); nreq = err; goto retry; } kfree(entries); /* if error, or can't alloc even 1 IRQ */ if (err < 0) { mlx4_err(dev, "No IRQs left, device can't " "be started.\n"); goto no_irq; } goto no_msi; } if (nreq < MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { /*Working in legacy mode , all EQ's shared*/ dev->caps.comp_pool = 0; dev->caps.num_comp_vectors = nreq - 1; } else { dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; } for (i = 0; i < nreq; ++i) priv->eq_table.eq[i].irq = entries[i].vector; dev->flags |= MLX4_FLAG_MSI_X; kfree(entries); return; } no_msi: dev->caps.num_comp_vectors = 1; dev->caps.comp_pool = 0; for (i = 0; i < 2; ++i) priv->eq_table.eq[i].irq = dev->pdev->irq; return; no_irq: dev->caps.num_comp_vectors = 0; dev->caps.comp_pool = 0; return; } static void mlx4_init_hca_info(struct mlx4_dev *dev) { struct mlx4_hca_info *info = &mlx4_priv(dev)->hca_info; info->dev = dev; info->firmware_attr = (struct device_attribute)__ATTR(fw_ver, S_IRUGO, show_firmware_version, NULL); if (device_create_file(&dev->pdev->dev, &info->firmware_attr)) mlx4_err(dev, "Failed to add file firmware version"); info->hca_attr = (struct device_attribute)__ATTR(hca, S_IRUGO, show_hca, NULL); if (device_create_file(&dev->pdev->dev, &info->hca_attr)) mlx4_err(dev, "Failed to add file hca type"); info->board_attr = (struct device_attribute)__ATTR(board_id, S_IRUGO, show_board, NULL); if (device_create_file(&dev->pdev->dev, &info->board_attr)) mlx4_err(dev, "Failed to add file board id type"); } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; int err = 0; info->dev = dev; info->port = port; if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); 
info->base_qpn = mlx4_get_base_qpn(dev, port); } sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; if (mlx4_is_mfunc(dev)) info->port_attr.attr.mode = S_IRUGO; else { info->port_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_attr.store = set_port_type; } info->port_attr.show = show_port_type; sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_attr); if (err) { mlx4_err(dev, "Failed to create file for port %d\n", port); info->port = -1; } sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); info->port_mtu_attr.attr.name = info->dev_mtu_name; if (mlx4_is_mfunc(dev)) info->port_mtu_attr.attr.mode = S_IRUGO; else { info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_mtu_attr.store = set_port_ib_mtu; } info->port_mtu_attr.show = show_port_ib_mtu; sysfs_attr_init(&info->port_mtu_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); if (err) { mlx4_err(dev, "Failed to create mtu file for port %d\n", port); device_remove_file(&info->dev->pdev->dev, &info->port_attr); info->port = -1; } return err; } static void mlx4_cleanup_hca_info(struct mlx4_hca_info *info) { device_remove_file(&info->dev->pdev->dev, &info->firmware_attr); device_remove_file(&info->dev->pdev->dev, &info->board_attr); device_remove_file(&info->dev->pdev->dev, &info->hca_attr); } static void mlx4_cleanup_port_info(struct mlx4_port_info *info) { if (info->port < 0) return; device_remove_file(&info->dev->pdev->dev, &info->port_attr); device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); } static int mlx4_init_steering(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int num_entries = dev->caps.num_ports; int i, j; priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); if (!priv->steer) return -ENOMEM; for (i = 0; i < num_entries; i++) for (j = 0; j < MLX4_NUM_STEERS; j++) { INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); } return 0; } static void mlx4_clear_steering(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_steer_index *entry, *tmp_entry; struct mlx4_promisc_qp *pqp, *tmp_pqp; int num_entries = dev->caps.num_ports; int i, j; for (i = 0; i < num_entries; i++) { for (j = 0; j < MLX4_NUM_STEERS; j++) { list_for_each_entry_safe(pqp, tmp_pqp, &priv->steer[i].promisc_qps[j], list) { list_del(&pqp->list); kfree(pqp); } list_for_each_entry_safe(entry, tmp_entry, &priv->steer[i].steer_entries[j], list) { list_del(&entry->list); list_for_each_entry_safe(pqp, tmp_pqp, &entry->duplicates, list) { list_del(&pqp->list); kfree(pqp); } kfree(entry); } } } kfree(priv->steer); } static int extended_func_num(struct pci_dev *pdev) { return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); } #define MLX4_OWNER_BASE 0x8069c #define MLX4_OWNER_SIZE 4 static int mlx4_get_ownership(struct mlx4_dev *dev) { void __iomem *owner; u32 ret; if (pci_channel_offline(dev->pdev)) return -EIO; owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); return -ENOMEM; } ret = readl(owner); iounmap(owner); return (int) !!ret; } static void mlx4_free_ownership(struct mlx4_dev *dev) { void __iomem *owner; if (pci_channel_offline(dev->pdev)) return; owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); return; } writel(0, owner); 
msleep(1000); iounmap(owner); } static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) { struct mlx4_priv *priv; struct mlx4_dev *dev; int err; int port; int nvfs, prb_vf; pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, " "aborting.\n"); return err; } mlx4_get_val(num_vfs.dbdf2val.tbl, pci_physfn(pdev), 0, &nvfs); mlx4_get_val(probe_vf.dbdf2val.tbl, pci_physfn(pdev), 0, &prb_vf); if (nvfs > MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, "There are more VF's (%d) than allowed(%d)\n", nvfs, MLX4_MAX_NUM_VF); return -EINVAL; } if (nvfs < 0) { dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); return -EINVAL; } /* * Check for BARs. */ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing DCS, aborting." "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%x)\n", pci_dev_data, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing UAR, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); goto err_disable_pdev; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); goto err_release_regions; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " "consistent PCI DMA mask.\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " "aborting.\n"); goto err_release_regions; } } /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); priv = kzalloc(sizeof *priv, GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "Device struct alloc failed, " "aborting.\n"); err = -ENOMEM; goto err_release_regions; } dev = &priv->dev; dev->pdev = pdev; INIT_LIST_HEAD(&priv->dev_list); INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); mutex_init(&priv->port_mutex); INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); INIT_LIST_HEAD(&priv->bf_list); mutex_init(&priv->bf_mutex); dev->rev_id = pdev->revision; dev->numa_node = dev_to_node(&pdev->dev); /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { /* When acting as pf, we normally skip vfs unless explicitly * requested to probe them. */ if (nvfs && extended_func_num(pdev) > prb_vf) { mlx4_warn(dev, "Skipping virtual function:%d\n", extended_func_num(pdev)); err = -ENODEV; goto err_free_dev; } mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); dev->flags |= MLX4_FLAG_SLAVE; } else { /* We reset the device and enable SRIOV only for physical * devices. Try to claim ownership on the device; * if already taken, skip -- do not allow multiple PFs */ err = mlx4_get_ownership(dev); if (err) { if (err < 0) goto err_free_dev; else { mlx4_warn(dev, "Multiple PFs not yet supported." 
" Skipping PF.\n"); err = -EINVAL; goto err_free_dev; } } if (nvfs) { mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", nvfs); err = pci_enable_sriov(pdev, nvfs); if (err) { mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", err); err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); dev->flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER; dev->num_vfs = nvfs; } } atomic_set(&priv->opreq_count, 0); INIT_WORK(&priv->opreq_task, mlx4_opreq_action); /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left * the HCA in an undefined state. */ err = mlx4_reset(dev); if (err) { mlx4_err(dev, "Failed to reset HCA, aborting.\n"); goto err_sriov; } } slave_start: err = mlx4_cmd_init(dev); if (err) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); goto err_sriov; } /* In slave functions, the communication channel must be initialized * before posting commands. Also, init num_slaves before calling * mlx4_init_hca */ if (mlx4_is_mfunc(dev)) { if (mlx4_is_master(dev)) dev->num_slaves = MLX4_MAX_NUM_SLAVES; else { dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init slave mfunc" " interface, aborting.\n"); goto err_cmd; } } } err = mlx4_init_hca(dev); if (err) { if (err == -EACCES) { /* Not primary Physical function * Running in slave mode */ mlx4_cmd_cleanup(dev); dev->flags |= MLX4_FLAG_SLAVE; dev->flags &= ~MLX4_FLAG_MASTER; goto slave_start; } else goto err_mfunc; } /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init master mfunc" "interface, aborting.\n"); goto err_close; } } err = mlx4_alloc_eq_table(dev); if (err) goto err_master_mfunc; priv->msix_ctl.pool_bm = 0; mutex_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); /* no MSIX and no shared IRQ */ if (!dev->caps.num_comp_vectors && !dev->caps.comp_pool) { err = -ENOSPC; goto err_free_eq; } if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { err = -ENOSYS; mlx4_err(dev, "INTx is not supported in multi-function mode." 
" aborting.\n"); goto err_free_eq; } if (!mlx4_is_slave(dev)) { err = mlx4_init_steering(dev); if (err) goto err_free_eq; } err = mlx4_setup_hca(dev); if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; dev->caps.num_comp_vectors = 1; dev->caps.comp_pool = 0; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); } if (err) goto err_steer; mlx4_init_quotas(dev); mlx4_init_hca_info(dev); for (port = 1; port <= dev->caps.num_ports; port++) { err = mlx4_init_port_info(dev, port); if (err) goto err_port; } err = mlx4_register_device(dev); if (err) goto err_port; mlx4_request_modules(dev); mlx4_sense_init(dev); mlx4_start_sense(dev); priv->pci_dev_data = pci_dev_data; pci_set_drvdata(pdev, dev); return 0; err_port: for (--port; port >= 1; --port) mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev); err_steer: if (!mlx4_is_slave(dev)) mlx4_clear_steering(dev); err_free_eq: mlx4_free_eq_table(dev); err_master_mfunc: if (mlx4_is_master(dev)) { mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); mlx4_multi_func_cleanup(dev); } if (mlx4_is_slave(dev)) { kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); } err_close: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); mlx4_close_hca(dev); err_mfunc: if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); err_cmd: mlx4_cmd_cleanup(dev); err_sriov: if (dev->flags & MLX4_FLAG_SRIOV) pci_disable_sriov(pdev); if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); err_free_dev: kfree(priv); err_release_regions: pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static int __devinit mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { device_set_desc(pdev->dev.bsddev, mlx4_version); return __mlx4_init_one(pdev, id->driver_data); } static void mlx4_remove_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); int p; if (dev) { /* in SRIOV it is not allowed to unload the pf's * driver while there are alive vf's */ if (mlx4_is_master(dev)) { if (mlx4_how_many_lives_vf(dev)) mlx4_err(dev, "Removing PF when there are assigned VF's !!!\n"); } mlx4_stop_sense(dev); mlx4_unregister_device(dev); mlx4_cleanup_hca_info(&priv->hca_info); for (p = 1; p <= dev->caps.num_ports; p++) { mlx4_cleanup_port_info(&priv->port[p]); mlx4_CLOSE_PORT(dev, p); } if (mlx4_is_master(dev)) mlx4_free_resource_tracker(dev, RES_TR_FREE_SLAVES_ONLY); mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); if (mlx4_is_master(dev)) mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); mlx4_cleanup_uar_table(dev); if (!mlx4_is_slave(dev)) mlx4_clear_steering(dev); mlx4_free_eq_table(dev); if (mlx4_is_master(dev)) mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); mlx4_cmd_cleanup(dev); if 
(dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); if (dev->flags & MLX4_FLAG_SRIOV) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); } if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); kfree(priv); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } static int restore_current_port_types(struct mlx4_dev *dev, enum mlx4_port_type *types, enum mlx4_port_type *poss_types) { struct mlx4_priv *priv = mlx4_priv(dev); int err, i; mlx4_stop_sense(dev); mutex_lock(&priv->port_mutex); for (i = 0; i < dev->caps.num_ports; i++) dev->caps.possible_type[i + 1] = poss_types[i]; err = mlx4_change_port_types(dev, types); mlx4_start_sense(dev); mutex_unlock(&priv->port_mutex); return err; } int mlx4_restart_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); enum mlx4_port_type curr_type[MLX4_MAX_PORTS]; enum mlx4_port_type poss_type[MLX4_MAX_PORTS]; int pci_dev_data, err, i; pci_dev_data = priv->pci_dev_data; for (i = 0; i < dev->caps.num_ports; i++) { curr_type[i] = dev->caps.port_type[i + 1]; poss_type[i] = dev->caps.possible_type[i + 1]; } mlx4_remove_one(pdev); err = __mlx4_init_one(pdev, pci_dev_data); if (err) return err; dev = pci_get_drvdata(pdev); err = restore_current_port_types(dev, curr_type, poss_type); if (err) mlx4_err(dev, "mlx4_restart_one: could not restore original port types (%d)\n", err); return 0; } static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { /* MT25408 "Hermon" SDR */ { PCI_VDEVICE(MELLANOX, 0x6340), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" DDR */ { PCI_VDEVICE(MELLANOX, 0x634a), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" QDR */ { PCI_VDEVICE(MELLANOX, 0x6354), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" DDR PCIe gen2 */ { PCI_VDEVICE(MELLANOX, 0x6732), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" QDR PCIe gen2 */ { PCI_VDEVICE(MELLANOX, 0x673c), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" EN 10GigE */ { PCI_VDEVICE(MELLANOX, 0x6368), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ { PCI_VDEVICE(MELLANOX, 0x6750), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ { PCI_VDEVICE(MELLANOX, 0x6372), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ { PCI_VDEVICE(MELLANOX, 0x675a), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ { PCI_VDEVICE(MELLANOX, 0x6764), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ { PCI_VDEVICE(MELLANOX, 0x6746), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ { PCI_VDEVICE(MELLANOX, 0x676e), .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25400 Family [ConnectX-2 Virtual Function] */ { PCI_VDEVICE(MELLANOX, 0x1002), .driver_data = MLX4_PCI_DEV_IS_VF }, /* MT27500 Family [ConnectX-3] */ { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ { PCI_VDEVICE(MELLANOX, 0x1004), .driver_data = MLX4_PCI_DEV_IS_VF }, { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* 
MT27521 Family */ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ { 0, } }; MODULE_DEVICE_TABLE(pci, mlx4_pci_table); static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { mlx4_remove_one(pdev); return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { int ret = __mlx4_init_one(pdev, 0); return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static const struct pci_error_handlers mlx4_err_handler = { .error_detected = mlx4_pci_err_detected, .slot_reset = mlx4_pci_slot_reset, }; static int suspend(struct pci_dev *pdev, pm_message_t state) { mlx4_remove_one(pdev); return 0; } static int resume(struct pci_dev *pdev) { return __mlx4_init_one(pdev, 0); } static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, .remove = __devexit_p(mlx4_remove_one), .suspend = suspend, .resume = resume, .err_handler = &mlx4_err_handler, }; static int __init mlx4_verify_params(void) { int status; status = update_defaults(&port_type_array); if (status == INVALID_STR) { if (mlx4_fill_dbdf2val_tbl(&port_type_array.dbdf2val)) return -1; } else if (status == INVALID_DATA) { return -1; } status = update_defaults(&num_vfs); if (status == INVALID_STR) { if (mlx4_fill_dbdf2val_tbl(&num_vfs.dbdf2val)) return -1; } else if (status == INVALID_DATA) { return -1; } status = update_defaults(&probe_vf); if (status == INVALID_STR) { if (mlx4_fill_dbdf2val_tbl(&probe_vf.dbdf2val)) return -1; } else if (status == INVALID_DATA) { return -1; } if (msi_x < 0) { pr_warn("mlx4_core: bad msi_x: %d\n", msi_x); return -1; } if ((log_num_mac < 0) || (log_num_mac > 7)) { pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; } if (log_num_vlan != 0) pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", MLX4_LOG_NUM_VLANS); if (mlx4_set_4k_mtu != -1) pr_warning("mlx4_core: set_4k_mtu - obsolete module param\n"); if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) { pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); return -1; } if (mlx4_log_num_mgm_entry_size != -1 && (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " "in legal range (-1 or %d..%d)\n", mlx4_log_num_mgm_entry_size, MLX4_MIN_MGM_LOG_ENTRY_SIZE, MLX4_MAX_MGM_LOG_ENTRY_SIZE); return -1; } if (mod_param_profile.num_qp < 18 || mod_param_profile.num_qp > 23) { pr_warning("mlx4_core: bad log_num_qp: %d\n", mod_param_profile.num_qp); return -1; } if (mod_param_profile.num_srq < 10) { pr_warning("mlx4_core: too low log_num_srq: %d\n", mod_param_profile.num_srq); return -1; } if (mod_param_profile.num_cq < 10) { pr_warning("mlx4_core: too low log_num_cq: %d\n", mod_param_profile.num_cq); return -1; } if (mod_param_profile.num_mpt < 10) { pr_warning("mlx4_core: too low log_num_mpt: %d\n", mod_param_profile.num_mpt); return -1; } if (mod_param_profile.num_mtt_segs && 
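/*
 * The preceding num_mtt_segs check means a value of 0 (unset) skips
 * this lower bound; an explicit log_num_mtt must be at least 15 and at
 * most MLX4_MAX_LOG_NUM_MTT (checked just below).
 */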
mod_param_profile.num_mtt_segs < 15) { pr_warning("mlx4_core: too low log_num_mtt: %d\n", mod_param_profile.num_mtt_segs); return -1; } if (mod_param_profile.num_mtt_segs > MLX4_MAX_LOG_NUM_MTT) { pr_warning("mlx4_core: too high log_num_mtt: %d\n", mod_param_profile.num_mtt_segs); return -1; } return 0; } static int __init mlx4_init(void) { int ret; if (mlx4_verify_params()) return -EINVAL; mlx4_catas_init(); mlx4_wq = create_singlethread_workqueue("mlx4"); if (!mlx4_wq) return -ENOMEM; if (enable_sys_tune) sys_tune_init(); ret = pci_register_driver(&mlx4_driver); if (ret < 0) goto err; return 0; err: if (enable_sys_tune) sys_tune_fini(); destroy_workqueue(mlx4_wq); return ret; } static void __exit mlx4_cleanup(void) { if (enable_sys_tune) sys_tune_fini(); pci_unregister_driver(&mlx4_driver); destroy_workqueue(mlx4_wq); } module_init_order(mlx4_init, SI_ORDER_MIDDLE); module_exit(mlx4_cleanup); #include static int mlx4_evhand(module_t mod, int event, void *arg) { return (0); } static moduledata_t mlx4_mod = { .name = "mlx4", .evhand = mlx4_evhand, }; MODULE_VERSION(mlx4, 1); DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY); Index: stable/10/sys/ofed/drivers/net/mlx4/profile.c =================================================================== --- stable/10/sys/ofed/drivers/net/mlx4/profile.c (revision 318535) +++ stable/10/sys/ofed/drivers/net/mlx4/profile.c (revision 318536) @@ -1,249 +1,256 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include #include "mlx4.h" #include "fw.h" enum { MLX4_RES_QP, MLX4_RES_RDMARC, MLX4_RES_ALTC, MLX4_RES_AUXC, MLX4_RES_SRQ, MLX4_RES_CQ, MLX4_RES_EQ, MLX4_RES_DMPT, MLX4_RES_CMPT, MLX4_RES_MTT, MLX4_RES_MCG, MLX4_RES_NUM }; static const char *res_name[] = { [MLX4_RES_QP] = "QP", [MLX4_RES_RDMARC] = "RDMARC", [MLX4_RES_ALTC] = "ALTC", [MLX4_RES_AUXC] = "AUXC", [MLX4_RES_SRQ] = "SRQ", [MLX4_RES_CQ] = "CQ", [MLX4_RES_EQ] = "EQ", [MLX4_RES_DMPT] = "DMPT", [MLX4_RES_CMPT] = "CMPT", [MLX4_RES_MTT] = "MTT", [MLX4_RES_MCG] = "MCG", }; u64 mlx4_make_profile(struct mlx4_dev *dev, struct mlx4_profile *request, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *init_hca) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource { u64 size; u64 start; int type; u64 num; int log_num; }; u64 total_size = 0; struct mlx4_resource *profile; struct mlx4_resource tmp; int i, j; profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); if (!profile) return -ENOMEM; profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz; profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz; profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz; profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); profile[MLX4_RES_QP].num = request->num_qp; profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; profile[MLX4_RES_ALTC].num = request->num_qp; profile[MLX4_RES_AUXC].num = request->num_qp; profile[MLX4_RES_SRQ].num = request->num_srq; profile[MLX4_RES_CQ].num = request->num_cq; profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs : min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; profile[MLX4_RES_MTT].num = ((u64)request->num_mtt_segs) * (1 << log_mtts_per_seg); profile[MLX4_RES_MCG].num = request->num_mcg; for (i = 0; i < MLX4_RES_NUM; ++i) { profile[i].type = i; profile[i].num = roundup_pow_of_two(profile[i].num); profile[i].log_num = ilog2(profile[i].num); profile[i].size *= profile[i].num; profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); } /* * Sort the resources in decreasing order of size. Since they * all have sizes that are powers of 2, we'll be able to keep * resources aligned to their size and pack them without gaps * using the sorted order. 
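 *
 * (The sort below is a simple O(n^2) bubble sort over the eleven
 * MLX4_RES_NUM entries, which is plenty at this size.  Given the
 * power-of-two sizes noted above, each start offset is a sum of
 * larger-or-equal powers of two and hence a multiple of the size
 * being placed, so every region stays naturally aligned.)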
*/ for (i = MLX4_RES_NUM; i > 0; --i) for (j = 1; j < i; ++j) { if (profile[j].size > profile[j - 1].size) { tmp = profile[j]; profile[j] = profile[j - 1]; profile[j - 1] = tmp; } } for (i = 0; i < MLX4_RES_NUM; ++i) { if (profile[i].size) { profile[i].start = total_size; total_size += profile[i].size; } if (total_size > dev_cap->max_icm_sz) { mlx4_err(dev, "Profile requires 0x%llx bytes; " "won't fit in 0x%llx bytes of context memory.\n", (unsigned long long) total_size, (unsigned long long) dev_cap->max_icm_sz); kfree(profile); return -ENOMEM; } if (profile[i].size) mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " "size 0x%10llx\n", i, res_name[profile[i].type], profile[i].log_num, (unsigned long long) profile[i].start, (unsigned long long) profile[i].size); } mlx4_dbg(dev, "HCA context memory: reserving %d KB\n", (int) (total_size >> 10)); for (i = 0; i < MLX4_RES_NUM; ++i) { switch (profile[i].type) { case MLX4_RES_QP: dev->caps.num_qps = profile[i].num; init_hca->qpc_base = profile[i].start; init_hca->log_num_qps = profile[i].log_num; break; case MLX4_RES_RDMARC: for (priv->qp_table.rdmarc_shift = 0; request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; ++priv->qp_table.rdmarc_shift) ; /* nothing */ dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; priv->qp_table.rdmarc_base = (u32) profile[i].start; init_hca->rdmarc_base = profile[i].start; init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; break; case MLX4_RES_ALTC: init_hca->altc_base = profile[i].start; break; case MLX4_RES_AUXC: init_hca->auxc_base = profile[i].start; break; case MLX4_RES_SRQ: dev->caps.num_srqs = profile[i].num; init_hca->srqc_base = profile[i].start; init_hca->log_num_srqs = profile[i].log_num; break; case MLX4_RES_CQ: dev->caps.num_cqs = profile[i].num; init_hca->cqc_base = profile[i].start; init_hca->log_num_cqs = profile[i].log_num; break; case MLX4_RES_EQ: - dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, - MAX_MSIX)); - init_hca->eqc_base = profile[i].start; - init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + init_hca->log_num_eqs = 0x1f; + init_hca->eqc_base = profile[i].start; + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; + } else { + dev->caps.num_eqs = roundup_pow_of_two( + min_t(unsigned, + dev_cap->max_eqs, MAX_MSIX)); + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + } break; case MLX4_RES_DMPT: dev->caps.num_mpts = profile[i].num; priv->mr_table.mpt_base = profile[i].start; init_hca->dmpt_base = profile[i].start; init_hca->log_mpt_sz = profile[i].log_num; break; case MLX4_RES_CMPT: init_hca->cmpt_base = profile[i].start; break; case MLX4_RES_MTT: dev->caps.num_mtts = profile[i].num; priv->mr_table.mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start; break; case MLX4_RES_MCG: init_hca->mc_base = profile[i].start; init_hca->log_mc_entry_sz = ilog2(mlx4_get_mgm_entry_size(dev)); init_hca->log_mc_table_sz = profile[i].log_num; if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { dev->caps.num_mgms = profile[i].num; } else { init_hca->log_mc_hash_sz = profile[i].log_num - 1; dev->caps.num_mgms = profile[i].num >> 1; dev->caps.num_amgms = profile[i].num >> 1; } break; default: break; } } /* * PDs don't take any HCA memory, but we assign them as part * of the HCA profile anyway. 
*/ dev->caps.num_pds = MLX4_NUM_PDS; kfree(profile); return total_size; } Index: stable/10/sys/ofed/include/linux/mlx4/device.h =================================================================== --- stable/10/sys/ofed/include/linux/mlx4/device.h (revision 318535) +++ stable/10/sys/ofed/include/linux/mlx4/device.h (revision 318536) @@ -1,1345 +1,1347 @@ /* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX4_DEVICE_H #define MLX4_DEVICE_H #include #include #include #include #include #include #include #include #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 #define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 #define MLX4_ROCE_MAX_GIDS 128 #define MLX4_ROCE_PF_GIDS 16 #define MLX4_NUM_UP 8 #define MLX4_NUM_TC 8 #define MLX4_MAX_100M_UNITS_VAL 255 /* * work around: can't set values * greater then this value when * using 100 Mbps units. */ #define MLX4_RATELIMIT_100M_UNITS 3 /* 100 Mbps */ #define MLX4_RATELIMIT_1G_UNITS 4 /* 1 Gbps */ #define MLX4_RATELIMIT_DEFAULT 0x00ff #define CORE_CLOCK_MASK 0xffffffffffffULL enum { MLX4_FLAG_MSI_X = 1 << 0, MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, MLX4_FLAG_MASTER = 1 << 2, MLX4_FLAG_SLAVE = 1 << 3, MLX4_FLAG_SRIOV = 1 << 4, MLX4_FLAG_DEV_NUM_STR = 1 << 5, MLX4_FLAG_OLD_REG_MAC = 1 << 6, }; enum { MLX4_PORT_CAP_IS_SM = 1 << 1, MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19, }; enum { MLX4_MAX_PORTS = 2, MLX4_MAX_PORT_PKEYS = 128 }; /* base qkey for use in sriov tunnel-qp/proxy-qp communication. * These qkeys must not be allowed for general use. This is a 64k range, * and to test for violation, we use the mask (protect against future chg). */ #define MLX4_RESERVED_QKEY_BASE (0xFFFF0000) #define MLX4_RESERVED_QKEY_MASK (0xFFFF0000) enum { MLX4_BOARD_ID_LEN = 64, MLX4_VSD_LEN = 208 }; enum { MLX4_MAX_NUM_PF = 16, MLX4_MAX_NUM_VF = 64, MLX4_MFUNC_MAX = 80, MLX4_MAX_EQ_NUM = 1024, MLX4_MFUNC_EQ_NUM = 4, MLX4_MFUNC_MAX_EQES = 8, MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) }; /* Driver supports 3 diffrent device methods to manage traffic steering: * -device managed - High level API for ib and eth flow steering. FW is * managing flow steering tables. * - B0 steering mode - Common low level API for ib and (if supported) eth. * - A0 steering mode - Limited low level API for eth. 
In case of IB, * B0 mode is in use. */ enum { MLX4_STEERING_MODE_A0, MLX4_STEERING_MODE_B0, MLX4_STEERING_MODE_DEVICE_MANAGED }; static inline const char *mlx4_steering_mode_str(int steering_mode) { switch (steering_mode) { case MLX4_STEERING_MODE_A0: return "A0 steering"; case MLX4_STEERING_MODE_B0: return "B0 steering"; case MLX4_STEERING_MODE_DEVICE_MANAGED: return "Device managed flow steering"; default: return "Unrecognize steering mode"; } } enum { MLX4_DEV_CAP_FLAG_RC = 1LL << 0, MLX4_DEV_CAP_FLAG_UC = 1LL << 1, MLX4_DEV_CAP_FLAG_UD = 1LL << 2, MLX4_DEV_CAP_FLAG_XRC = 1LL << 3, MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12, MLX4_DEV_CAP_FLAG_BLH = 1LL << 15, MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16, MLX4_DEV_CAP_FLAG_APM = 1LL << 17, MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19, MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20, MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21, MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_CROSS_CHANNEL = 1LL << 44, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, MLX4_DEV_CAP_FLAG_COUNTERS_EXT = 1LL << 49, MLX4_DEV_CAP_FLAG_SET_PORT_ETH_SCHED = 1LL << 53, MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, MLX4_DEV_CAP_FLAG_FAST_DROP = 1LL << 57, MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61, MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62 }; enum { MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0, MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, MLX4_DEV_CAP_FLAG2_FSM = 1LL << 4, MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 5, MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 6, MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1LL << 7, MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 8, MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 9, MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 10, MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 11, MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 12, MLX4_DEV_CAP_FLAG2_TS = 1LL << 13, MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1LL << 14, MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 15, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 16, MLX4_DEV_CAP_FLAG2_FS_EN_NCSI = 1LL << 17, MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, MLX4_DEV_CAP_FLAG2_DMFS_TAG_MODE = 1LL << 19, MLX4_DEV_CAP_FLAG2_ROCEV2 = 1LL << 20, MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 21, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 22, MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 23, MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1LL << 24, MLX4_DEV_CAP_FLAG2_RX_CSUM_MODE = 1LL << 25, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 26, }; /* bit enums for an 8-bit flags field indicating special use * QPs which require special handling in qp_reserve_range. * Currently, this only includes QPs used by the ETH interface, * where we expect to use blueflame. These QPs must not have * bits 6 and 7 set in their qp number. * * This enum may use only bits 0..7. */ enum { MLX4_RESERVE_BF_QP = 1 << 7, }; enum { MLX4_DEV_CAP_CQ_FLAG_IO = 1 << 0 }; enum { MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0 }; /* bit enums for an 8-bit flags field indicating special use * QPs which require special handling in qp_reserve_range. 
* Currently, this only includes QPs used by the ETH interface, * where we expect to use blueflame. These QPs must not have * bits 6 and 7 set in their qp number. * * This enum may use only bits 0..7. */ enum { MLX4_RESERVE_ETH_BF_QP = 1 << 7, }; enum { MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 }; enum { MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 }; enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 }; #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) enum { MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1, MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, }; enum mlx4_event { MLX4_EVENT_TYPE_COMP = 0x00, MLX4_EVENT_TYPE_PATH_MIG = 0x01, MLX4_EVENT_TYPE_COMM_EST = 0x02, MLX4_EVENT_TYPE_SQ_DRAINED = 0x03, MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14, MLX4_EVENT_TYPE_CQ_ERROR = 0x04, MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07, MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, MLX4_EVENT_TYPE_CMD = 0x0a, MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a, MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e, MLX4_EVENT_TYPE_NONE = 0xff, }; enum { MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 }; enum { MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1, MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2, }; enum { MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, }; enum slave_port_state { SLAVE_PORT_DOWN = 0, SLAVE_PENDING_UP, SLAVE_PORT_UP, }; enum slave_port_gen_event { SLAVE_PORT_GEN_EVENT_DOWN = 0, SLAVE_PORT_GEN_EVENT_UP, SLAVE_PORT_GEN_EVENT_NONE, }; enum slave_port_state_event { MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, MLX4_PORT_STATE_DEV_EVENT_PORT_UP, MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID, MLX4_PORT_STATE_IB_EVENT_GID_INVALID, }; enum { MLX4_PERM_LOCAL_READ = 1 << 10, MLX4_PERM_LOCAL_WRITE = 1 << 11, MLX4_PERM_REMOTE_READ = 1 << 12, MLX4_PERM_REMOTE_WRITE = 1 << 13, MLX4_PERM_ATOMIC = 1 << 14, MLX4_PERM_BIND_MW = 1 << 15, }; enum { MLX4_OPCODE_NOP = 0x00, MLX4_OPCODE_SEND_INVAL = 0x01, MLX4_OPCODE_RDMA_WRITE = 0x08, MLX4_OPCODE_RDMA_WRITE_IMM = 0x09, MLX4_OPCODE_SEND = 0x0a, MLX4_OPCODE_SEND_IMM = 0x0b, MLX4_OPCODE_LSO = 0x0e, MLX4_OPCODE_RDMA_READ = 0x10, MLX4_OPCODE_ATOMIC_CS = 0x11, MLX4_OPCODE_ATOMIC_FA = 0x12, MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14, MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15, MLX4_OPCODE_BIND_MW = 0x18, MLX4_OPCODE_FMR = 0x19, MLX4_OPCODE_LOCAL_INVAL = 0x1b, MLX4_OPCODE_CONFIG_CMD = 0x1f, MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX4_RECV_OPCODE_SEND = 0x01, MLX4_RECV_OPCODE_SEND_IMM = 0x02, MLX4_RECV_OPCODE_SEND_INVAL = 0x03, MLX4_CQE_OPCODE_ERROR = 0x1e, MLX4_CQE_OPCODE_RESIZE = 0x16, }; enum { MLX4_STAT_RATE_OFFSET = 5 }; enum mlx4_protocol { MLX4_PROT_IB_IPV6 = 0, MLX4_PROT_ETH, MLX4_PROT_IB_IPV4, MLX4_PROT_FCOE }; enum { MLX4_MTT_FLAG_PRESENT = 1 }; enum { MLX4_MAX_MTT_SHIFT = 31 }; enum mlx4_qp_region { MLX4_QP_REGION_FW = 0, MLX4_QP_REGION_ETH_ADDR, MLX4_QP_REGION_FC_ADDR, MLX4_QP_REGION_FC_EXCH, 
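/* count sentinel: sizes the per-region reserved_qps_cnt[] and
 * reserved_qps_base[] arrays in struct mlx4_caps below */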
MLX4_NUM_QP_REGION }; enum mlx4_port_type { MLX4_PORT_TYPE_NONE = 0, MLX4_PORT_TYPE_IB = 1, MLX4_PORT_TYPE_ETH = 2, MLX4_PORT_TYPE_AUTO = 3, MLX4_PORT_TYPE_NA = 4 }; enum mlx4_special_vlan_idx { MLX4_NO_VLAN_IDX = 0, MLX4_VLAN_MISS_IDX, MLX4_VLAN_REGULAR }; enum mlx4_steer_type { MLX4_MC_STEER = 0, MLX4_UC_STEER, MLX4_NUM_STEERS }; enum { MLX4_NUM_FEXCH = 64 * 1024, }; enum { MLX4_MAX_FAST_REG_PAGES = 511, }; enum { MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, }; /* Port mgmt change event handling */ enum { MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0, MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1, MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2, MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3, MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) enum mlx4_module_id { MLX4_MODULE_ID_SFP = 0x3, MLX4_MODULE_ID_QSFP = 0xC, MLX4_MODULE_ID_QSFP_PLUS = 0xD, MLX4_MODULE_ID_QSFP28 = 0x11, }; static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; } struct mlx4_phys_caps { u32 gid_phys_table_len[MLX4_MAX_PORTS + 1]; u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1]; u32 num_phys_eqs; u32 base_sqpn; u32 base_proxy_sqpn; u32 base_tunnel_sqpn; }; struct mlx4_caps { u64 fw_ver; u32 function; int num_ports; int vl_cap[MLX4_MAX_PORTS + 1]; int ib_mtu_cap[MLX4_MAX_PORTS + 1]; __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1]; u64 def_mac[MLX4_MAX_PORTS + 1]; int eth_mtu_cap[MLX4_MAX_PORTS + 1]; int gid_table_len[MLX4_MAX_PORTS + 1]; int pkey_table_len[MLX4_MAX_PORTS + 1]; int trans_type[MLX4_MAX_PORTS + 1]; int vendor_oui[MLX4_MAX_PORTS + 1]; int wavelength[MLX4_MAX_PORTS + 1]; u64 trans_code[MLX4_MAX_PORTS + 1]; int local_ca_ack_delay; int num_uars; u32 uar_page_size; int bf_reg_size; int bf_regs_per_page; int max_sq_sg; int max_rq_sg; int num_qps; int max_wqes; int max_sq_desc_sz; int max_rq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; u32 *qp0_proxy; u32 *qp1_proxy; u32 *qp0_tunnel; u32 *qp1_tunnel; int num_srqs; int max_srq_wqes; int max_srq_sge; int reserved_srqs; int num_cqs; int max_cqes; int reserved_cqs; + int num_sys_eqs; int num_eqs; int reserved_eqs; int num_comp_vectors; int comp_pool; int num_mpts; int max_fmr_maps; u64 num_mtts; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; int reserved_uars; int num_mgms; int num_amgms; int reserved_mcgs; int num_qp_per_mgm; int steering_mode; int num_pds; int reserved_pds; int max_xrcds; int reserved_xrcds; int mtt_entry_sz; u32 max_msg_sz; u32 page_size_cap; u64 flags; u64 flags2; u32 bmme_flags; u32 reserved_lkey; u16 stat_rate_support; u8 cq_timestamp; u8 port_width_cap[MLX4_MAX_PORTS + 1]; int max_gso_sz; int max_rss_tbl_sz; int reserved_qps_cnt[MLX4_NUM_QP_REGION]; int reserved_qps; int reserved_qps_base[MLX4_NUM_QP_REGION]; int log_num_macs; int log_num_vlans; enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; u8 supported_type[MLX4_MAX_PORTS + 1]; u8 suggested_type[MLX4_MAX_PORTS + 1]; u8 default_sense[MLX4_MAX_PORTS + 1]; u32 port_mask[MLX4_MAX_PORTS + 1]; enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; u32 max_counters; u8 port_ib_mtu[MLX4_MAX_PORTS + 1]; u16 sqp_demux; u32 sync_qp; u32 cq_flags; u32 eqe_size; u32 cqe_size; u8 eqe_factor; u32 userspace_caps; /* userspace must be aware to */ u32 function_caps; /* functions must be aware to */ u8 fast_drop; u16 hca_core_clock; u32 
max_basic_counters; u32 max_extended_counters; u8 def_counter_index[MLX4_MAX_PORTS + 1]; u8 alloc_res_qp_mask; }; struct mlx4_buf_list { void *buf; dma_addr_t map; }; struct mlx4_buf { struct mlx4_buf_list direct; struct mlx4_buf_list *page_list; int nbufs; int npages; int page_shift; }; struct mlx4_mtt { u32 offset; int order; int page_shift; }; enum { MLX4_DB_PER_PAGE = PAGE_SIZE / 4 }; struct mlx4_db_pgdir { struct list_head list; DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); unsigned long *bits[2]; __be32 *db_page; dma_addr_t db_dma; }; struct mlx4_ib_user_db_page; struct mlx4_db { __be32 *db; union { struct mlx4_db_pgdir *pgdir; struct mlx4_ib_user_db_page *user_page; } u; dma_addr_t dma; int index; int order; }; struct mlx4_hwq_resources { struct mlx4_db db; struct mlx4_mtt mtt; struct mlx4_buf buf; }; struct mlx4_mr { struct mlx4_mtt mtt; u64 iova; u64 size; u32 key; u32 pd; u32 access; int enabled; }; enum mlx4_mw_type { MLX4_MW_TYPE_1 = 1, MLX4_MW_TYPE_2 = 2, }; struct mlx4_mw { u32 key; u32 pd; enum mlx4_mw_type type; int enabled; }; struct mlx4_fmr { struct mlx4_mr mr; struct mlx4_mpt_entry *mpt; __be64 *mtts; dma_addr_t dma_handle; int max_pages; int max_maps; int maps; u8 page_shift; }; struct mlx4_uar { unsigned long pfn; int index; struct list_head bf_list; unsigned free_bf_bmap; void __iomem *map; void __iomem *bf_map; }; struct mlx4_bf { unsigned long offset; int buf_size; struct mlx4_uar *uar; void __iomem *reg; }; struct mlx4_cq { void (*comp) (struct mlx4_cq *); void (*event) (struct mlx4_cq *, enum mlx4_event); struct mlx4_uar *uar; u32 cons_index; __be32 *set_ci_db; __be32 *arm_db; int arm_sn; int cqn; unsigned vector; atomic_t refcount; struct completion free; int eqn; u16 irq; }; struct mlx4_qp { void (*event) (struct mlx4_qp *, enum mlx4_event); int qpn; atomic_t refcount; struct completion free; }; struct mlx4_srq { void (*event) (struct mlx4_srq *, enum mlx4_event); int srqn; int max; int max_gs; int wqe_shift; atomic_t refcount; struct completion free; }; struct mlx4_av { __be32 port_pd; u8 reserved1; u8 g_slid; __be16 dlid; u8 reserved2; u8 gid_index; u8 stat_rate; u8 hop_limit; __be32 sl_tclass_flowlabel; u8 dgid[16]; }; struct mlx4_eth_av { __be32 port_pd; u8 reserved1; u8 smac_idx; u16 reserved2; u8 reserved3; u8 gid_index; u8 stat_rate; u8 hop_limit; __be32 sl_tclass_flowlabel; u8 dgid[16]; u8 s_mac[6]; u8 reserved4[2]; __be16 vlan; u8 mac[6]; }; union mlx4_ext_av { struct mlx4_av ib; struct mlx4_eth_av eth; }; struct mlx4_if_stat_control { u8 reserved1[3]; /* Extended counters enabled */ u8 cnt_mode; /* Number of interfaces */ __be32 num_of_if; __be32 reserved[2]; }; struct mlx4_if_stat_basic { struct mlx4_if_stat_control control; struct { __be64 IfRxFrames; __be64 IfRxOctets; __be64 IfTxFrames; __be64 IfTxOctets; } counters[]; }; #define MLX4_IF_STAT_BSC_SZ(ports)(sizeof(struct mlx4_if_stat_extended) +\ sizeof(((struct mlx4_if_stat_extended *)0)->\ counters[0]) * ports) struct mlx4_if_stat_extended { struct mlx4_if_stat_control control; struct { __be64 IfRxUnicastFrames; __be64 IfRxUnicastOctets; __be64 IfRxMulticastFrames; __be64 IfRxMulticastOctets; __be64 IfRxBroadcastFrames; __be64 IfRxBroadcastOctets; __be64 IfRxNoBufferFrames; __be64 IfRxNoBufferOctets; __be64 IfRxErrorFrames; __be64 IfRxErrorOctets; __be32 reserved[39]; __be64 IfTxUnicastFrames; __be64 IfTxUnicastOctets; __be64 IfTxMulticastFrames; __be64 IfTxMulticastOctets; __be64 IfTxBroadcastFrames; __be64 IfTxBroadcastOctets; __be64 IfTxDroppedFrames; 
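/*
 * Editorial note, not part of this change: mlx4_fw_ver(), defined just
 * before struct mlx4_caps above, packs the firmware version as
 * (major << 32) | (minor << 16) | subminor, so for example
 * mlx4_fw_ver(2, 35, 5100) == 0x00000002002313ECULL.  A hedged sketch of
 * the matching unpacking; the helper name is hypothetical:
 *
 *	static inline void my_fw_ver_unpack(u64 ver, u32 *maj, u32 *min,
 *					    u32 *sub)
 *	{
 *		*maj = ver >> 32;
 *		*min = (ver >> 16) & 0xffff;
 *		*sub = ver & 0xffff;
 *	}
 */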
__be64 IfTxDroppedOctets; __be64 IfTxRequestedFramesSent; __be64 IfTxGeneratedFramesSent; __be64 IfTxTsoOctets; } __packed counters[]; }; #define MLX4_IF_STAT_EXT_SZ(ports) (sizeof(struct mlx4_if_stat_extended) +\ sizeof(((struct mlx4_if_stat_extended *)\ 0)->counters[0]) * ports) union mlx4_counter { struct mlx4_if_stat_control control; struct mlx4_if_stat_basic basic; struct mlx4_if_stat_extended ext; }; #define MLX4_IF_STAT_SZ(ports) MLX4_IF_STAT_EXT_SZ(ports) struct mlx4_quotas { int qp; int cq; int srq; int mpt; int mtt; int counter; int xrcd; }; struct mlx4_dev { struct pci_dev *pdev; unsigned long flags; unsigned long num_slaves; struct mlx4_caps caps; struct mlx4_phys_caps phys_caps; struct mlx4_quotas quotas; struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; u16 vsd_vendor_id; char vsd[MLX4_VSD_LEN]; int num_vfs; int numa_node; int oper_log_mgm_entry_size; u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; }; struct mlx4_clock_params { u64 offset; u8 bar; u8 size; }; struct mlx4_eqe { u8 reserved1; u8 type; u8 reserved2; u8 subtype; union { u32 raw[6]; struct { __be32 cqn; } __packed comp; struct { u16 reserved1; __be16 token; u32 reserved2; u8 reserved3[3]; u8 status; __be64 out_param; } __packed cmd; struct { __be32 qpn; } __packed qp; struct { __be32 srqn; } __packed srq; struct { __be32 cqn; u32 reserved1; u8 reserved2[3]; u8 syndrome; } __packed cq_err; struct { u32 reserved1[2]; __be32 port; } __packed port_change; struct { #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 u32 reserved; u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; } __packed comm_channel_arm; struct { u8 port; u8 reserved[3]; __be64 mac; } __packed mac_update; struct { __be32 slave_id; } __packed flr_event; struct { __be16 current_temperature; __be16 warning_threshold; } __packed warming; struct { u8 reserved[3]; u8 port; union { struct { __be16 mstr_sm_lid; __be16 port_lid; __be32 changed_attr; u8 reserved[3]; u8 mstr_sm_sl; __be64 gid_prefix; } __packed port_info; struct { __be32 block_ptr; __be32 tbl_entries_mask; } __packed tbl_change_info; } params; } __packed port_mgmt_change; struct { u8 reserved[3]; u8 port; u32 reserved1[5]; } __packed bad_cable; } event; u8 slave_id; u8 reserved3[2]; u8 owner; } __packed; struct mlx4_init_port_param { int set_guid0; int set_node_guid; int set_si_guid; u16 mtu; int port_width_cap; u16 vl_cap; u16 max_gid; u16 max_pkey; u64 guid0; u64 node_guid; u64 si_guid; }; #define MAD_IFC_DATA_SZ 192 /* MAD IFC Mailbox */ struct mlx4_mad_ifc { u8 base_version; u8 mgmt_class; u8 class_version; u8 method; __be16 status; __be16 class_specific; __be64 tid; __be16 attr_id; __be16 resv; __be32 attr_mod; __be64 mkey; __be16 dr_slid; __be16 dr_dlid; u8 reserved[28]; u8 data[MAD_IFC_DATA_SZ]; } __packed; #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if ((type) == (dev)->caps.port_mask[(port)]) #define mlx4_foreach_non_ib_transport_port(port, dev) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB)) #define mlx4_foreach_ib_transport_port(port, dev) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) #define MLX4_INVALID_SLAVE_ID 0xFF #define MLX4_SINK_COUNTER_INDEX 0xff void handle_port_mgmt_change_event(struct work_struct *work); static inline int mlx4_master_func_num(struct mlx4_dev *dev) 
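/*
 * Editorial note, not part of this change: the mlx4_foreach_port()
 * iterators above expand to a 1-based for() loop over caps.num_ports
 * wrapped in an if() that filters on caps.port_mask[port].  A hedged
 * usage sketch; the MTU and pause settings are placeholders, not values
 * taken from this header:
 *
 *	int port, err = 0;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		err = mlx4_SET_PORT_general(dev, port, 1500, 1, 0, 1, 0);
 *
 * Because the macro ends in an if(), callers keep the body to a single
 * statement or a braced block, as above.
 */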
{ return dev->caps.function; } static inline int mlx4_is_master(struct mlx4_dev *dev) { return dev->flags & MLX4_FLAG_MASTER; } static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) { return dev->phys_caps.base_sqpn + 8 + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev); } static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) { return (qpn < dev->phys_caps.base_sqpn + 8 + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); } static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) { int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8; if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) return 1; return 0; } static inline int mlx4_is_mfunc(struct mlx4_dev *dev) { return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); } static inline int mlx4_is_slave(struct mlx4_dev *dev) { return dev->flags & MLX4_FLAG_SLAVE; } int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { if (BITS_PER_LONG == 64 || buf->nbufs == 1) return (u8 *)buf->direct.buf + offset; else return (u8 *)buf->page_list[offset >> PAGE_SHIFT].buf + (offset & (PAGE_SIZE - 1)); } int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node); void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, struct mlx4_mtt *mtt); void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt); u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr); int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, struct mlx4_mw *mw); void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw); int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list); int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf); int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, int size, int max_direct); void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int size); int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); int mlx4_srq_alloc(struct mlx4_dev *dev, 
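/*
 * Editorial note, not part of this change: mlx4_buf_offset() above picks
 * the single direct mapping when BITS_PER_LONG == 64 or nbufs == 1, and
 * otherwise indexes the per-page list with offset >> PAGE_SHIFT.  A
 * hedged allocation sketch; the size and offset are placeholders:
 *
 *	struct mlx4_buf buf;
 *	void *p;
 *	int err;
 *
 *	err = mlx4_buf_alloc(dev, 16384, PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	p = mlx4_buf_offset(&buf, 0x40);
 *	(use p, then:)
 *	mlx4_buf_free(dev, 16384, &buf);
 */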
u32 pdn, u32 cqn, u16 xrcdn, struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot); int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot); int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol protocol, u64 *reg_id); int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol protocol, u64 reg_id); enum { MLX4_DOMAIN_UVERBS = 0x1000, MLX4_DOMAIN_ETHTOOL = 0x2000, MLX4_DOMAIN_RFS = 0x3000, MLX4_DOMAIN_NIC = 0x5000, }; enum mlx4_net_trans_rule_id { MLX4_NET_TRANS_RULE_ID_ETH = 0, MLX4_NET_TRANS_RULE_ID_IB, MLX4_NET_TRANS_RULE_ID_IPV6, MLX4_NET_TRANS_RULE_ID_IPV4, MLX4_NET_TRANS_RULE_ID_TCP, MLX4_NET_TRANS_RULE_ID_UDP, MLX4_NET_TRANS_RULE_NUM, /* should be last */ MLX4_NET_TRANS_RULE_DUMMY = -1, /* force enum to be signed */ }; extern const u16 __sw_id_hw[]; static inline int map_hw_to_sw_id(u16 header_id) { int i; for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) { if (header_id == __sw_id_hw[i]) return i; } return -EINVAL; } enum mlx4_net_trans_promisc_mode { MLX4_FS_REGULAR = 1, MLX4_FS_ALL_DEFAULT, MLX4_FS_MC_DEFAULT, MLX4_FS_UC_SNIFFER, MLX4_FS_MC_SNIFFER, MLX4_FS_MODE_NUM, /* should be last */ MLX4_FS_MODE_DUMMY = -1, /* force enum to be signed */ }; struct mlx4_spec_eth { u8 dst_mac[6]; u8 dst_mac_msk[6]; u8 src_mac[6]; u8 src_mac_msk[6]; u8 ether_type_enable; __be16 ether_type; __be16 vlan_id_msk; __be16 vlan_id; }; struct mlx4_spec_tcp_udp { __be16 dst_port; __be16 dst_port_msk; __be16 src_port; __be16 src_port_msk; }; struct mlx4_spec_ipv4 { __be32 dst_ip; __be32 dst_ip_msk; __be32 src_ip; __be32 src_ip_msk; }; struct mlx4_spec_ib { __be32 l3_qpn; __be32 qpn_msk; u8 dst_gid[16]; u8 dst_gid_msk[16]; }; struct mlx4_spec_list { struct list_head list; enum mlx4_net_trans_rule_id id; union { struct mlx4_spec_eth eth; struct mlx4_spec_ib ib; struct mlx4_spec_ipv4 ipv4; struct mlx4_spec_tcp_udp tcp_udp; }; }; enum mlx4_net_trans_hw_rule_queue { MLX4_NET_TRANS_Q_FIFO, MLX4_NET_TRANS_Q_LIFO, }; struct mlx4_net_trans_rule { struct list_head list; enum mlx4_net_trans_hw_rule_queue queue_mode; bool exclusive; bool allow_loopback; enum mlx4_net_trans_promisc_mode promisc_mode; u8 port; u16 priority; u32 qpn; }; struct mlx4_net_trans_rule_hw_ctrl { __be16 prio; u8 type; u8 flags; u8 rsvd1; u8 funcid; u8 vep; u8 port; __be32 qpn; __be32 rsvd2; }; struct mlx4_net_trans_rule_hw_ib { u8 size; u8 rsvd1; __be16 id; u32 rsvd2; __be32 l3_qpn; __be32 qpn_mask; u8 dst_gid[16]; u8 dst_gid_msk[16]; } __packed; struct mlx4_net_trans_rule_hw_eth { u8 size; u8 rsvd; __be16 id; u8 rsvd1[6]; u8 dst_mac[6]; u16 rsvd2; u8 dst_mac_msk[6]; u16 rsvd3; u8 src_mac[6]; u16 rsvd4; u8 src_mac_msk[6]; u8 rsvd5; u8 ether_type_enable; __be16 ether_type; __be16 vlan_tag_msk; __be16 vlan_tag; } __packed; struct mlx4_net_trans_rule_hw_tcp_udp { u8 size; u8 rsvd; __be16 id; __be16 rsvd1[3]; __be16 dst_port; __be16 rsvd2; __be16 dst_port_msk; __be16 rsvd3; __be16 src_port; __be16 rsvd4; __be16 src_port_msk; } 
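/*
 * Editorial note, not part of this change: the MLX4_RESERVE_BF_QP /
 * MLX4_RESERVE_ETH_BF_QP bits defined earlier go into the u8 "flags"
 * argument of mlx4_qp_reserve_range(), requesting a QP range whose
 * numbers keep bits 6 and 7 clear so the QPs stay usable with blueflame.
 * A hedged sketch; count and alignment are placeholders:
 *
 *	int base_qpn;
 *	int err;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn,
 *				    MLX4_RESERVE_ETH_BF_QP);
 *	if (err)
 *		return err;
 *	(use base_qpn .. base_qpn + 7, then:)
 *	mlx4_qp_release_range(dev, base_qpn, 8);
 */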
__packed; struct mlx4_net_trans_rule_hw_ipv4 { u8 size; u8 rsvd; __be16 id; __be32 rsvd1; __be32 dst_ip; __be32 dst_ip_msk; __be32 src_ip; __be32 src_ip_msk; } __packed; struct _rule_hw { union { struct { u8 size; u8 rsvd; __be16 id; }; struct mlx4_net_trans_rule_hw_eth eth; struct mlx4_net_trans_rule_hw_ib ib; struct mlx4_net_trans_rule_hw_ipv4 ipv4; struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; }; }; int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode); int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, enum mlx4_net_trans_promisc_mode mode); int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); void mlx4_set_stats_bitmap(struct mlx4_dev *dev, unsigned long *stats_bitmap); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, u8 *pg, u16 *ratelimit); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey); int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr); int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length, u8 op_modifier, u32 in_offset[], u32 counter_out[]); int mlx4_test_interrupts(struct mlx4_dev *dev); int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector); void mlx4_release_eq(struct mlx4_dev *dev, int vec); int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); int mlx4_counter_alloc(struct mlx4_dev *dev, u8 port, u32 *idx); void mlx4_counter_free(struct mlx4_dev *dev, u8 port, u32 idx); int mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); int map_sw_to_hw_steering_mode(struct mlx4_dev *dev, enum mlx4_net_trans_promisc_mode flow_type); int map_sw_to_hw_steering_id(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); int hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val); int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey); int mlx4_is_slave_active(struct mlx4_dev *dev, int slave); int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, 
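/*
 * Editorial note, not part of this change: with device-managed flow
 * steering, a rule is an mlx4_net_trans_rule whose "list" member heads a
 * chain of mlx4_spec_list entries, one per header to match; it is handed
 * to mlx4_flow_attach(), which returns a reg_id later passed to
 * mlx4_flow_detach().  A hedged sketch, assuming the usual linux/list.h
 * helpers; port, qpn and dev_mac are caller-supplied placeholders:
 *
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive	= 0,
 *		.allow_loopback	= 1,
 *		.promisc_mode	= MLX4_FS_REGULAR,
 *		.port		= port,
 *		.priority	= MLX4_DOMAIN_NIC,
 *		.qpn		= qpn,
 *	};
 *	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	u64 reg_id;
 *	int err;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	memcpy(spec.eth.dst_mac, dev_mac, 6);
 *	memset(spec.eth.dst_mac_msk, 0xff, 6);
 *	list_add_tail(&spec.list, &rule.list);
 *
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	(when the rule is no longer needed:)
 *	mlx4_flow_detach(dev, reg_id);
 */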
int slave, u8 port); int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port); int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr, u16 lid, u8 sl); int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change); enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port); int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event); void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid); __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave); int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id); int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, u8 *gid); int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn); int mlx4_read_clock(struct mlx4_dev *dev); int mlx4_get_internal_clock_params(struct mlx4_dev *dev, struct mlx4_clock_params *params); int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset, u16 size, u8 *data); #endif /* MLX4_DEVICE_H */ Index: stable/10 =================================================================== --- stable/10 (revision 318535) +++ stable/10 (revision 318536) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r313555
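Editorial appendix, not part of the patch: mlx4_get_module_info(), declared just above the #endif, reads raw bytes from the attached cable/module EEPROM, byte 0 of which, per the SFF module specifications, holds the identifier named by the mlx4_module_id enum. A hedged sketch of checking for a QSFP28 module; the qsfp28_present variable is a placeholder:

	u8 id;
	bool qsfp28_present = false;

	if (mlx4_get_module_info(dev, port, 0, 1, &id) == 0 &&
	    id == MLX4_MODULE_ID_QSFP28)
		qsfp28_present = true;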