Index: head/sys/dev/ocs_fc/ocs_cam.c =================================================================== --- head/sys/dev/ocs_fc/ocs_cam.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_cam.c (revision 343349) @@ -1,2839 +1,2847 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

/**
 * @defgroup scsi_api_target SCSI Target API
 * @defgroup scsi_api_initiator SCSI Initiator API
 * @defgroup cam_api Common Access Method (CAM) API
 * @defgroup cam_io CAM IO
 */

/**
 * @file
 * Provides CAM functionality.
 */

#include "ocs.h"
#include "ocs_scsi.h"
#include "ocs_device.h"

/* Default IO timeout value for initiators is 30 seconds */
#define OCS_CAM_IO_TIMEOUT 30

/* Context handed to the busdma map-load callback: collects SGL entries. */
typedef struct {
	ocs_scsi_sgl_t *sgl;
	uint32_t sgl_max;
	uint32_t sgl_count;
	int32_t rc;
} ocs_dmamap_load_arg_t;

static void ocs_action(struct cam_sim *, union ccb *);
static void ocs_poll(struct cam_sim *);

static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *,
					struct ccb_hdr *, uint32_t *);
static int32_t ocs_tgt_resource_abort(struct ocs_softc *,
					ocs_tgt_resource_t *);
static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs,
					union ccb *accb);
static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb);
static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb);
static int32_t ocs_target_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e,
					uint32_t, void *);
static int32_t ocs_io_abort_cb(ocs_io_t *, ocs_scsi_io_status_e,
					uint32_t, void *);
static int32_t ocs_task_set_full_or_busy(ocs_io_t *io);
static int32_t ocs_initiator_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e,
					ocs_scsi_cmd_resp_t *, uint32_t, void *);
static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp,
					uint32_t new_role);

static void ocs_ldt(void *arg);
static void ocs_ldt_task(void *arg, int pending);
static void ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt);
uint32_t ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp);
uint32_t ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id);
int32_t ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node);

/* Look up an active IO by its tag (the tag is the IO instance index). */
static inline ocs_io_t *
ocs_scsi_find_io(struct ocs_softc *ocs, uint32_t tag)
{
	return ocs_io_get_instance(ocs, tag);
}

/*
 * Release a target-mode IO back to the pool and drop the in-use count.
 * Resets the per-IO CAM state so the IO can be recycled.
 */
static inline void
ocs_target_io_free(ocs_io_t *io)
{
	io->tgt_io.state = OCS_CAM_IO_FREE;
	io->tgt_io.flags = 0;
	io->tgt_io.app = NULL;
	ocs_scsi_io_complete(io);
	if (io->ocs->io_in_use != 0)
		atomic_subtract_acq_32(&io->ocs->io_in_use, 1);
}

/*
 * Allocate and register one CAM SIM/bus/path for channel 'chan'
 * (channel 0 is the physical port; higher channels are vports).
 * Returns 0 on success, 1 on failure.
 */
static int32_t
ocs_attach_port(ocs_t *ocs, int chan)
{
	struct cam_sim *sim = NULL;
	struct cam_path *path = NULL;
	uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);
	ocs_fcport *fcp = FCPORT(ocs, chan);

	if (NULL == (sim = cam_sim_alloc(ocs_action, ocs_poll,
	    device_get_name(ocs->dev), ocs, device_get_unit(ocs->dev),
	    &ocs->sim_lock, max_io, max_io, ocs->devq))) {
		device_printf(ocs->dev, "Can't allocate SIM\n");
		return 1;
	}

	mtx_lock(&ocs->sim_lock);
	if (CAM_SUCCESS != xpt_bus_register(sim, ocs->dev, chan)) {
		/* NOTE(review): message always prints bus 0, not 'chan'. */
		device_printf(ocs->dev, "Can't register bus %d\n", 0);
		mtx_unlock(&ocs->sim_lock);
		cam_sim_free(sim, FALSE);
		return 1;
	}
	mtx_unlock(&ocs->sim_lock);

	if (CAM_REQ_CMP != xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) {
		device_printf(ocs->dev, "Can't create path\n");
		xpt_bus_deregister(cam_sim_path(sim));
		/*
		 * NOTE(review): sim_lock was already released after
		 * xpt_bus_register() above, so this unlock appears
		 * unbalanced -- verify against mutex(9) before relying
		 * on this error path.
		 */
		mtx_unlock(&ocs->sim_lock);
		cam_sim_free(sim, FALSE);
		return 1;
	}

	fcp->ocs = ocs;
	fcp->sim = sim;
	fcp->path = path;

	/* Device-lost timer and its taskqueue handler for this port. */
	callout_init_mtx(&fcp->ldt, &ocs->sim_lock, 0);
	TASK_INIT(&fcp->ltask, 1, ocs_ldt_task, fcp);

	return 0;
}

/*
 * Tear down the CAM SIM/bus/path for channel 'chan', draining the
 * device-lost timer first so it cannot fire during teardown.
 */
static int32_t
ocs_detach_port(ocs_t *ocs, int32_t chan)
{
	ocs_fcport *fcp = NULL;
	struct cam_sim *sim = NULL;
	struct cam_path *path = NULL;

	fcp = FCPORT(ocs, chan);
	sim = fcp->sim;
	path = fcp->path;

	callout_drain(&fcp->ldt);
	/* Run any pending lost-device work synchronously. */
	ocs_ldt_task(fcp, 0);

	if (fcp->sim) {
		mtx_lock(&ocs->sim_lock);
		ocs_tgt_resource_abort(ocs, &fcp->targ_rsrc_wildcard);
		if (path) {
			xpt_async(AC_LOST_DEVICE, path, NULL);
			xpt_free_path(path);
			fcp->path = NULL;
		}
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, FALSE);
		fcp->sim = NULL;
		mtx_unlock(&ocs->sim_lock);
	}

	return 0;
}

/*
 * Attach all channels (physical port + vports) to CAM.
 * Returns 0 on success; on failure, detaches any channels already
 * attached and returns non-zero.
 */
int32_t
ocs_cam_attach(ocs_t *ocs)
{
	struct cam_devq *devq = NULL;
	int i = 0;
	uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);

	if (NULL == (devq = cam_simq_alloc(max_io))) {
		device_printf(ocs->dev, "Can't allocate SIMQ\n");
		return -1;
	}

	ocs->devq = devq;

	if (mtx_initialized(&ocs->sim_lock) == 0) {
		mtx_init(&ocs->sim_lock, "ocs_sim_lock", NULL, MTX_DEF);
	}

	for (i = 0; i < (ocs->num_vports + 1); i++) {
		if (ocs_attach_port(ocs, i)) {
			ocs_log_err(ocs, "Attach port failed for chan: %d\n",
			    i);
			goto detach_port;
		}
	}

	ocs->io_high_watermark = max_io;
	ocs->io_in_use = 0;
	return 0;

detach_port:
	/* Unwind only the channels that attached successfully. */
	while (--i >= 0) {
		ocs_detach_port(ocs, i);
	}
	cam_simq_free(ocs->devq);
	if (mtx_initialized(&ocs->sim_lock))
		mtx_destroy(&ocs->sim_lock);

	return 1;
}

/* Detach every channel from CAM and release the SIMQ and lock. */
int32_t
ocs_cam_detach(ocs_t *ocs)
{
	int i = 0;

	for (i = (ocs->num_vports); i >= 0; i--) {
		ocs_detach_port(ocs, i);
	}

	cam_simq_free(ocs->devq);
	if (mtx_initialized(&ocs->sim_lock))
		mtx_destroy(&ocs->sim_lock);

	return 0;
}

/***************************************************************************
 * Functions required by SCSI base driver API
 */

/**
 * @ingroup scsi_api_target
 * @brief Attach driver to the BSD SCSI layer (a.k.a CAM)
 *
 * Allocates + initializes CAM related resources and attaches to the CAM
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
int32_t
ocs_scsi_tgt_new_device(ocs_t *ocs)
{
	ocs->enable_task_set_full = ocs_scsi_get_property(ocs,
	    OCS_SCSI_ENABLE_TASK_SET_FULL);
	ocs_log_debug(ocs, "task set full processing is %s\n",
	    ocs->enable_task_set_full ? "enabled" : "disabled");

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief Tears down target members of ocs structure.
 *
 * Called by OS code when device is removed.
 *
 * @param ocs pointer to ocs
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_del_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept new domain notification
 *
 * Called by base driver when new domain is discovered. A target-server
 * will use this call to prepare for new remote node notifications
 * arising from ocs_scsi_new_initiator().
 *
 * The domain context has an element ocs_scsi_tgt_domain_t tgt_domain
 * which is declared by the target-server code and is used for target-server
 * private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * target capability.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_new_domain() function is called to initiator-client backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_new_domain(ocs_domain_t *domain)
{
	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept domain lost notification
 *
 * Called by base-driver when a domain goes away. A target-server will
 * use this call to clean up all domain scoped resources.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_del_domain() function is called to initiator-client backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_tgt_del_domain(ocs_domain_t *domain)
{
}

/**
 * @ingroup scsi_api_target
 * @brief accept new sli port (sport) notification
 *
 * Called by base driver when new sport is discovered. A target-server
 * will use this call to prepare for new remote node notifications
 * arising from ocs_scsi_new_initiator().
 *
 * The domain context has an element ocs_scsi_tgt_sport_t tgt_sport
 * which is declared by the target-server code and is used for
 * target-server private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * target capability.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_tgt_new_domain() is called to initiator-client backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_new_sport(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;

	/* Physical port: attach the base fcport as target-server data. */
	if (!sport->is_vport) {
		sport->tgt_data = FCPORT(ocs, 0);
	}

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept SLI port gone notification
 *
 * Called by base-driver when a sport goes away. A target-server will
 * use this call to clean up all sport scoped resources.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_del_sport() is called to initiator-client backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_tgt_del_sport(ocs_sport_t *sport)
{
	return;
}

/**
 * @ingroup scsi_api_target
 * @brief receive notification of a new SCSI initiator node
 *
 * Sent by base driver to notify a target-server of the presence of a new
 * remote initiator. The target-server may use this call to prepare for
 * inbound IO from this node.
 *
 * The ocs_node_t structure has an element of type ocs_scsi_tgt_node_t named
 * tgt_node that is declared and used by a target-server for private
 * information.
 *
 * This function is only called if the target capability is enabled in driver.
 *
 * @param node pointer to new remote initiator node
 *
 * @return returns 0 for success, a negative error code value for failure.
 *
 * @note
 */
int32_t
ocs_scsi_new_initiator(ocs_node_t *node)
{
	ocs_t *ocs = node->ocs;
	struct ac_contract ac;
	struct ac_device_changed *adc;

	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	/*
	 * Update the IO watermark by decrementing it by the
	 * number of IOs reserved for each initiator.
	 */
	atomic_subtract_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);

	/* Announce the arrival to CAM via an AC_CONTRACT async event. */
	ac.contract_number = AC_CONTRACT_DEV_CHG;
	adc = (struct ac_device_changed *) ac.contract_data;
	adc->wwpn = ocs_node_get_wwpn(node);
	adc->port = node->rnode.fc_id;
	adc->target = node->instance_index;
	adc->arrived = 1;
	xpt_async(AC_CONTRACT, fcp->path, &ac);

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief validate new initiator
 *
 * Sent by base driver to validate a remote initiator. The target-server
 * returns TRUE if this initiator should be accepted.
 *
 * This function is only called if the target capability is enabled in driver.
 *
 * @param node pointer to remote initiator node to validate
 *
 * @return TRUE if initiator should be accepted, FALSE if it should be rejected
 *
 * @note
 */
int32_t
ocs_scsi_validate_initiator(ocs_node_t *node)
{
	return 1;
}

/**
 * @ingroup scsi_api_target
 * @brief Delete a SCSI initiator node
 *
 * Sent by base driver to notify a target-server that a remote initiator
 * is now gone. The base driver will have terminated all outstanding IOs
 * and the target-server will receive appropriate completions.
 *
 * This function is only called if the base driver is enabled for
 * target capability.
 *
 * @param node pointer node being deleted
 * @param reason Reason why initiator is gone.
 *
 * @return OCS_SCSI_CALL_COMPLETE to indicate that all work was completed
 *
 * @note
 */
int32_t
ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason)
{
	ocs_t *ocs = node->ocs;
	struct ac_contract ac;
	struct ac_device_changed *adc;

	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	/* Announce the departure to CAM via an AC_CONTRACT async event. */
	ac.contract_number = AC_CONTRACT_DEV_CHG;
	adc = (struct ac_device_changed *) ac.contract_data;
	adc->wwpn = ocs_node_get_wwpn(node);
	adc->port = node->rnode.fc_id;
	adc->target = node->instance_index;
	adc->arrived = 0;
	xpt_async(AC_CONTRACT, fcp->path, &ac);

	/*
	 * A missing initiator may come back; don't restore the reserved
	 * IO watermark headroom in that case.
	 */
	if (reason == OCS_SCSI_INITIATOR_MISSING) {
		return OCS_SCSI_CALL_COMPLETE;
	}

	/*
	 * Update the IO watermark by incrementing it by the
	 * number of IOs reserved for each initiator.
	 */
	atomic_add_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);

	return OCS_SCSI_CALL_COMPLETE;
}

/**
 * @ingroup scsi_api_target
 * @brief receive FCP SCSI Command
 *
 * Called by the base driver when a new SCSI command has been received. The
 * target-server will process the command, and issue data and/or response phase
 * requests to the base driver.
 *
 * The IO context (ocs_io_t) structure has an element of type
 * ocs_scsi_tgt_io_t named tgt_io that is declared and used by
 * a target-server for private information.
 *
 * @param io pointer to IO context
 * @param lun LUN for this IO
 * @param cdb pointer to SCSI CDB
 * @param cdb_len length of CDB in bytes
 * @param flags command flags
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len,
    uint32_t flags)
{
	ocs_t *ocs = io->ocs;
	struct ccb_accept_tio *atio = NULL;
	ocs_node_t *node = io->node;
	ocs_tgt_resource_t *trsrc = NULL;
	int32_t rc = -1;
	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	atomic_add_acq_32(&ocs->io_in_use, 1);

	/* set target io timeout */
	io->timeout = ocs->target_io_timer_sec;

	/* Over the watermark: report TASK SET FULL / BUSY instead. */
	if (ocs->enable_task_set_full &&
	    (ocs->io_in_use >= ocs->io_high_watermark)) {
		return ocs_task_set_full_or_busy(io);
	} else {
		atomic_store_rel_32(&io->node->tgt_node.busy_sent, FALSE);
	}

	/* Pick the per-LUN resource if enabled, else fall back to wildcard. */
	if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
		trsrc = &fcp->targ_rsrc[lun];
	} else if (fcp->targ_rsrc_wildcard.enabled) {
		trsrc = &fcp->targ_rsrc_wildcard;
	}

	if (trsrc) {
		atio = (struct ccb_accept_tio *)STAILQ_FIRST(&trsrc->atio);
	}

	if (atio) {
		STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);

		atio->ccb_h.status = CAM_CDB_RECVD;
		atio->ccb_h.target_lun = lun;
		atio->sense_len = 0;

		atio->init_id = node->instance_index;
		atio->tag_id = io->tag;
		atio->ccb_h.ccb_io_ptr = io;

		/* Map FCP task attributes to CAM tag actions. */
		if (flags & OCS_SCSI_CMD_SIMPLE)
			atio->tag_action = MSG_SIMPLE_Q_TAG;
		else if (flags & FCP_TASK_ATTR_HEAD_OF_QUEUE)
			atio->tag_action = MSG_HEAD_OF_Q_TAG;
		else if (flags & FCP_TASK_ATTR_ORDERED)
			atio->tag_action = MSG_ORDERED_Q_TAG;
		else
			atio->tag_action = 0;

		atio->cdb_len = cdb_len;
		ocs_memcpy(atio->cdb_io.cdb_bytes, cdb, cdb_len);

		io->tgt_io.flags = 0;
		io->tgt_io.state = OCS_CAM_IO_COMMAND;
		io->tgt_io.lun = lun;

		xpt_done((union ccb *)atio);

		rc = 0;
	} else {
		/* No queued ATIO to accept this command; drop and free it. */
		device_printf(ocs->dev,
		    "%s: no ATIO for LUN %lx (en=%s) OX_ID %#x\n",
		    __func__, (unsigned long)lun,
		    trsrc ? (trsrc->enabled ? "T" : "F") : "X",
		    be16toh(io->init_task_tag));

		io->tgt_io.state = OCS_CAM_IO_MAX;
		ocs_target_io_free(io);
	}

	return rc;
}

/**
 * @ingroup scsi_api_target
 * @brief receive FCP SCSI Command with first burst data.
 *
 * Receive a new FCP SCSI command from the base driver with first burst data.
 *
 * @param io pointer to IO context
 * @param lun LUN for this IO
 * @param cdb pointer to SCSI CDB
 * @param cdb_len length of CDB in bytes
 * @param flags command flags
 * @param first_burst_buffers first burst buffers
 * @param first_burst_buffer_count The number of bytes received in the first burst
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb,
    uint32_t cdb_len, uint32_t flags, ocs_dma_t first_burst_buffers[],
    uint32_t first_burst_buffer_count)
{
	/* First-burst is not supported by this backend. */
	return -1;
}

/**
 * @ingroup scsi_api_target
 * @brief receive a TMF command IO
 *
 * Called by the base driver when a SCSI TMF command has been received. The
 * target-server will process the command, aborting commands as needed, and post
 * a response using ocs_scsi_send_resp()
 *
 * The IO context (ocs_io_t) structure has an element of type ocs_scsi_tgt_io_t named
 * tgt_io that is declared and used by a target-server for private information.
 *
 * If the target-server walks the nodes active_ios linked list, and starts IO
 * abort processing, the code must be sure not to abort the IO passed into the
 * ocs_scsi_recv_tmf() command.
 *
 * @param tmfio pointer to IO context
 * @param lun logical unit value
 * @param cmd command request
 * @param abortio pointer to IO object to abort for TASK_ABORT (NULL for all other TMF)
 * @param flags flags
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd,
    ocs_io_t *abortio, uint32_t flags)
{
	ocs_t *ocs = tmfio->ocs;
	ocs_node_t *node = tmfio->node;
	ocs_tgt_resource_t *trsrc = NULL;
	struct ccb_immediate_notify *inot = NULL;
	int32_t rc = -1;
	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	/* Pick the per-LUN resource if enabled, else fall back to wildcard. */
	if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
		trsrc = &fcp->targ_rsrc[lun];
	} else if (fcp->targ_rsrc_wildcard.enabled) {
		trsrc = &fcp->targ_rsrc_wildcard;
	}

	device_printf(tmfio->ocs->dev, "%s: io=%p cmd=%#x LU=%lx en=%s\n",
	    __func__, tmfio, cmd, (unsigned long)lun,
	    trsrc ? (trsrc->enabled ? "T" : "F") : "X");
	if (trsrc) {
		inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot);
	}

	if (!inot) {
		/* No queued INOT available; complete the IO(s) and bail. */
		device_printf(ocs->dev,
		    "%s: no INOT for LUN %llx (en=%s) OX_ID %#x\n",
		    __func__, (unsigned long long)lun,
		    trsrc ? (trsrc->enabled ? "T" : "F") : "X",
		    be16toh(tmfio->init_task_tag));

		if (abortio) {
			ocs_scsi_io_complete(abortio);
		}
		ocs_scsi_io_complete(tmfio);
		goto ocs_scsi_recv_tmf_out;
	}

	tmfio->tgt_io.app = abortio;

	STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe);

	inot->tag_id = tmfio->tag;
	inot->seq_id = tmfio->tag;

	if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
		inot->initiator_id = node->instance_index;
	} else {
		inot->initiator_id = CAM_TARGET_WILDCARD;
	}

	inot->ccb_h.status = CAM_MESSAGE_RECV;
	inot->ccb_h.target_lun = lun;

	switch (cmd) {
	case OCS_SCSI_TMF_ABORT_TASK:
		/*
		 * NOTE(review): abortio is assumed non-NULL for ABORT_TASK
		 * (per the function contract above) -- confirm callers.
		 */
		inot->arg = MSG_ABORT_TASK;
		inot->seq_id = abortio->tag;
		device_printf(ocs->dev, "%s: ABTS IO.%#x st=%#x\n",
		    __func__, abortio->tag, abortio->tgt_io.state);
		abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_RECV;
		abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_NOTIFY;
		break;
	case OCS_SCSI_TMF_QUERY_TASK_SET:
		/* Not supported: return the INOT to the queue. */
		device_printf(ocs->dev,
		    "%s: OCS_SCSI_TMF_QUERY_TASK_SET not supported\n",
		    __func__);
		STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
		ocs_scsi_io_complete(tmfio);
		goto ocs_scsi_recv_tmf_out;
		break;
	case OCS_SCSI_TMF_ABORT_TASK_SET:
		inot->arg = MSG_ABORT_TASK_SET;
		break;
	case OCS_SCSI_TMF_CLEAR_TASK_SET:
		inot->arg = MSG_CLEAR_TASK_SET;
		break;
	case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
		inot->arg = MSG_QUERY_ASYNC_EVENT;
		break;
	case OCS_SCSI_TMF_LOGICAL_UNIT_RESET:
		inot->arg = MSG_LOGICAL_UNIT_RESET;
		break;
	case OCS_SCSI_TMF_CLEAR_ACA:
		inot->arg = MSG_CLEAR_ACA;
		break;
	case OCS_SCSI_TMF_TARGET_RESET:
		inot->arg = MSG_TARGET_RESET;
		break;
	default:
		device_printf(ocs->dev, "%s: unsupported TMF %#x\n",
		    __func__, cmd);
		STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
		goto ocs_scsi_recv_tmf_out;
	}

	rc = 0;

	xpt_print(inot->ccb_h.path, "%s: func=%#x stat=%#x id=%#x lun=%#x"
	    " flags=%#x tag=%#x seq=%#x ini=%#x arg=%#x\n", __func__,
	    inot->ccb_h.func_code, inot->ccb_h.status, inot->ccb_h.target_id,
	    (unsigned int)inot->ccb_h.target_lun, inot->ccb_h.flags,
	    inot->tag_id, inot->seq_id, inot->initiator_id, inot->arg);

	xpt_done((union ccb *)inot);

	if (abortio) {
		abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_DEV;
		rc = ocs_scsi_tgt_abort_io(abortio, ocs_io_abort_cb, tmfio);
	}

ocs_scsi_recv_tmf_out:
	return rc;
}

/**
 * @ingroup scsi_api_initiator
 * @brief Initializes any initiator fields on the ocs structure.
 *
 * Called by OS initialization code when a new device is discovered.
 *
 * @param ocs pointer to ocs
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_new_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief Tears down initiator members of ocs structure.
 *
 * Called by OS code when device is removed.
 *
 * @param ocs pointer to ocs
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_del_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept new domain notification
 *
 * Called by base driver when new domain is discovered.
An initiator-client
 * will accept this call to prepare for new remote node notifications
 * arising from ocs_scsi_new_target().
 *
 * The domain context has the element ocs_scsi_ini_domain_t ini_domain
 * which is declared by the initiator-client code and is used for
 * initiator-client private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * initiator capability.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_new_domain() function is called to target-server backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_new_domain(ocs_domain_t *domain)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept domain lost notification
 *
 * Called by base-driver when a domain goes away. An initiator-client will
 * use this call to clean up all domain scoped resources.
 *
 * This function will only be called if the base-driver has been enabled for
 * initiator capability.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_del_domain() function is called to target-server backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_ini_del_domain(ocs_domain_t *domain)
{
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept new sli port notification
 *
 * Called by base driver when new sli port (sport) is discovered.
 * A target-server will use this call to prepare for new remote node
 * notifications arising from ocs_scsi_new_initiator().
 *
 * This function will only be called if the base-driver has been enabled for
 * target capability.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_new_sport() function is called to initiator-client backends.
 *
 * @param sport pointer to sport
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_new_sport(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;
	ocs_fcport *fcp = FCPORT(ocs, 0);

	/* Physical port: remember its FC address on the base fcport. */
	if (!sport->is_vport) {
		sport->tgt_data = fcp;
		fcp->fc_id = sport->fc_id;
	}

	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept sli port gone notification
 *
 * Called by base-driver when a sport goes away. A target-server will
 * use this call to clean up all sport scoped resources.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_del_sport() function is called to initiator-client backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_ini_del_sport(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;
	ocs_fcport *fcp = FCPORT(ocs, 0);

	if (!sport->is_vport) {
		fcp->fc_id = 0;
	}
}

/*
 * Notification that a (virtual) sport was deleted; if the port is still
 * online and the vport still has a role, re-allocate the vport so it
 * comes back.
 */
void
ocs_scsi_sport_deleted(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;
	ocs_fcport *fcp = NULL;

	ocs_xport_stats_t value;

	/* Only vports are re-created here. */
	if (!sport->is_vport) {
		return;
	}

	fcp = sport->tgt_data;

	ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value);

	if (value.value == 0) {
		ocs_log_debug(ocs, "PORT offline,.. skipping\n");
		return;
	}

	if ((fcp->role != KNOB_ROLE_NONE)) {
		if (fcp->vport->sport != NULL) {
			ocs_log_debug(ocs,"sport is not NULL, skipping\n");
			return;
		}

		ocs_sport_vport_alloc(ocs->domain, fcp->vport);
		return;
	}
}

/*
 * Find the target table slot holding 'node' (matched by WWPN).
 * Returns the slot index, or -1 if the node is not in the table.
 */
int32_t
ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node)
{
	ocs_fc_target_t *tgt = NULL;
	uint32_t i;

	for (i = 0; i < OCS_MAX_TARGETS; i++) {
		tgt = &fcp->tgt[i];

		if (tgt->state == OCS_TGT_STATE_NONE)
			continue;

		if (ocs_node_get_wwpn(node) == tgt->wwpn) {
			return i;
		}
	}

	return -1;
}

/**
 * @ingroup scsi_api_initiator
 * @brief receive notification of a new SCSI target node
 *
 * Sent by base driver to notify an initiator-client of the presence of a new
 * remote target. The initiator-server may use this call to prepare for
 * inbound IO from this node.
 *
 * This function is only called if the base driver is enabled for
 * initiator capability.
 *
 * @param node pointer to new remote initiator node
 *
 * @return none
 *
 * @note
 */
uint32_t
ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id)
{
	ocs_fc_target_t *tgt = NULL;

	tgt = &fcp->tgt[tgt_id];

	tgt->node_id = node->instance_index;
	tgt->state = OCS_TGT_STATE_VALID;

	tgt->port_id = node->rnode.fc_id;
	tgt->wwpn = ocs_node_get_wwpn(node);
	tgt->wwnn = ocs_node_get_wwnn(node);
	return 0;
}

/*
 * Claim the first free target table slot for 'node' and kick off a CAM
 * rescan of the new target path.
 */
uint32_t
ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp)
{
	uint32_t i;
	struct ocs_softc *ocs = node->ocs;
	union ccb *ccb = NULL;

	/*
	 * NOTE(review): if no slot is free, the loop falls through with
	 * i == OCS_MAX_TARGETS, and ocs_update_tgt() below would write
	 * past fcp->tgt[] -- verify whether a full table is possible here.
	 */
	for (i = 0; i < OCS_MAX_TARGETS; i++) {
		if (fcp->tgt[i].state == OCS_TGT_STATE_NONE)
			break;
	}

	if (NULL == (ccb = xpt_alloc_ccb_nowait())) {
		device_printf(ocs->dev, "%s: ccb allocation failed\n",
		    __func__);
		return -1;
	}

	if (CAM_REQ_CMP != xpt_create_path(&ccb->ccb_h.path, xpt_periph,
	    cam_sim_path(fcp->sim), i, CAM_LUN_WILDCARD)) {
		device_printf(ocs->dev,
		    "%s: target path creation failed\n", __func__);
		xpt_free_ccb(ccb);
		return -1;
	}

	ocs_update_tgt(node, fcp, i);
	xpt_rescan(ccb);
	return 0;
}

/*
 * New remote target notification: update an existing table entry if the
 * WWPN is already known, otherwise add a new one.
 */
int32_t
ocs_scsi_new_target(ocs_node_t *node)
{
	ocs_fcport *fcp = NULL;
	int32_t i;

	fcp = node->sport->tgt_data;

	if (fcp == NULL) {
		printf("%s:FCP is NULL \n", __func__);
		return 0;
	}

	i = ocs_tgt_find(fcp, node);

	if (i < 0) {
		ocs_add_new_tgt(node, fcp);
		return 0;
	}

	ocs_update_tgt(node, fcp, i);
	return 0;
}

/* Tell CAM that target id 'tgt' on this port's bus is gone. */
static void
ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt)
{
	struct cam_path *cpath = NULL;

	if (!fcp->sim) {
		device_printf(ocs->dev, "%s: calling with NULL sim\n",
		    __func__);
		return;
	}

	if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL,
	    cam_sim_path(fcp->sim), tgt, CAM_LUN_WILDCARD)) {
		xpt_async(AC_LOST_DEVICE, cpath, NULL);
		xpt_free_path(cpath);
	}
}

/*
 * Device Lost Timer Function- when we have decided that a device was lost,
 * we wait a specific period of time prior to telling the OS about lost device.
 *
 * This timer function gets activated when the device was lost.
 * This function fires once a second and then scans the port database
 * for devices that are marked dead but still have a virtual target assigned.
 * We decrement a counter for that port database entry, and when it hits zero,
 * we tell the OS the device was lost. Timer will be stopped when the device
 * comes back active or removed from the OS.
 */
static void
ocs_ldt(void *arg)
{
	ocs_fcport *fcp = arg;

	/* Defer the scan to a taskqueue; callouts must not sleep. */
	taskqueue_enqueue(taskqueue_thread, &fcp->ltask);
}

/*
 * Taskqueue half of the device-lost timer: age out LOST targets and
 * report expired ones to CAM; re-arm the callout while work remains.
 */
static void
ocs_ldt_task(void *arg, int pending)
{
	ocs_fcport *fcp = arg;
	ocs_t *ocs = fcp->ocs;
	int i, more_to_do = 0;
	ocs_fc_target_t *tgt = NULL;

	for (i = 0; i < OCS_MAX_TARGETS; i++) {
		tgt = &fcp->tgt[i];

		if (tgt->state != OCS_TGT_STATE_LOST) {
			continue;
		}

		/* Still within the grace period; keep the timer running. */
		if ((tgt->gone_timer != 0) && (ocs->attached)) {
			tgt->gone_timer -= 1;
			more_to_do++;
			continue;
		}

		if (tgt->is_target) {
			tgt->is_target = 0;
			ocs_delete_target(ocs, fcp, i);
		}

		tgt->state = OCS_TGT_STATE_NONE;
	}

	if (more_to_do) {
		callout_reset(&fcp->ldt, hz, ocs_ldt, fcp);
	} else {
		callout_deactivate(&fcp->ldt);
	}
}

/**
 * @ingroup scsi_api_initiator
 * @brief Delete a SCSI target node
 *
 * Sent by base driver to notify a initiator-client that a remote target
 * is now gone. The base driver will have terminated all outstanding IOs
 * and the initiator-client will receive appropriate completions.
 *
 * The ocs_node_t structure has an element of type ocs_scsi_ini_node_t named
 * ini_node that is declared and used by a target-server for private
 * information.
 *
 * This function is only called if the base driver is enabled for
 * initiator capability.
 *
 * @param node pointer node being deleted
 * @param reason reason for deleting the target
 *
 * @return Returns OCS_SCSI_CALL_ASYNC if target delete is queued for async
 * completion and OCS_SCSI_CALL_COMPLETE if call completed or error.
* * @note */ int32_t ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason) { struct ocs_softc *ocs = node->ocs; ocs_fcport *fcp = NULL; ocs_fc_target_t *tgt = NULL; - uint32_t tgt_id; + int32_t tgt_id; + if (ocs == NULL) { + ocs_log_err(ocs,"OCS is NULL \n"); + return -1; + } + fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs,"FCP is NULL \n"); - return 0; + return -1; } tgt_id = ocs_tgt_find(fcp, node); + if (tgt_id == -1) { + ocs_log_err(ocs,"target is invalid\n"); + return -1; + } tgt = &fcp->tgt[tgt_id]; // IF in shutdown delete target. if(!ocs->attached) { ocs_delete_target(ocs, fcp, tgt_id); } else { tgt->state = OCS_TGT_STATE_LOST; tgt->gone_timer = 30; if (!callout_active(&fcp->ldt)) { callout_reset(&fcp->ldt, hz, ocs_ldt, fcp); } } return 0; } /** * @brief Initialize SCSI IO * * Initialize SCSI IO, this function is called once per IO during IO pool * allocation so that the target server may initialize any of its own private * data. * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_io_init(ocs_io_t *io) { return 0; } /** * @brief Uninitialize SCSI IO * * Uninitialize target server private data in a SCSI io object * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_io_exit(ocs_io_t *io) { return 0; } /** * @brief Initialize SCSI IO * * Initialize SCSI IO, this function is called once per IO during IO pool * allocation so that the initiator client may initialize any of its own private * data. * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_scsi_ini_io_init(ocs_io_t *io) { return 0; } /** * @brief Uninitialize SCSI IO * * Uninitialize initiator client private data in a SCSI io object * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_io_exit(ocs_io_t *io) { return 0; } /* * End of functions required by SCSI base driver API ***************************************************************************/ static __inline void ocs_set_ccb_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static int32_t ocs_task_set_full_or_busy_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { ocs_target_io_free(io); return 0; } /** * @brief send SCSI task set full or busy status * * A SCSI task set full or busy response is sent depending on whether * another IO is already active on the LUN. * * @param io pointer to IO context * * @return returns 0 for success, a negative error code value for failure. */ static int32_t ocs_task_set_full_or_busy(ocs_io_t *io) { ocs_scsi_cmd_resp_t rsp = { 0 }; ocs_t *ocs = io->ocs; /* * If there is another command for the LUN, then send task set full, * if this is the first one, then send the busy status. 
* * if 'busy sent' is FALSE, set it to TRUE and send BUSY * otherwise send FULL */ if (atomic_cmpset_acq_32(&io->node->tgt_node.busy_sent, FALSE, TRUE)) { rsp.scsi_status = SCSI_STATUS_BUSY; /* Busy */ printf("%s: busy [%s] tag=%x iiu=%d ihw=%d\n", __func__, io->node->display_name, io->tag, io->ocs->io_in_use, io->ocs->io_high_watermark); } else { rsp.scsi_status = SCSI_STATUS_TASK_SET_FULL; /* Task set full */ printf("%s: full tag=%x iiu=%d\n", __func__, io->tag, io->ocs->io_in_use); } /* Log a message here indicating a busy or task set full state */ if (OCS_LOG_ENABLE_Q_FULL_BUSY_MSG(ocs)) { /* Log Task Set Full */ if (rsp.scsi_status == SCSI_STATUS_TASK_SET_FULL) { /* Task Set Full Message */ ocs_log_info(ocs, "OCS CAM TASK SET FULL. Tasks >= %d\n", ocs->io_high_watermark); } else if (rsp.scsi_status == SCSI_STATUS_BUSY) { /* Log Busy Message */ ocs_log_info(ocs, "OCS CAM SCSI BUSY\n"); } } /* Send the response */ return ocs_scsi_send_resp(io, 0, &rsp, ocs_task_set_full_or_busy_cb, NULL); } /** * @ingroup cam_io * @brief Process target IO completions * * @param io * @param scsi_status did the IO complete successfully * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_target_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; uint32_t io_is_done = (ccb->ccb_h.flags & CAM_SEND_STATUS) == CAM_SEND_STATUS; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, 
io->tgt_io.dmap); } } if (io->tgt_io.sendresp) { io->tgt_io.sendresp = 0; ocs_scsi_cmd_resp_t resp = { 0 }; io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; return ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb); } switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: ocs_set_ccb_status(ccb, CAM_REQ_CMP); break; case OCS_SCSI_STATUS_ABORTED: ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); break; default: ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } if (io_is_done) { if ((io->tgt_io.flags & OCS_CAM_IO_F_ABORT_NOTIFY) == 0) { ocs_target_io_free(io); } } else { io->tgt_io.state = OCS_CAM_IO_DATA_DONE; /*device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n", __func__, io->tgt_io.state, io->tag);*/ } xpt_done(ccb); return 0; } /** * @note 1. Since the CCB is assigned to the ocs_io_t on an XPT_CONT_TARGET_IO * action, if an initiator aborts a command prior to the SIM receiving * a CTIO, the IO's CCB will be NULL. */ static int32_t ocs_io_abort_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { struct ocs_softc *ocs = NULL; ocs_io_t *tmfio = arg; ocs_scsi_tmf_resp_e tmf_resp = OCS_SCSI_TMF_FUNCTION_COMPLETE; int32_t rc = 0; ocs = io->ocs; io->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_DEV; /* A good status indicates the IO was aborted and will be completed in * the IO's completion handler. Handle the other cases here. 
*/ switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: break; case OCS_SCSI_STATUS_NO_IO: break; default: device_printf(ocs->dev, "%s: unhandled status %d\n", __func__, scsi_status); tmf_resp = OCS_SCSI_TMF_FUNCTION_REJECTED; rc = -1; } ocs_scsi_send_tmf_resp(tmfio, tmf_resp, NULL, ocs_target_tmf_cb, NULL); return rc; } /** * @ingroup cam_io * @brief Process initiator IO completions * * @param io * @param scsi_status did the IO complete successfully * @param rsp pointer to response buffer * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_initiator_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; cam_status ccb_status= CAM_REQ_CMP_ERR; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap); } } if (scsi_status == OCS_SCSI_STATUS_CHECK_RESPONSE) { csio->scsi_status = rsp->scsi_status; if (SCSI_STATUS_OK != rsp->scsi_status) { ccb_status = CAM_SCSI_STATUS_ERROR; } csio->resid = rsp->residual; if (rsp->residual > 0) { uint32_t length = rsp->response_wire_length; /* underflow */ if (csio->dxfer_len == (length + csio->resid)) { ccb_status = CAM_REQ_CMP; } } else if (rsp->residual < 0) { ccb_status = CAM_DATA_RUN_ERR; } if ((rsp->sense_data_length) && !(ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR))) { uint32_t sense_len = 0; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; if (rsp->sense_data_length < csio->sense_len) { csio->sense_resid = csio->sense_len - 
rsp->sense_data_length; sense_len = rsp->sense_data_length; } else { csio->sense_resid = 0; sense_len = csio->sense_len; } ocs_memcpy(&csio->sense_data, rsp->sense_data, sense_len); } } else if (scsi_status != OCS_SCSI_STATUS_GOOD) { ccb_status = CAM_REQ_CMP_ERR; ocs_set_ccb_status(ccb, ccb_status); csio->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(csio->ccb_h.path, 1); } else { ccb_status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, ccb_status); ocs_scsi_io_free(io); csio->ccb_h.ccb_io_ptr = NULL; csio->ccb_h.ccb_ocs_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return 0; } /** * @brief Load scatter-gather list entries into an IO * * This routine relies on the driver instance's software context pointer and * the IO object pointer having been already assigned to hooks in the CCB. * Although the routine does not return success/fail, callers can look at the * n_sge member to determine if the mapping failed (0 on failure). * * @param arg pointer to the CAM ccb for this IO * @param seg DMA address/length pairs * @param nseg number of DMA address/length pairs * @param error any errors while mapping the IO */ static void ocs_scsi_dmamap_load(void *arg, bus_dma_segment_t *seg, int nseg, int error) { ocs_dmamap_load_arg_t *sglarg = (ocs_dmamap_load_arg_t*) arg; if (error) { printf("%s: seg=%p nseg=%d error=%d\n", __func__, seg, nseg, error); sglarg->rc = -1; } else { uint32_t i = 0; uint32_t c = 0; if ((sglarg->sgl_count + nseg) > sglarg->sgl_max) { printf("%s: sgl_count=%d nseg=%d max=%d\n", __func__, sglarg->sgl_count, nseg, sglarg->sgl_max); sglarg->rc = -2; return; } for (i = 0, c = sglarg->sgl_count; i < nseg; i++, c++) { sglarg->sgl[c].addr = seg[i].ds_addr; sglarg->sgl[c].len = seg[i].ds_len; } sglarg->sgl_count = c; sglarg->rc = 0; } } /** * @brief Build a scatter-gather list from a CAM CCB * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * @param io pointer to the previously allocated IO object * @param sgl 
pointer to SGL * @param sgl_max number of entries in sgl * * @return 0 on success, non-zero otherwise */ static int32_t ocs_build_scsi_sgl(struct ocs_softc *ocs, union ccb *ccb, ocs_io_t *io, ocs_scsi_sgl_t *sgl, uint32_t sgl_max) { ocs_dmamap_load_arg_t dmaarg; int32_t err = 0; if (!ocs || !ccb || !io || !sgl) { printf("%s: bad param o=%p c=%p i=%p s=%p\n", __func__, ocs, ccb, io, sgl); return -1; } io->tgt_io.flags &= ~OCS_CAM_IO_F_DMAPPED; dmaarg.sgl = sgl; dmaarg.sgl_count = 0; dmaarg.sgl_max = sgl_max; dmaarg.rc = 0; err = bus_dmamap_load_ccb(ocs->buf_dmat, io->tgt_io.dmap, ccb, ocs_scsi_dmamap_load, &dmaarg, 0); if (err || dmaarg.rc) { device_printf( ocs->dev, "%s: bus_dmamap_load_ccb error (%d %d)\n", __func__, err, dmaarg.rc); return -1; } io->tgt_io.flags |= OCS_CAM_IO_F_DMAPPED; return dmaarg.sgl_count; } /** * @ingroup cam_io * @brief Send a target IO * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * * @return 0 on success, non-zero otherwise */ static int32_t ocs_target_io(struct ocs_softc *ocs, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; ocs_io_t *io = NULL; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; bool sendstatus = ccb->ccb_h.flags & CAM_SEND_STATUS; uint32_t xferlen = csio->dxfer_len; int32_t rc = 0; io = ocs_scsi_find_io(ocs, csio->tag_id); if (io == NULL) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); panic("bad tag value"); return 1; } /* Received an ABORT TASK for this IO */ if (io->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) { /*device_printf(ocs->dev, "%s: XPT_CONT_TARGET_IO state=%d tag=%#x xid=%#x flags=%#x\n", __func__, io->tgt_io.state, io->tag, io->init_task_tag, io->tgt_io.flags);*/ io->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; if (ccb->ccb_h.flags & CAM_SEND_STATUS) { ocs_set_ccb_status(ccb, CAM_REQ_CMP); ocs_target_io_free(io); return 1; } ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); return 1; } io->tgt_io.app = ccb; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); ccb->ccb_h.status |= 
CAM_SIM_QUEUED; csio->ccb_h.ccb_ocs_ptr = ocs; csio->ccb_h.ccb_io_ptr = io; if ((sendstatus && (xferlen == 0))) { ocs_scsi_cmd_resp_t resp = { 0 }; ocs_assert(ccb->ccb_h.flags & CAM_SEND_STATUS, -1); io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = csio->scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; rc = ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb); } else if (xferlen != 0) { ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL]; int32_t sgl_count = 0; io->tgt_io.state = OCS_CAM_IO_DATA; if (sendstatus) io->tgt_io.sendresp = 1; sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl)); if (sgl_count > 0) { if (cam_dir == CAM_DIR_IN) { rc = ocs_scsi_send_rd_data(io, 0, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_target_io_cb, ccb); } else if (cam_dir == CAM_DIR_OUT) { rc = ocs_scsi_recv_wr_data(io, 0, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_target_io_cb, ccb); } else { device_printf(ocs->dev, "%s:" " unknown CAM direction %#x\n", __func__, cam_dir); ocs_set_ccb_status(ccb, CAM_REQ_INVALID); rc = 1; } } else { device_printf(ocs->dev, "%s: building SGL failed\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); rc = 1; } } else { device_printf(ocs->dev, "%s: Wrong value xfer and sendstatus" " are 0 \n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_INVALID); rc = 1; } if (rc) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; io->tgt_io.state = OCS_CAM_IO_DATA_DONE; device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n", __func__, io->tgt_io.state, io->tag); if ((sendstatus && (xferlen == 0))) { ocs_target_io_free(io); } } return rc; } static int32_t ocs_target_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { /*device_printf(io->ocs->dev, "%s: tag=%x io=%p s=%#x\n", __func__, io->tag, io, scsi_status);*/ ocs_scsi_io_complete(io); 
return 0; } /** * @ingroup cam_io * @brief Send an initiator IO * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * * @return 0 on success, non-zero otherwise */ static int32_t ocs_initiator_io(struct ocs_softc *ocs, union ccb *ccb) { int32_t rc; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccb_h = &csio->ccb_h; ocs_node_t *node = NULL; ocs_io_t *io = NULL; ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL]; int32_t sgl_count; + ocs_fcport *fcp; - ocs_fcport *fcp = NULL; fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path))); - if (fcp == NULL) { - device_printf(ocs->dev, "%s: fcp is NULL\n", __func__); - return -1; - } if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_LOST) { device_printf(ocs->dev, "%s: device LOST %d\n", __func__, ccb_h->target_id); return CAM_REQUEUE_REQ; } if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_NONE) { device_printf(ocs->dev, "%s: device not ready %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } if (!node->targ) { device_printf(ocs->dev, "%s: not target device %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); return -1; } /* eventhough this is INI, use target structure as ocs_build_scsi_sgl * only references the tgt_io part of an ocs_io_t */ io->tgt_io.app = ccb; csio->ccb_h.ccb_ocs_ptr = ocs; csio->ccb_h.ccb_io_ptr = io; sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl)); if (sgl_count < 0) { ocs_scsi_io_free(io); device_printf(ocs->dev, "%s: building SGL failed\n", __func__); return -1; } if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { io->timeout = 0; } else if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) { io->timeout = 
OCS_CAM_IO_TIMEOUT; } else { io->timeout = ccb->ccb_h.timeout; } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_NONE: rc = ocs_scsi_send_nodata_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, ocs_scsi_initiator_io_cb, ccb); break; case CAM_DIR_IN: rc = ocs_scsi_send_rd_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_initiator_io_cb, ccb); break; case CAM_DIR_OUT: rc = ocs_scsi_send_wr_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_initiator_io_cb, ccb); break; default: panic("%s invalid data direction %08x\n", __func__, ccb->ccb_h.flags); break; } return rc; } static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role) { uint32_t rc = 0, was = 0, i = 0; ocs_vport_spec_t *vport = fcp->vport; for (was = 0, i = 0; i < (ocs->num_vports + 1); i++) { if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE) was++; } // Physical port if ((was == 0) || (vport == NULL)) { fcp->role = new_role; if (vport == NULL) { ocs->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; ocs->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } else { vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_debug(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_debug(ocs, "port online failed : %d\n", rc); } return 0; } if ((fcp->role != KNOB_ROLE_NONE)){ fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 
1:0; /* New Sport will be created in sport deleted cb */ return ocs_sport_vport_del(ocs, ocs->domain, vport->wwpn, vport->wwnn); } fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; if (fcp->role != KNOB_ROLE_NONE) { return ocs_sport_vport_alloc(ocs->domain, vport); } return (0); } /** * @ingroup cam_api * @brief Process CAM actions * * The driver supplies this routine to the CAM during intialization and * is the main entry point for processing CAM Control Blocks (CCB) * * @param sim pointer to the SCSI Interface Module * @param ccb CAM control block * * @todo * - populate path inquiry data via info retrieved from SLI port */ static void ocs_action(struct cam_sim *sim, union ccb *ccb) { struct ocs_softc *ocs = (struct ocs_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; int32_t rc, bus; bus = cam_sim_bus(sim); switch (ccb_h->func_code) { case XPT_SCSI_IO: if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } rc = ocs_initiator_io(ocs, ccb); if (0 == rc) { ocs_set_ccb_status(ccb, CAM_REQ_INPROG | CAM_SIM_QUEUED); break; } else { if (rc == CAM_REQUEUE_REQ) { cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (rc > 0) { ocs_set_ccb_status(ccb, rc); } else { ocs_set_ccb_status(ccb, CAM_SEL_TIMEOUT); } } xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; struct ccb_pathinq_settings_fc *fc = &cpi->xport_specific.fc; ocs_fcport *fcp = FCPORT(ocs, bus); uint64_t wwn = 0; ocs_xport_stats_t value; cpi->version_num = 1; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; if (ocs->ocs_xport == OCS_XPORT_FC) { cpi->transport = XPORT_FC; } else { cpi->transport = 
XPORT_UNKNOWN; } cpi->transport_version = 0; /* Set the transport parameters of the SIM */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 1000; /* speed in Mbps */ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); fc->wwpn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); fc->wwnn = be64toh(wwn); fc->port = fcp->fc_id; if (ocs->config_tgt) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; cpi->hba_inquiry = PI_TAG_ABLE; cpi->max_target = OCS_MAX_TARGETS; cpi->initiator_id = ocs->max_remote_nodes + 1; if (!ocs->enable_ini) { cpi->hba_misc |= PIM_NOINITIATOR; } cpi->max_lun = OCS_MAX_LUN; cpi->bus_id = cam_sim_bus(sim); /* Need to supply a base transfer speed prior to linking up * Worst case, this would be FC 1Gbps */ cpi->base_transfer_speed = 1 * 1000 * 1000; /* Calculate the max IO supported * Worst case would be an OS page per SGL entry */ cpi->maxio = PAGE_SIZE * (ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL) - 1); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Emulex", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; ocs_xport_stats_t value; ocs_fcport *fcp = FCPORT(ocs, bus); ocs_fc_target_t *tgt = NULL; if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (cts->ccb_h.target_id > OCS_MAX_TARGETS) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } tgt = &fcp->tgt[cts->ccb_h.target_id]; if (tgt->state == OCS_TGT_STATE_NONE) { ocs_set_ccb_status(ccb, 
CAM_DEV_NOT_THERE); xpt_done(ccb); break; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; /* speed in Mbps */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 100; fc->wwpn = tgt->wwpn; fc->wwnn = tgt->wwnn; fc->port = tgt->port_id; fc->valid = CTS_FC_VALID_SPEED | CTS_FC_VALID_WWPN | CTS_FC_VALID_WWNN | CTS_FC_VALID_PORT; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, TRUE); xpt_done(ccb); break; case XPT_GET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; uint64_t wwn = 0; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (bus == 0) { wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); knob->xport_specific.fc.wwnn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); knob->xport_specific.fc.wwpn = be64toh(wwn); } else { knob->xport_specific.fc.wwnn = fcp->vport->wwnn; knob->xport_specific.fc.wwpn = fcp->vport->wwpn; } knob->xport_specific.fc.role = fcp->role; knob->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; bool role_changed = FALSE; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (knob->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB wwnn=%llx wwpn=%llx\n", __func__, (unsigned long long)knob->xport_specific.fc.wwnn, (unsigned long long)knob->xport_specific.fc.wwpn); } if (knob->xport_specific.fc.valid & 
KNOB_VALID_ROLE) { switch (knob->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != KNOB_ROLE_NONE) { role_changed = TRUE; } break; case KNOB_ROLE_TARGET: if (fcp->role != KNOB_ROLE_TARGET) { role_changed = TRUE; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != KNOB_ROLE_INITIATOR) { role_changed = TRUE; } break; case KNOB_ROLE_BOTH: if (fcp->role != KNOB_ROLE_BOTH) { role_changed = TRUE; } break; default: device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB unsupported role: %d\n", __func__, knob->xport_specific.fc.role); } if (role_changed) { device_printf(ocs->dev, "BUS:%d XPT_SET_SIM_KNOB old_role: %d new_role: %d\n", bus, fcp->role, knob->xport_specific.fc.role); ocs_fcp_change_role(ocs, fcp, knob->xport_specific.fc.role); } } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: ocs_abort_atio(ocs, ccb); break; case XPT_IMMEDIATE_NOTIFY: ocs_abort_inot(ocs, ccb); break; case XPT_SCSI_IO: rc = ocs_abort_initiator_io(ocs, accb); if (rc) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: printf("abort of unknown func %#x\n", accb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } case XPT_RESET_BUS: if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 0) { - ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); - + rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); + if (rc) { + ocs_log_debug(ocs, "Failed to bring port online" + " : %d\n", rc); + } ocs_set_ccb_status(ccb, CAM_REQ_CMP); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } xpt_done(ccb); break; case XPT_RESET_DEV: { ocs_node_t *node = NULL; ocs_io_t *io = NULL; int32_t rc = 0; ocs_fcport *fcp = FCPORT(ocs, bus); node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, ccb_h->target_id); 
ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } rc = ocs_scsi_send_tmf(node, io, NULL, ccb_h->target_lun, OCS_SCSI_TMF_LOGICAL_UNIT_RESET, NULL, 0, 0, /* sgl, sgl_count, length */ ocs_initiator_tmf_cb, NULL/*arg*/); if (rc) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP); } if (node->fcp2device) { ocs_reset_crn(node, ccb_h->target_lun); } xpt_done(ccb); break; } case XPT_EN_LUN: /* target support */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); device_printf(ocs->dev, "XPT_EN_LUN %sable %d:%d\n", ccb->cel.enable ? "en" : "dis", ccb->ccb_h.target_id, (unsigned int)ccb->ccb_h.target_lun); trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc) { trsrc->enabled = ccb->cel.enable; /* Abort all ATIO/INOT on LUN disable */ if (trsrc->enabled == FALSE) { ocs_tgt_resource_abort(ocs, trsrc); } else { STAILQ_INIT(&trsrc->atio); STAILQ_INIT(&trsrc->inot); } status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, status); xpt_done(ccb); break; } /* * The flow of target IOs in CAM is: * - CAM supplies a number of CCBs to the driver used for received * commands. * - when the driver receives a command, it copies the relevant * information to the CCB and returns it to the CAM using xpt_done() * - after the target server processes the request, it creates * a new CCB containing information on how to continue the IO and * passes that to the driver * - the driver processes the "continue IO" (a.k.a CTIO) CCB * - once the IO completes, the driver returns the CTIO to the CAM * using xpt_done() */ case XPT_ACCEPT_TARGET_IO: /* used to inform upper layer of received CDB (a.k.a. ATIO) */ case XPT_IMMEDIATE_NOTIFY: /* used to inform upper layer of other event (a.k.a. 
INOT) */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); /*printf("XPT_%s %p\n", ccb_h->func_code == XPT_ACCEPT_TARGET_IO ? "ACCEPT_TARGET_IO" : "IMMEDIATE_NOTIFY", ccb);*/ trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc == NULL) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } if (XPT_ACCEPT_TARGET_IO == ccb->ccb_h.func_code) { struct ccb_accept_tio *atio = NULL; atio = (struct ccb_accept_tio *)ccb; atio->init_id = 0x0badbeef; atio->tag_id = 0xdeadc0de; STAILQ_INSERT_TAIL(&trsrc->atio, &ccb->ccb_h, sim_links.stqe); } else { STAILQ_INSERT_TAIL(&trsrc->inot, &ccb->ccb_h, sim_links.stqe); } ccb->ccb_h.ccb_io_ptr = NULL; ccb->ccb_h.ccb_ocs_ptr = ocs; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); /* * These actions give resources to the target driver. * If we didn't return here, this function would call * xpt_done(), signaling to the upper layers that an * IO or other event had arrived. */ break; } case XPT_NOTIFY_ACKNOWLEDGE: { ocs_io_t *io = NULL; ocs_io_t *abortio = NULL; /* Get the IO reference for this tag */ io = ocs_scsi_find_io(ocs, ccb->cna2.tag_id); if (io == NULL) { device_printf(ocs->dev, "%s: XPT_NOTIFY_ACKNOWLEDGE no IO with tag %#x\n", __func__, ccb->cna2.tag_id); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } abortio = io->tgt_io.app; if (abortio) { abortio->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_NOTIFY; device_printf(ocs->dev, "%s: XPT_NOTIFY_ACK state=%d tag=%#x xid=%#x" " flags=%#x\n", __func__, abortio->tgt_io.state, abortio->tag, abortio->init_task_tag, abortio->tgt_io.flags); /* TMF response was sent in abort callback */ } else { ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_COMPLETE, NULL, ocs_target_tmf_cb, NULL); } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: /* continue target IO, sending data/response (a.k.a. 
	CTIO) */
		/* Hand the continue-IO CCB to the SLI layer; on failure complete
		 * it back to CAM immediately (status was set by ocs_target_io()). */
		if (ocs_target_io(ocs, ccb)) {
			device_printf(ocs->dev,
				"XPT_CONT_TARGET_IO failed flags=%x tag=%#x\n",
				ccb->ccb_h.flags, ccb->csio.tag_id);
			xpt_done(ccb);
		}
		break;
	default:
		device_printf(ocs->dev, "unhandled func_code = %#x\n",
			ccb_h->func_code);
		ccb_h->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/**
 * @ingroup cam_api
 * @brief Process events
 *
 * @param sim pointer to the SCSI Interface Module
 *
 */
static void
ocs_poll(struct cam_sim *sim)
{
	/* Polling is not implemented for this SIM; this only logs the call. */
	printf("%s\n", __func__);
}

/* Completion callback for initiator-side task management functions (e.g. the
 * LUN reset / abort task sent from ocs_action()); frees the TMF IO and maps
 * the transport status to a 0 / -1 result. */
static int32_t
ocs_initiator_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status,
	ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg)
{
	int32_t rc = 0;

	switch (scsi_status) {
	case OCS_SCSI_STATUS_GOOD:
	case OCS_SCSI_STATUS_NO_IO:
		break;
	case OCS_SCSI_STATUS_CHECK_RESPONSE:
		if (rsp->response_data_length == 0) {
			ocs_log_test(io->ocs, "check response without data?!?\n");
			rc = -1;
			break;
		}

		/* NOTE(review): byte 3 is presumably the FCP_RSP_INFO response
		 * code; nonzero means the TMF was not completed — confirm
		 * against the FCP spec. */
		if (rsp->response_data[3] != 0) {
			ocs_log_test(io->ocs, "TMF status %08x\n",
				be32toh(*((uint32_t *)rsp->response_data)));
			rc = -1;
			break;
		}
		break;
	default:
		ocs_log_test(io->ocs, "status=%#x\n", scsi_status);
		rc = -1;
	}

	/* The TMF IO is always freed here, success or failure. */
	ocs_scsi_io_free(io);

	return rc;
}

/**
 * @brief lookup target resource structure
 *
 * Arbitrarily support
 * - wildcard target ID + LU
 * - 0 target ID + non-wildcard LU
 *
 * @param ocs the driver instance's software context
 * @param ccb_h pointer to the CCB header
 * @param status returned status value
 *
 * @return pointer to the target resource, NULL if none available (e.g.
if LU * is not enabled) */ static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *fcp, struct ccb_hdr *ccb_h, uint32_t *status) { target_id_t tid = ccb_h->target_id; lun_id_t lun = ccb_h->target_lun; if (CAM_TARGET_WILDCARD == tid) { if (CAM_LUN_WILDCARD != lun) { *status = CAM_LUN_INVALID; return NULL; } return &fcp->targ_rsrc_wildcard; } else { if (lun < OCS_MAX_LUN) { return &fcp->targ_rsrc[lun]; } else { *status = CAM_LUN_INVALID; return NULL; } } } static int32_t ocs_tgt_resource_abort(struct ocs_softc *ocs, ocs_tgt_resource_t *trsrc) { union ccb *ccb = NULL; uint32_t count; count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->atio); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->inot); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); return 0; } static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb) { ocs_io_t *aio = NULL; ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc != NULL) { STAILQ_FOREACH(cur, &trsrc->atio, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->atio, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } /* if the ATIO has a valid IO pointer, CAM is telling * the driver that the ATIO (which represents the entire * exchange) has been aborted. 
*/ aio = accb->ccb_h.ccb_io_ptr; if (aio == NULL) { ccb->ccb_h.status = CAM_UA_ABORT; return; } device_printf(ocs->dev, "%s: XPT_ABORT ATIO state=%d tag=%#x" " xid=%#x flags=%#x\n", __func__, aio->tgt_io.state, aio->tag, aio->init_task_tag, aio->tgt_io.flags); /* Expectations are: * - abort task was received * - already aborted IO in the DEVICE * - already received NOTIFY ACKNOWLEDGE */ if ((aio->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) == 0) { device_printf(ocs->dev, "%s: abort not received or io completed \n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } aio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; ocs_target_io_free(aio); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb) { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc) { STAILQ_FOREACH(cur, &trsrc->inot, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->inot, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } ocs_set_ccb_status(ccb, CAM_UA_ABORT); return; } static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb) { ocs_node_t *node = NULL; ocs_io_t *io = NULL; int32_t rc = 0; struct ccb_scsiio *csio = &accb->csio; ocs_fcport *fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((accb)->ccb_h.path))); node = ocs_node_get_instance(ocs, fcp->tgt[accb->ccb_h.target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, accb->ccb_h.target_id); ocs_set_ccb_status(accb, CAM_DEV_NOT_THERE); xpt_done(accb); return (-1); } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", 
__func__); ocs_set_ccb_status(accb, CAM_REQ_CMP_ERR); xpt_done(accb); return (-1); } rc = ocs_scsi_send_tmf(node, io, (ocs_io_t *)csio->ccb_h.ccb_io_ptr, accb->ccb_h.target_lun, OCS_SCSI_TMF_ABORT_TASK, NULL, 0, 0, ocs_initiator_tmf_cb, NULL/*arg*/); return rc; } void ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { //ocs_io_t *io = obj; break; } default: { break; } } } void ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { ocs_io_t *io = obj; char *state_str = NULL; switch (io->tgt_io.state) { case OCS_CAM_IO_FREE: state_str = "FREE"; break; case OCS_CAM_IO_COMMAND: state_str = "COMMAND"; break; case OCS_CAM_IO_DATA: state_str = "DATA"; break; case OCS_CAM_IO_DATA_DONE: state_str = "DATA_DONE"; break; case OCS_CAM_IO_RESP: state_str = "RESP"; break; default: state_str = "xxx BAD xxx"; } ocs_ddump_value(textbuf, "cam_st", "%s", state_str); if (io->tgt_io.app) { ocs_ddump_value(textbuf, "cam_flags", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.flags); ocs_ddump_value(textbuf, "cam_status", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.status); } break; } default: { break; } } } int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber, ocs_scsi_vaddr_len_t addrlen[], uint32_t max_addrlen, void **dif_vaddr) { return -1; } uint32_t ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun) { uint32_t idx; 
struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn == NULL) { lcrn = ocs_malloc(node->ocs, sizeof(struct ocs_lun_crn), M_ZERO|M_NOWAIT); if (lcrn == NULL) { return (1); } lcrn->lun = lun; node->ini_node.lun_crn[idx] = lcrn; } if (lcrn->lun != lun) { return (1); } if (lcrn->crnseed == 0) lcrn->crnseed = 1; *crn = lcrn->crnseed++; return (0); } void ocs_del_crn(ocs_node_t *node) { uint32_t i; struct ocs_lun_crn *lcrn = NULL; for(i = 0; i < OCS_MAX_LUN; i++) { lcrn = node->ini_node.lun_crn[i]; if (lcrn) { ocs_free(node->ocs, lcrn, sizeof(*lcrn)); } } return; } void ocs_reset_crn(ocs_node_t *node, uint64_t lun) { uint32_t idx; struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn) lcrn->crnseed = 0; return; } Index: head/sys/dev/ocs_fc/ocs_hw.c =================================================================== --- head/sys/dev/ocs_fc/ocs_hw.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_hw.c (revision 343349) @@ -1,12551 +1,12573 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Defines and implements the Hardware Abstraction Layer (HW). * All interaction with the hardware is performed through the HW, which abstracts * the details of the underlying SLI-4 implementation. 
*/ /** * @defgroup devInitShutdown Device Initialization and Shutdown * @defgroup domain Domain Functions * @defgroup port Port Functions * @defgroup node Remote Node Functions * @defgroup io IO Functions * @defgroup interrupt Interrupt handling * @defgroup os OS Required Functions */ #include "ocs.h" #include "ocs_os.h" #include "ocs_hw.h" #include "ocs_hw_queues.h" #define OCS_HW_MQ_DEPTH 128 #define OCS_HW_READ_FCF_SIZE 4096 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256 #define OCS_HW_WQ_TIMER_PERIOD_MS 500 /* values used for setting the auto xfer rdy parameters */ #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */ #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0 #define OCS_HW_REQUE_XRI_REGTAG 65534 /* max command and response buffer lengths -- arbitrary at the moment */ #define OCS_HW_DMTF_CLP_CMD_MAX 256 #define OCS_HW_DMTF_CLP_RSP_MAX 256 /* HW global data */ ocs_hw_global_t hw_global; static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t); static void ocs_hw_adjust_wqs(ocs_hw_t *hw); static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw); static int32_t ocs_hw_cb_link(void *, void *); static int32_t ocs_hw_cb_fip(void *, void *); static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t); static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *); static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *); static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *); static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *); static int32_t ocs_hw_flush(ocs_hw_t *); static int32_t ocs_hw_command_cancel(ocs_hw_t *); static int32_t ocs_hw_io_cancel(ocs_hw_t *); 
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io); static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *); static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *); static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg); static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len); typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg); static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg); static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg); static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *); static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e 
ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg); static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license); static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw); static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw); static void ocs_hw_io_free_internal(void *arg); static void ocs_hw_io_free_port_owned(void *arg); static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf); static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint); static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status); static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t); static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw); static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable); /* HW domain database operations */ static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *); static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *); /* Port state machine */ static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); /* Domain state machine */ static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void 
*data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

/* BZ 161832 */
static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

/* WQE timeouts */
static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);

/**
 * @brief Track an IO on the active-WQE list for target WQE timeouts.
 *
 * If target WQE timeout emulation is enabled and this IO carries a
 * target WQE timeout, record the submit time and queue the IO on
 * io_timed_wqe; target_wqe_timer_cb() periodically scans that list.
 */
static inline void
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
		ocs_list_add_tail(&hw->io_timed_wqe, io);
		io->submit_ticks = ocs_get_os_ticks();
		ocs_unlock(&hw->io_lock);
	}
}

/**
 * @brief Remove an IO from the active-WQE list, if it is on it.
 *
 * Counterpart of ocs_hw_add_io_timed_wqe(); safe to call for IOs that
 * were never added (the on-list check guards the remove).
 */
static inline void
ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout) {
		/*
		 * If target wqe timeouts are enabled,
		 * remove from active wqe list.
		 */
		ocs_lock(&hw->io_lock);
		if (ocs_list_on_list(&io->wqe_link)) {
			ocs_list_remove(&hw->io_timed_wqe, io);
		}
		ocs_unlock(&hw->io_lock);
	}
}

/**
 * @brief Return 1 if the IO type originates the exchange (initiator
 * commands, CT, ELS requests), 0 for responder-side IO types.
 */
static uint8_t
ocs_hw_iotype_is_originator(uint16_t io_type)
{
	switch (io_type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_FC_CT:
	case OCS_HW_ELS_REQ:
		return 1;
	default:
		return 0;
	}
}

/**
 * @brief Decide whether a completed WQE still needs an explicit abort.
 *
 * @param status WCQE completion status.
 * @param ext Extended status (local-reject reason when applicable).
 * @param xb XB bit: non-zero when the exchange is still active.
 *
 * @return TRUE if an abort must be issued, FALSE otherwise.
 */
static uint8_t
ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
{
	/* if exchange not active, nothing to abort */
	if (!xb) {
		return FALSE;
	}
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		switch (ext) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}

/**
 * @brief Determine the number of chutes on the device.
 *
 * @par Description
 * Some devices require queue resources allocated per protocol processor
 * (chute).
This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device for protocol.
 */
static uint32_t
ocs_hw_get_num_chutes(ocs_hw_t *hw)
{
	uint32_t num_chutes = 1;

	/* Two chutes only when the device is dual-ULP capable and both
	 * ULPs are enabled; otherwise a single chute. */
	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		num_chutes = 2;
	}
	return num_chutes;
}

/*
 * Reset the cached link state to "unknown" defaults.  This revision
 * (r343349 hunk below) replaces the old NULL check -- which dereferenced
 * hw->os in its own error path -- with an assert.
 */
static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
-	if (hw == NULL) {
-		ocs_log_err(hw->os, "bad parameter hw=%p\n", hw);
-		return OCS_HW_RTN_ERROR;
-	}
+	ocs_hw_assert(hw);

	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * @par Description
 * Queries the FW for the maximum dump size
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
{
	uint8_t	buf[SLI4_BMBX_SIZE];
	uint8_t	bus, dev, func;
	int	rc;

	/* lancer only */
	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* attempt to determine the dump size for function 0 only.
*/ ocs_get_bus_dev_func(hw->os, &bus, &dev, &func); if (func == 0) { if (sli_cmd_common_set_dump_location(&hw->sli, buf, SLI4_BMBX_SIZE, 1, 0, NULL, 0)) { sli4_res_common_set_dump_location_t *rsp = (sli4_res_common_set_dump_location_t *) (buf + offsetof(sli4_cmd_sli_config_t, payload.embed)); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "set dump location command failed\n"); return rc; } else { hw->dump_size = rsp->buffer_length; ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length); } } } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Set up the Hardware Abstraction Layer module. * * @par Description * Calls set up to configure the hardware. * * @param hw Hardware context allocated by the caller. * @param os Device abstraction. * @param port_type Protocol type of port, such as FC and NIC. * * @todo Why is port_type a parameter? * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type) { uint32_t i; char prop_buf[32]; if (hw == NULL) { ocs_log_err(os, "bad parameter(s) hw=%p\n", hw); return OCS_HW_RTN_ERROR; } if (hw->hw_setup_called) { /* Setup run-time workarounds. * Call for each setup, to allow for hw_war_version */ ocs_hw_workaround_setup(hw); return OCS_HW_RTN_SUCCESS; } /* * ocs_hw_init() relies on NULL pointers indicating that a structure * needs allocation. 
If a structure is non-NULL, ocs_hw_init() won't * free/realloc that memory */ ocs_memset(hw, 0, sizeof(ocs_hw_t)); hw->hw_setup_called = TRUE; hw->os = os; ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os)); ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link); ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link); hw->cmd_head_count = 0; ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os)); ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os)); ocs_atomic_init(&hw->io_alloc_failed_count, 0); hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; hw->config.dif_seed = 0; hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT; hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT; hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT; hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT; if (sli_setup(&hw->sli, hw->os, port_type)) { ocs_log_err(hw->os, "SLI setup failed\n"); return OCS_HW_RTN_ERROR; } ocs_memset(hw->domains, 0, sizeof(hw->domains)); ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi)); ocs_hw_link_event_init(hw); sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw); sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw); /* * Set all the queue sizes to the maximum allowed. These values may * be changes later by the adjust and workaround functions. */ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) { hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i); } /* * The RQ assignment for RQ pair mode. 
*/ hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD; hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI); if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) { hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0); } /* by default, enable initiator-only auto-ABTS emulation */ hw->config.i_only_aab = TRUE; /* Setup run-time workarounds */ ocs_hw_workaround_setup(hw); /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { hw->first_domain_idx = -1; } /* Must be done after the workaround setup */ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { (void)ocs_hw_read_max_dump_size(hw); } /* calculate the number of WQs required. */ ocs_hw_adjust_wqs(hw); /* Set the default dif mode */ if (! sli_is_dif_inline_capable(&hw->sli)) { ocs_log_test(hw->os, "not inline capable, setting mode to separate\n"); hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE; } /* Workaround: BZ 161832 */ if (hw->workaround.use_dif_sec_xri) { ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link); } /* * Figure out the starting and max ULP to spread the WQs across the * ULPs. 
*/ if (sli_get_is_dual_ulp_capable(&hw->sli)) { if (sli_get_is_ulp_enabled(&hw->sli, 0) && sli_get_is_ulp_enabled(&hw->sli, 1)) { hw->ulp_start = 0; hw->ulp_max = 1; } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) { hw->ulp_start = 0; hw->ulp_max = 0; } else { hw->ulp_start = 1; hw->ulp_max = 1; } } else { if (sli_get_is_ulp_enabled(&hw->sli, 0)) { hw->ulp_start = 0; hw->ulp_max = 0; } else { hw->ulp_start = 1; hw->ulp_max = 1; } } ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n", hw->ulp_start, hw->ulp_max); hw->config.queue_topology = hw_global.queue_topology_string; hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology); hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ]; hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ]; hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ]; hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ]; hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ]; /* Verify qtop configuration against driver supported configuration */ if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) { ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n", OCE_HW_MAX_NUM_MRQ_PAIRS); return OCS_HW_RTN_ERROR; } if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) { ocs_log_crit(hw->os, "Max supported EQs = %d\n", OCS_HW_MAX_NUM_EQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) { ocs_log_crit(hw->os, "Max supported CQs = %d\n", OCS_HW_MAX_NUM_CQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) { ocs_log_crit(hw->os, "Max supported WQs = %d\n", OCS_HW_MAX_NUM_WQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) { ocs_log_crit(hw->os, "Max supported MQs = %d\n", OCS_HW_MAX_NUM_MQ); return OCS_HW_RTN_ERROR; } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Allocate memory structures to prepare for the device operation. * * @par Description * Allocates memory structures needed by the device and prepares the device * for operation. 
* @n @n @b Note: This function may be called more than once (for example, at * initialization and then after a reset), but the size of the internal resources * may not be changed without tearing down the HW (ocs_hw_teardown()). * * @param hw Hardware context allocated by the caller. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_init(ocs_hw_t *hw) { ocs_hw_rtn_e rc; uint32_t i = 0; uint8_t buf[SLI4_BMBX_SIZE]; uint32_t max_rpi; int rem_count; int written_size = 0; uint32_t count; char prop_buf[32]; uint32_t ramdisc_blocksize = 512; uint32_t q_count = 0; /* * Make sure the command lists are empty. If this is start-of-day, * they'll be empty since they were just initialized in ocs_hw_setup. * If we've just gone through a reset, the command and command pending * lists should have been cleaned up as part of the reset (ocs_hw_reset()). */ ocs_lock(&hw->cmd_lock); if (!ocs_list_empty(&hw->cmd_head)) { ocs_log_test(hw->os, "command found on cmd list\n"); ocs_unlock(&hw->cmd_lock); return OCS_HW_RTN_ERROR; } if (!ocs_list_empty(&hw->cmd_pending)) { ocs_log_test(hw->os, "command found on pending list\n"); ocs_unlock(&hw->cmd_lock); return OCS_HW_RTN_ERROR; } ocs_unlock(&hw->cmd_lock); /* Free RQ buffers if prevously allocated */ ocs_hw_rx_free(hw); /* * The IO queues must be initialized here for the reset case. The * ocs_hw_init_io() function will re-add the IOs to the free list. * The cmd_head list should be OK since we free all entries in * ocs_hw_command_cancel() that is called in the ocs_hw_reset(). */ /* If we are in this function due to a reset, there may be stale items * on lists that need to be removed. Clean them up. 
*/ rem_count=0; if (ocs_list_valid(&hw->io_wait_free)) { while ((!ocs_list_empty(&hw->io_wait_free))) { rem_count++; ocs_list_remove_head(&hw->io_wait_free); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count); } } rem_count=0; if (ocs_list_valid(&hw->io_inuse)) { while ((!ocs_list_empty(&hw->io_inuse))) { rem_count++; ocs_list_remove_head(&hw->io_inuse); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count); } } rem_count=0; if (ocs_list_valid(&hw->io_free)) { while ((!ocs_list_empty(&hw->io_free))) { rem_count++; ocs_list_remove_head(&hw->io_free); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count); } } if (ocs_list_valid(&hw->io_port_owned)) { while ((!ocs_list_empty(&hw->io_port_owned))) { ocs_list_remove_head(&hw->io_port_owned); } } ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link); ocs_list_init(&hw->io_free, ocs_hw_io_t, link); ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link); ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link); ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link); ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link); /* If MRQ not required, Make sure we dont request feature. */ if (hw->config.n_rq == 1) { hw->sli.config.features.flag.mrqp = FALSE; } if (sli_init(&hw->sli)) { ocs_log_err(hw->os, "SLI failed to initialize\n"); return OCS_HW_RTN_ERROR; } /* * Enable the auto xfer rdy feature if requested. 
*/ hw->auto_xfer_rdy_enabled = FALSE; if (sli_get_auto_xfer_rdy_capable(&hw->sli) && hw->config.auto_xfer_rdy_size > 0) { if (hw->config.esoc){ if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) { ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0); } written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize); } else { written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size); } if (written_size) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "config auto xfer rdy failed\n"); return rc; } } hw->auto_xfer_rdy_enabled = TRUE; if (hw->config.auto_xfer_rdy_t10_enable) { rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n"); return rc; } } } if(hw->sliport_healthcheck) { rc = ocs_hw_config_sli_port_health_check(hw, 0, 1); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Enabling Sliport Health check failed \n"); return rc; } } /* * Set FDT transfer hint, only works on Lancer */ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) { /* * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature. 
*/ ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT); } /* * Verify that we have not exceeded any queue sizes */ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ), OCS_HW_MAX_NUM_EQ); if (hw->config.n_eq > q_count) { ocs_log_err(hw->os, "requested %d EQ but %d allowed\n", hw->config.n_eq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ), OCS_HW_MAX_NUM_CQ); if (hw->config.n_cq > q_count) { ocs_log_err(hw->os, "requested %d CQ but %d allowed\n", hw->config.n_cq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ), OCS_HW_MAX_NUM_MQ); if (hw->config.n_mq > q_count) { ocs_log_err(hw->os, "requested %d MQ but %d allowed\n", hw->config.n_mq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ), OCS_HW_MAX_NUM_RQ); if (hw->config.n_rq > q_count) { ocs_log_err(hw->os, "requested %d RQ but %d allowed\n", hw->config.n_rq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ), OCS_HW_MAX_NUM_WQ); if (hw->config.n_wq > q_count) { ocs_log_err(hw->os, "requested %d WQ but %d allowed\n", hw->config.n_wq, q_count); return OCS_HW_RTN_ERROR; } /* zero the hashes */ ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash)); ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n", OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE); ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash)); ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n", OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE); ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash)); ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n", OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE); rc = ocs_hw_init_queues(hw, hw->qtop); if (rc != OCS_HW_RTN_SUCCESS) { return rc; } max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi); if (i) { ocs_dma_t payload_memory; rc = OCS_HW_RTN_ERROR; if (hw->rnode_mem.size) { ocs_dma_free(hw->os, &hw->rnode_mem); } if 
(ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) { ocs_log_err(hw->os, "remote node memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } payload_memory.size = 0; if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->rnode_mem, UINT16_MAX, &payload_memory)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (payload_memory.size != 0) { /* The command was non-embedded - need to free the dma buffer */ ocs_dma_free(hw->os, &payload_memory); } } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "header template registration failed\n"); return rc; } } /* Allocate and post RQ buffers */ rc = ocs_hw_rx_allocate(hw); if (rc) { ocs_log_err(hw->os, "rx_allocate failed\n"); return rc; } /* Populate hw->seq_free_list */ if (hw->seq_pool == NULL) { uint32_t count = 0; uint32_t i; /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */ for (i = 0; i < hw->hw_rq_count; i++) { count += hw->hw_rq[i]->entry_count; } hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count); if (hw->seq_pool == NULL) { ocs_log_err(hw->os, "malloc seq_pool failed\n"); return OCS_HW_RTN_NO_MEMORY; } } if(ocs_hw_rx_post(hw)) { ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n"); } /* Allocate rpi_ref if not previously allocated */ if (hw->rpi_ref == NULL) { hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref), OCS_M_ZERO | OCS_M_NOWAIT); if (hw->rpi_ref == NULL) { ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i); return OCS_HW_RTN_NO_MEMORY; } } for (i = 0; i < max_rpi; i ++) { ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0); ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0); } ocs_memset(hw->domains, 0, sizeof(hw->domains)); /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { hw->first_domain_idx = -1; } ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi)); /* Register a FCFI to allow unsolicited frames to be routed to the driver */ if (sli_get_medium(&hw->sli) 
== SLI_LINK_MEDIUM_FC) { if (hw->hw_mrq_count) { ocs_log_debug(hw->os, "using REG_FCFI MRQ\n"); rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n"); return rc; } rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n"); return rc; } } else { sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; ocs_log_debug(hw->os, "using REG_FCFI standard\n"); /* Set the filter match/mask values from hw's filter_def values */ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { rq_cfg[i].rq_id = 0xffff; rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); } /* * Update the rq_id's of the FCF configuration (don't update more than the number * of rq_cfg elements) */ for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) { hw_rq_t *rq = hw->hw_rq[i]; uint32_t j; for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) { uint32_t mask = (rq->filter_mask != 0) ? 
rq->filter_mask : 1; if (mask & (1U << j)) { rq_cfg[j].rq_id = rq->hdr->id; ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n", j, hw->config.filter_def[j], i, rq->hdr->id); } } } rc = OCS_HW_RTN_ERROR; if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "FCFI registration failed\n"); return rc; } hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi; } } /* * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits, * thus the pool allocation size of 64k) */ rc = ocs_hw_reqtag_init(hw); if (rc) { ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc); return rc; } rc = ocs_hw_setup_io(hw); if (rc) { ocs_log_err(hw->os, "IO allocation failure\n"); return rc; } rc = ocs_hw_init_io(hw); if (rc) { ocs_log_err(hw->os, "IO initialization failure\n"); return rc; } ocs_queue_history_init(hw->os, &hw->q_hist); /* get hw link config; polling, so callback will be called immediately */ hw->linkcfg = OCS_HW_LINKCFG_NA; ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw); /* if lancer ethernet, ethernet ports need to be enabled */ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) { if (ocs_hw_set_eth_license(hw, hw->eth_license)) { /* log warning but continue */ ocs_log_err(hw->os, "Failed to set ethernet license\n"); } } /* Set the DIF seed - only for lancer right now */ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) && ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Failed to set DIF seed value\n"); return rc; } /* Set the DIF mode - skyhawk only */ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) && sli_get_dif_capable(&hw->sli)) { rc = ocs_hw_set_dif_mode(hw); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Failed to set DIF mode value\n"); return rc; } } 
/* * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries */ for (i = 0; i < hw->eq_count; i++) { sli_queue_arm(&hw->sli, &hw->eq[i], TRUE); } /* * Initialize RQ hash */ for (i = 0; i < hw->rq_count; i++) { ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i); } /* * Initialize WQ hash */ for (i = 0; i < hw->wq_count; i++) { ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i); } /* * Arming the CQ allows (e.g.) MQ completions to write CQ entries */ for (i = 0; i < hw->cq_count; i++) { ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i); sli_queue_arm(&hw->sli, &hw->cq[i], TRUE); } /* record the fact that the queues are functional */ hw->state = OCS_HW_STATE_ACTIVE; /* Note: Must be after the IOs are setup and the state is active*/ if (ocs_hw_rqpair_init(hw)) { ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n"); } /* finally kick off periodic timer to check for timed out target WQEs */ if (hw->config.emulate_tgt_wqe_timeout) { ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); } /* * Allocate a HW IOs for send frame. 
Allocate one for each Class 1 WQ, or if there * are none of those, allocate one for WQ[0] */ if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) { for (i = 0; i < count; i++) { hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]); wq->send_frame_io = ocs_hw_io_alloc(hw); if (wq->send_frame_io == NULL) { ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n"); } } } else { hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw); if (hw->hw_wq[0]->send_frame_io == NULL) { ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n"); } } /* Initialize send frame frame sequence id */ ocs_atomic_init(&hw->send_frame_seq_id, 0); /* Initialize watchdog timer if enabled by user */ hw->expiration_logged = 0; if(hw->watchdog_timeout) { if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) { ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n"); }else if(!ocs_hw_config_watchdog_timer(hw)) { ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout); } } if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) { ocs_log_err(hw->os, "domain node memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) { ocs_log_err(hw->os, "domain fcf memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map, SLI4_MIN_LOOP_MAP_BYTES, 4)) { ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size); } return OCS_HW_RTN_SUCCESS; } /** * @brief Configure Multi-RQ * * @param hw Hardware context allocated by the caller. * @param mode 1 to set MRQ filters and 0 to set FCFI index * @param vlanid valid in mode 0 * @param fcf_index valid in mode 0 * * @return Returns 0 on success, or a non-zero value on failure. 
 */
static int32_t
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
{
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	hw_rq_t *rq;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	uint32_t i, j;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	int32_t rc;

	/* In FCFI mode the filter table is not (re)built; go straight to the command */
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		goto issue_cmd;
	}

	/* Set the filter match/mask values from hw's filter_def values.
	 * Each 32-bit filter_def packs, low byte to high byte:
	 * r_ctl_mask, r_ctl_match, type_mask, type_match. */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;	/* 0xffff == slot not yet assigned to an RQ */
		rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
	}

	/* Accumulate counts for each filter type used, build rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Slot already used. Bail out if this is not the
					 * RQ-set (MRQ) case sharing the same base id. */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;
					}
					continue;
				}

				if (rq->is_mrq) {
					rq_filter[j].rq_id = rq->base_mrq_id;
					mrq_bitmask |= (1U << j);
				} else {
					rq_filter[j].rq_id = rq->hdr->id;
				}
			}
		}
	}

issue_cmd:
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
				  buf,					/* buf */
				  SLI4_BMBX_SIZE,			/* size */
				  mode,					/* mode 1 */
				  fcf_index,				/* fcf_index */
				  vlanid,				/* vlan_id */
				  hw->config.rq_selection_policy,	/* RQ selection policy*/
				  mrq_bitmask,				/* MRQ bitmask */
				  hw->hw_mrq_count,			/* num_mrqs */
				  rq_filter);				/* RQ filter */
	/* A zero return from the command builder indicates it failed to
	 * construct the mailbox command. */
	if (rc == 0) {
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;
	}

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	/* The polled command writes its response back into buf */
	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
			    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;
	}

	/* Only FCFI mode returns an FCF indicator worth recording */
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
	}
	return 0;
}

/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 *
 * @return None.
 */
static void
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;

	/* On failure, record "not available" rather than a stale value */
	if (status == 0) {
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
	} else {
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	}
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * @par Description
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_teardown(ocs_hw_t *hw)
{
	uint32_t	i = 0;
	uint32_t	iters = 10;	/* XXX up to ~100 ms total wait (10 x 10 ms below) */
	uint32_t	max_rpi;
	uint32_t	destroy_queues;
	uint32_t	free_memory;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	/* Only destroy queues / free their memory if they were actually set up */
	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel watchdog timer if enabled */
	if(hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);
	}

	/* Cancel Sliport Healthcheck */
	if(hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		ocs_hw_flush(hw);

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {
			ocs_udelay(10000);
			ocs_hw_flush(hw);
			iters--;
		}

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		} else {
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
		}

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
	} else {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	ocs_lock_free(&hw->cmd_lock);

	/* Free unregistered RPI if workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	if (hw->rpi_ref) {
		/* Log (but do not fail on) any RPIs still referenced at teardown */
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
						i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
			}
		}
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
		hw->rpi_ref = NULL;
	}

	ocs_dma_free(hw->os, &hw->rnode_mem);

	/* Free per-IO objects and their SGL DMA buffers */
	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
			    (hw->io[i]->sgl->virt != NULL)) {
				if(hw->io[i]->is_port_owned) {
					ocs_lock_free(&hw->io[i]->axr_lock);
				}
				ocs_dma_free(hw->os, hw->io[i]->sgl);
			}
			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			hw->io[i] = NULL;
		}
		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
		hw->wqe_buffs = NULL;
		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
		hw->io = NULL;
	}

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	/* Free all queues; destroy/free behavior depends on the state captured above */
	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
	}

	ocs_hw_qtop_free(hw->qtop);

	/* Free rq buffers */
	ocs_hw_rx_free(hw);

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");
	}

	ocs_queue_history_free(&hw->q_hist);

	/* record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Reset the chip (function-level or firmware reset) and return the
 * HW to the OCS_HW_STATE_QUEUES_ALLOCATED state.
 *
 * @param hw Hardware context allocated by the caller.
 * @param reset Reset type (OCS_HW_RESET_FUNCTION or OCS_HW_RESET_FIRMWARE).
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */
ocs_hw_rtn_e
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
{
	uint32_t	i;
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
	uint32_t	iters;
	ocs_hw_state_e	prev_state = hw->state;

	/* A reset from a non-active state is unexpected but not fatal */
	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
	}

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	ocs_hw_flush(hw);

	/*
	 * If an mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	iters = 10;
	while (!ocs_list_empty(&hw->cmd_head) && iters) {
		ocs_udelay(10000);
		ocs_hw_flush(hw);
		iters--;
	}

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
	} else {
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	switch(reset) {
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_soft_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	}

	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
			/* The io lists should be empty, but remove any that didn't get cleaned up. */
			while (!ocs_list_empty(&hw->io_timed_wqe)) {
				ocs_list_remove_head(&hw->io_timed_wqe);
			}
			/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

			while (!ocs_list_empty(&hw->io_free)) {
				ocs_list_remove_head(&hw->io_free);
			}
			while (!ocs_list_empty(&hw->io_wait_free)) {
				ocs_list_remove_head(&hw->io_wait_free);
			}

			/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
			ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);
	}

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);
		}

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);
		}

		/* Clear the RQ tracker entries so no stale sequences survive the reset */
		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				uint32_t j;

				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;
				}
			}
		}

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);
		}

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);
		}

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);
		}

		/* Free rq buffers */
		ocs_hw_rx_free(hw);

		/* Teardown the HW queue topology */
		hw_queue_teardown(hw);
	} else {
		/* Free rq buffers */
		ocs_hw_rx_free(hw);
	}

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;

	return rc;
}

/* Return the number of event queues configured for this HW context */
int32_t
ocs_hw_get_num_eq(ocs_hw_t *hw)
{
	return hw->eq_count;
}

static int32_t
ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
{
	/* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf
	 * No further explanation is given in the document.
* */ return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 && sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10); } ocs_hw_rtn_e ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; int32_t tmp; if (!value) { return OCS_HW_RTN_ERROR; } *value = 0; switch (prop) { case OCS_HW_N_IO: *value = hw->config.n_io; break; case OCS_HW_N_SGL: *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED); break; case OCS_HW_MAX_IO: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI); break; case OCS_HW_MAX_NODES: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); break; case OCS_HW_MAX_RQ_ENTRIES: *value = hw->num_qentries[SLI_QTYPE_RQ]; break; case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: *value = hw->config.rq_default_buffer_size; break; case OCS_HW_AUTO_XFER_RDY_CAPABLE: *value = sli_get_auto_xfer_rdy_capable(&hw->sli); break; case OCS_HW_AUTO_XFER_RDY_XRI_CNT: *value = hw->config.auto_xfer_rdy_xri_cnt; break; case OCS_HW_AUTO_XFER_RDY_SIZE: *value = hw->config.auto_xfer_rdy_size; break; case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: switch (hw->config.auto_xfer_rdy_blk_size_chip) { case 0: *value = 512; break; case 1: *value = 1024; break; case 2: *value = 2048; break; case 3: *value = 4096; break; case 4: *value = 520; break; default: *value = 0; rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: *value = hw->config.auto_xfer_rdy_t10_enable; break; case OCS_HW_AUTO_XFER_RDY_P_TYPE: *value = hw->config.auto_xfer_rdy_p_type; break; case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: *value = hw->config.auto_xfer_rdy_ref_tag_is_lba; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: *value = hw->config.auto_xfer_rdy_app_tag_valid; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: *value = hw->config.auto_xfer_rdy_app_tag_value; break; case OCS_HW_MAX_SGE: *value = sli_get_max_sge(&hw->sli); break; case OCS_HW_MAX_SGL: *value = sli_get_max_sgl(&hw->sli); break; case OCS_HW_TOPOLOGY: /* * Infer link.status based on 
link.speed. * Report OCS_HW_TOPOLOGY_NONE if the link is down. */ if (hw->link.speed == 0) { *value = OCS_HW_TOPOLOGY_NONE; break; } switch (hw->link.topology) { case SLI_LINK_TOPO_NPORT: *value = OCS_HW_TOPOLOGY_NPORT; break; case SLI_LINK_TOPO_LOOP: *value = OCS_HW_TOPOLOGY_LOOP; break; case SLI_LINK_TOPO_NONE: *value = OCS_HW_TOPOLOGY_NONE; break; default: ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology); rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_CONFIG_TOPOLOGY: *value = hw->config.topology; break; case OCS_HW_LINK_SPEED: *value = hw->link.speed; break; case OCS_HW_LINK_CONFIG_SPEED: switch (hw->config.speed) { case FC_LINK_SPEED_10G: *value = 10000; break; case FC_LINK_SPEED_AUTO_16_8_4: *value = 0; break; case FC_LINK_SPEED_2G: *value = 2000; break; case FC_LINK_SPEED_4G: *value = 4000; break; case FC_LINK_SPEED_8G: *value = 8000; break; case FC_LINK_SPEED_16G: *value = 16000; break; case FC_LINK_SPEED_32G: *value = 32000; break; default: ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed); rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_IF_TYPE: *value = sli_get_if_type(&hw->sli); break; case OCS_HW_SLI_REV: *value = sli_get_sli_rev(&hw->sli); break; case OCS_HW_SLI_FAMILY: *value = sli_get_sli_family(&hw->sli); break; case OCS_HW_DIF_CAPABLE: *value = sli_get_dif_capable(&hw->sli); break; case OCS_HW_DIF_SEED: *value = hw->config.dif_seed; break; case OCS_HW_DIF_MODE: *value = hw->config.dif_mode; break; case OCS_HW_DIF_MULTI_SEPARATE: /* Lancer supports multiple DIF separates */ if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) { *value = TRUE; } else { *value = FALSE; } break; case OCS_HW_DUMP_MAX_SIZE: *value = hw->dump_size; break; case OCS_HW_DUMP_READY: *value = sli_dump_is_ready(&hw->sli); break; case OCS_HW_DUMP_PRESENT: *value = sli_dump_is_present(&hw->sli); break; case OCS_HW_RESET_REQUIRED: tmp = sli_reset_required(&hw->sli); if(tmp < 0) { rc = OCS_HW_RTN_ERROR; } else { *value = tmp; } break; case 
OCS_HW_FW_ERROR: *value = sli_fw_error_status(&hw->sli); break; case OCS_HW_FW_READY: *value = sli_fw_ready(&hw->sli); break; case OCS_HW_FW_TIMED_OUT: *value = ocs_hw_get_fw_timed_out(hw); break; case OCS_HW_HIGH_LOGIN_MODE: *value = sli_get_hlm_capable(&hw->sli); break; case OCS_HW_PREREGISTER_SGL: *value = sli_get_sgl_preregister_required(&hw->sli); break; case OCS_HW_HW_REV1: *value = sli_get_hw_revision(&hw->sli, 0); break; case OCS_HW_HW_REV2: *value = sli_get_hw_revision(&hw->sli, 1); break; case OCS_HW_HW_REV3: *value = sli_get_hw_revision(&hw->sli, 2); break; case OCS_HW_LINKCFG: *value = hw->linkcfg; break; case OCS_HW_ETH_LICENSE: *value = hw->eth_license; break; case OCS_HW_LINK_MODULE_TYPE: *value = sli_get_link_module_type(&hw->sli); break; case OCS_HW_NUM_CHUTES: *value = ocs_hw_get_num_chutes(hw); break; case OCS_HW_DISABLE_AR_TGT_DIF: *value = hw->workaround.disable_ar_tgt_dif; break; case OCS_HW_EMULATE_I_ONLY_AAB: *value = hw->config.i_only_aab; break; case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: *value = hw->config.emulate_tgt_wqe_timeout; break; case OCS_HW_VPD_LEN: *value = sli_get_vpd_len(&hw->sli); break; case OCS_HW_SGL_CHAINING_CAPABLE: *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported; break; case OCS_HW_SGL_CHAINING_ALLOWED: /* * SGL Chaining is allowed in the following cases: * 1. Lancer with host SGL Lists * 2. 
Skyhawk with pre-registered SGL Lists */ *value = FALSE; if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && !sli_get_sgl_preregister(&hw->sli) && SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { *value = TRUE; } if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && sli_get_sgl_preregister(&hw->sli) && ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) { *value = TRUE; } break; case OCS_HW_SGL_CHAINING_HOST_ALLOCATED: /* Only lancer supports host allocated SGL Chaining buffers. */ *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli))); break; case OCS_HW_SEND_FRAME_CAPABLE: if (hw->workaround.ignore_send_frame) { *value = 0; } else { /* Only lancer is capable */ *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH; } break; case OCS_HW_RQ_SELECTION_POLICY: *value = hw->config.rq_selection_policy; break; case OCS_HW_RR_QUANTA: *value = hw->config.rr_quanta; break; case OCS_HW_MAX_VPORTS: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI); + break; default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; } return rc; } void * ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop) { void *rc = NULL; switch (prop) { case OCS_HW_WWN_NODE: rc = sli_get_wwn_node(&hw->sli); break; case OCS_HW_WWN_PORT: rc = sli_get_wwn_port(&hw->sli); break; case OCS_HW_VPD: /* make sure VPD length is non-zero */ if (sli_get_vpd_len(&hw->sli)) { rc = sli_get_vpd(&hw->sli); } break; case OCS_HW_FW_REV: rc = sli_get_fw_name(&hw->sli, 0); break; case OCS_HW_FW_REV2: rc = sli_get_fw_name(&hw->sli, 1); break; case OCS_HW_IPL: rc = sli_get_ipl_name(&hw->sli); break; case OCS_HW_PORTNUM: rc = sli_get_portnum(&hw->sli); break; case OCS_HW_BIOS_VERSION_STRING: rc = sli_get_bios_version_string(&hw->sli); break; default: 
ocs_log_test(hw->os, "unsupported property %#x\n", prop); } return rc; } ocs_hw_rtn_e ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; switch (prop) { case OCS_HW_N_IO: if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) || value == 0) { ocs_log_test(hw->os, "IO value out of range %d vs %d\n", value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI)); rc = OCS_HW_RTN_ERROR; } else { hw->config.n_io = value; } break; case OCS_HW_N_SGL: value += SLI4_SGE_MAX_RESERVED; if (value > sli_get_max_sgl(&hw->sli)) { ocs_log_test(hw->os, "SGL value out of range %d vs %d\n", value, sli_get_max_sgl(&hw->sli)); rc = OCS_HW_RTN_ERROR; } else { hw->config.n_sgl = value; } break; case OCS_HW_TOPOLOGY: if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) && (value != OCS_HW_TOPOLOGY_AUTO)) { ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n", value, sli_get_medium(&hw->sli)); rc = OCS_HW_RTN_ERROR; break; } switch (value) { case OCS_HW_TOPOLOGY_AUTO: if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC); } else { sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE); } break; case OCS_HW_TOPOLOGY_NPORT: sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA); break; case OCS_HW_TOPOLOGY_LOOP: sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL); break; default: ocs_log_test(hw->os, "unsupported topology %#x\n", value); rc = OCS_HW_RTN_ERROR; } hw->config.topology = value; break; case OCS_HW_LINK_SPEED: if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) { switch (value) { case 0: /* Auto-speed negotiation */ case 10000: /* FCoE speed */ hw->config.speed = FC_LINK_SPEED_10G; break; default: ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n", value, sli_get_medium(&hw->sli)); rc = OCS_HW_RTN_ERROR; } break; } switch (value) { case 0: /* Auto-speed negotiation */ hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; break; case 2000: /* FC speeds */ hw->config.speed = 
FC_LINK_SPEED_2G; break; case 4000: hw->config.speed = FC_LINK_SPEED_4G; break; case 8000: hw->config.speed = FC_LINK_SPEED_8G; break; case 16000: hw->config.speed = FC_LINK_SPEED_16G; break; case 32000: hw->config.speed = FC_LINK_SPEED_32G; break; default: ocs_log_test(hw->os, "unsupported speed %d\n", value); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_DIF_SEED: /* Set the DIF seed - only for lancer right now */ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "DIF seed not supported for this device\n"); rc = OCS_HW_RTN_ERROR; } else { hw->config.dif_seed = value; } break; case OCS_HW_DIF_MODE: switch (value) { case OCS_HW_DIF_MODE_INLINE: /* * Make sure we support inline DIF. * * Note: Having both bits clear means that we have old * FW that doesn't set the bits. */ if (sli_is_dif_inline_capable(&hw->sli)) { hw->config.dif_mode = value; } else { ocs_log_test(hw->os, "chip does not support DIF inline\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_DIF_MODE_SEPARATE: /* Make sure we support DIF separates. 
*/ if (sli_is_dif_separate_capable(&hw->sli)) { hw->config.dif_mode = value; } else { ocs_log_test(hw->os, "chip does not support DIF separate\n"); rc = OCS_HW_RTN_ERROR; } } break; case OCS_HW_RQ_PROCESS_LIMIT: { hw_rq_t *rq; uint32_t i; /* For each hw_rq object, set its parent CQ limit value */ for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; hw->cq[rq->cq->instance].proc_limit = value; } break; } case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: hw->config.rq_default_buffer_size = value; break; case OCS_HW_AUTO_XFER_RDY_XRI_CNT: hw->config.auto_xfer_rdy_xri_cnt = value; break; case OCS_HW_AUTO_XFER_RDY_SIZE: hw->config.auto_xfer_rdy_size = value; break; case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: switch (value) { case 512: hw->config.auto_xfer_rdy_blk_size_chip = 0; break; case 1024: hw->config.auto_xfer_rdy_blk_size_chip = 1; break; case 2048: hw->config.auto_xfer_rdy_blk_size_chip = 2; break; case 4096: hw->config.auto_xfer_rdy_blk_size_chip = 3; break; case 520: hw->config.auto_xfer_rdy_blk_size_chip = 4; break; default: ocs_log_err(hw->os, "Invalid block size %d\n", value); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: hw->config.auto_xfer_rdy_t10_enable = value; break; case OCS_HW_AUTO_XFER_RDY_P_TYPE: hw->config.auto_xfer_rdy_p_type = value; break; case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: hw->config.auto_xfer_rdy_ref_tag_is_lba = value; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: hw->config.auto_xfer_rdy_app_tag_valid = value; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: hw->config.auto_xfer_rdy_app_tag_value = value; break; case OCS_ESOC: hw->config.esoc = value; + break; case OCS_HW_HIGH_LOGIN_MODE: rc = sli_set_hlm(&hw->sli, value); break; case OCS_HW_PREREGISTER_SGL: rc = sli_set_sgl_preregister(&hw->sli, value); break; case OCS_HW_ETH_LICENSE: hw->eth_license = value; break; case OCS_HW_EMULATE_I_ONLY_AAB: hw->config.i_only_aab = value; break; case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: hw->config.emulate_tgt_wqe_timeout = value; 
break; case OCS_HW_BOUNCE: hw->config.bounce = value; break; case OCS_HW_RQ_SELECTION_POLICY: hw->config.rq_selection_policy = value; break; case OCS_HW_RR_QUANTA: hw->config.rr_quanta = value; break; default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; } return rc; } ocs_hw_rtn_e ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; switch (prop) { case OCS_HW_WAR_VERSION: hw->hw_war_version = value; break; case OCS_HW_FILTER_DEF: { char *p = value; uint32_t idx = 0; for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) { hw->config.filter_def[idx] = 0; } for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) { hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0); p = ocs_strchr(p, ','); if (p != NULL) { p++; } } break; } default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; break; } return rc; } /** * @ingroup interrupt * @brief Check for the events associated with the interrupt vector. * * @param hw Hardware context. * @param vector Zero-based interrupt vector number. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector) { int32_t rc = 0; if (!hw) { ocs_log_err(NULL, "HW context NULL?!?\n"); return -1; } if (vector > hw->eq_count) { ocs_log_err(hw->os, "vector %d. max %d\n", vector, hw->eq_count); return -1; } /* * The caller should disable interrupts if they wish to prevent us * from processing during a shutdown. The following states are defined: * OCS_HW_STATE_UNINITIALIZED - No queues allocated * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, * queues are cleared. * OCS_HW_STATE_ACTIVE - Chip and queues are operational * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox * completions. 
	 */
	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);

		/* Re-arm queue if there are no entries */
		if (rc != 0) {
			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
		}
	}
	return rc;
}

/* Bounce handler: forwards a deferred unsolicited sequence to the registered
 * unsolicited callback. */
void
ocs_hw_unsol_process_bounce(void *arg)
{
	ocs_hw_sequence_t *seq = arg;
	ocs_hw_t *hw = seq->hw;

	ocs_hw_assert(hw != NULL);
	ocs_hw_assert(hw->callback.unsolicited != NULL);

	hw->callback.unsolicited(hw->args.unsolicited, seq);
}

/* Top-level per-vector event processing entry point; dispatches to
 * ocs_hw_eq_process() for the EQ mapped to this vector. */
int32_t
ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
{
	hw_eq_t *eq;
	int32_t rc = 0;

	CPUTRACE("");

	/*
	 * The caller should disable interrupts if they wish to prevent us
	 * from processing during a shutdown. The following states are defined:
	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
	 *                                    queues are cleared.
	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
	 *                                        completions.
	 */
	if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
		return 0;
	}

	/* Get pointer to hw_eq_t */
	eq = hw->hw_eq[vector];

	OCS_STAT(eq->use_count++);

	rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);

	return rc;
}

/**
 * @ingroup interrupt
 * @brief Process events associated with an EQ.
 *
 * @par Description
 * Loop termination:
 * @n @n Without a mechanism to terminate the completion processing loop, it
 * is possible under some workload conditions for the loop to never terminate
 * (or at least take longer than the OS is happy to have an interrupt handler
 * or kernel thread context hold a CPU without yielding).
 * @n @n The approach taken here is to periodically check how much time
 * we have been in this
 * processing loop, and if we exceed a predetermined time (multiple seconds), the
 * loop is terminated, and ocs_hw_process() returns.
 *
 * @param hw Hardware context.
 * @param eq Pointer to HW EQ object.
 * @param max_isr_time_msec Maximum time in msec to stay in this function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
{
	uint8_t		eqe[sizeof(sli4_eqe_t)] = { 0 };
	uint32_t	done = FALSE;
	uint32_t	tcheck_count;
	time_t		tstart;
	time_t		telapsed;

	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
	tstart = ocs_msectime();

	CPUTRACE("");

	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
		uint16_t	cq_id = 0;
		int32_t		rc;

		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
		if (unlikely(rc)) {
			if (rc > 0) {
				uint32_t i;

				/*
				 * Received a sentinel EQE indicating the EQ is full.
				 * Process all CQs
				 */
				for (i = 0; i < hw->cq_count; i++) {
					ocs_hw_cq_process(hw, hw->hw_cq[i]);
				}
				continue;
			} else {
				return rc;
			}
		} else {
			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
			if (likely(index >= 0)) {
				ocs_hw_cq_process(hw, hw->hw_cq[index]);
			} else {
				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
			}
		}

		/* Periodically re-arm the EQ without interrupt to keep entries flowing */
		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
			sli_queue_arm(&hw->sli, eq->queue, FALSE);
		}

		/* Time-bound the loop: only sample the clock every
		 * OCS_HW_TIMECHECK_ITERATIONS iterations */
		if (tcheck_count && (--tcheck_count == 0)) {
			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
			telapsed = ocs_msectime() - tstart;
			if (telapsed >= max_isr_time_msec) {
				done = TRUE;
			}
		}
	}
	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);

	return 0;
}

/**
 * @brief Submit queued (pending) mbx commands.
 *
 * @par Description
 * Submit queued mailbox commands.
 * --- Assumes that hw->cmd_lock is held ---
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
*/ static int32_t ocs_hw_cmd_submit_pending(ocs_hw_t *hw) { ocs_command_ctx_t *ctx; int32_t rc = 0; /* Assumes lock held */ /* Only submit MQE if there's room */ while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) { ctx = ocs_list_remove_head(&hw->cmd_pending); if (ctx == NULL) { break; } ocs_list_add_tail(&hw->cmd_head, ctx); hw->cmd_head_count++; if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) { ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc); rc = -1; break; } } return rc; } /** * @ingroup io * @brief Issue a SLI command. * * @par Description * Send a mailbox command to the hardware, and either wait for a completion * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT). * * @param hw Hardware context. * @param cmd Buffer containing a formatted command and results. * @param opts Command options: * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion. * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback. * @param cb Function callback used for asynchronous mode. May be NULL. * @n Prototype is (*cb)(void *arg, uint8_t *cmd). * @n @n @b Note: If the * callback function pointer is NULL, the results of the command are silently * discarded, allowing this pointer to exist solely on the stack. * @param arg Argument passed to an asynchronous callback. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* * If the chip is in an error state (UE'd) then reject this mailbox * command. 
*/ if (sli_fw_error_status(&hw->sli) > 0) { uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1); uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2); if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) { hw->expiration_logged = 1; ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n", hw->watchdog_timeout); } ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS), err1, err2); return OCS_HW_RTN_ERROR; } if (OCS_CMD_POLL == opts) { ocs_lock(&hw->cmd_lock); if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) { /* * Can't issue Boot-strap mailbox command with other * mail-queue commands pending as this interaction is * undefined */ rc = OCS_HW_RTN_ERROR; } else { void *bmbx = hw->sli.bmbx.virt; ocs_memset(bmbx, 0, SLI4_BMBX_SIZE); ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE); if (sli_bmbx_command(&hw->sli) == 0) { rc = OCS_HW_RTN_SUCCESS; ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE); } } ocs_unlock(&hw->cmd_lock); } else if (OCS_CMD_NOWAIT == opts) { ocs_command_ctx_t *ctx = NULL; ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT); if (!ctx) { ocs_log_err(hw->os, "can't allocate command context\n"); return OCS_HW_RTN_NO_RESOURCES; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state); ocs_free(hw->os, ctx, sizeof(*ctx)); return OCS_HW_RTN_ERROR; } if (cb) { ctx->cb = cb; ctx->arg = arg; } ctx->buf = cmd; ctx->ctx = hw; ocs_lock(&hw->cmd_lock); /* Add to pending list */ ocs_list_add_tail(&hw->cmd_pending, ctx); /* Submit as much of the pending list as we can */ if (ocs_hw_cmd_submit_pending(hw) == 0) { rc = OCS_HW_RTN_SUCCESS; } ocs_unlock(&hw->cmd_lock); } return rc; } /** * @ingroup devInitShutdown * @brief Register a callback for the given event. * * @param hw Hardware context. * @param which Event of interest. 
 * @param func Function to call when the event occurs.
 * @param arg Argument passed to the callback function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
{
	/* All three of hw, func, and a valid event selector are required */
	if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
		ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
			    hw, which, func);
		return OCS_HW_RTN_ERROR;
	}

	switch (which) {
	case OCS_HW_CB_DOMAIN:
		hw->callback.domain = func;
		hw->args.domain = arg;
		break;
	case OCS_HW_CB_PORT:
		hw->callback.port = func;
		hw->args.port = arg;
		break;
	case OCS_HW_CB_UNSOLICITED:
		hw->callback.unsolicited = func;
		hw->args.unsolicited = arg;
		break;
	case OCS_HW_CB_REMOTE_NODE:
		hw->callback.rnode = func;
		hw->args.rnode = arg;
		break;
	case OCS_HW_CB_BOUNCE:
		hw->callback.bounce = func;
		hw->args.bounce = arg;
		break;
	default:
		ocs_log_test(hw->os, "unknown callback %#x\n", which);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup port
 * @brief Allocate a port object.
 *
 * @par Description
 * This function allocates a VPI object for the port and stores it in the
 * indicator field of the port object.
 *
 * @param hw Hardware context.
 * @param sport SLI port object used to connect to the domain.
 * @param domain Domain object associated with this port (may be NULL).
 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
		uint8_t *wwpn)
{
	uint8_t	*cmd = NULL;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t index;

	sport->indicator = UINT32_MAX;
	sport->hw = hw;
	sport->ctx.app = sport;
	sport->sm_free_req_pending = 0;

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	if (wwpn) {
		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
	}

	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
		ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
		return OCS_HW_RTN_ERROR;
	}

	if (domain != NULL) {
		ocs_sm_function_t next = NULL;

		/* On success, ownership of cmd passes to the state machine */
		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
		if (!cmd) {
			ocs_log_err(hw->os, "command memory allocation failed\n");
			rc = OCS_HW_RTN_NO_MEMORY;
			goto ocs_hw_port_alloc_out;
		}

		/* If the WWPN is NULL, fetch the default WWPN and WWNN before
		 * initializing the VPI
		 */
		if (!wwpn) {
			next = __ocs_hw_port_alloc_read_sparm64;
		} else {
			next = __ocs_hw_port_alloc_init_vpi;
		}

		ocs_sm_transition(&sport->ctx, next, cmd);
	} else if (!wwpn) {
		/* This is the convention for the HW, not SLI */
		ocs_log_test(hw->os, "need WWN for physical port\n");
		rc = OCS_HW_RTN_ERROR;
	} else {
		/* domain NULL and wwpn non-NULL */
		ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
	}

ocs_hw_port_alloc_out:
	/* On failure, release both the command buffer and the VPI */
	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
	}

	return rc;
}

/**
 * @ingroup port
 * @brief Attach a physical/virtual SLI port to a domain.
 *
 * @par Description
 * This function registers a previously-allocated VPI with the
 * device.
 *
 * @param hw Hardware context.
 * @param sport Pointer to the SLI port object.
 * @param fc_id Fibre Channel ID to associate with this port.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */
ocs_hw_rtn_e
ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
{
	uint8_t	*buf = NULL;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (!hw || !sport) {
		ocs_log_err(hw ? hw->os : NULL,
			"bad parameter(s) hw=%p sport=%p\n", hw, sport);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	/* buf ownership passes to the sport state machine with the event */
	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (!buf) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	sport->fc_id = fc_id;
	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
	return rc;
}

/**
 * @brief Called when the port control command completes.
 *
 * @par Description
 * We only need to free the mailbox command buffer.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @ingroup port
 * @brief Control a port (initialize, shutdown, or set link configuration).
 *
 * @par Description
 * This function controls a port depending on the @c ctrl parameter:
 * - @b OCS_HW_PORT_INIT -
 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
 * .
 * - @b OCS_HW_PORT_SHUTDOWN -
 * Issues the DOWN_LINK command for the specified port.
 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
 * .
 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
 * Sets the link configuration.
 *
 * @param hw Hardware context.
 * @param ctrl Specifies the operation:
 * - OCS_HW_PORT_INIT
 * - OCS_HW_PORT_SHUTDOWN
 * - OCS_HW_PORT_SET_LINK_CONFIG
 *
 * @param value Operation-specific value.
 * - OCS_HW_PORT_INIT - Selective reset AL_PA
 * - OCS_HW_PORT_SHUTDOWN - N/A
 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
 *
 * @param cb Callback function to invoke the following operation.
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
 * completes.
 *
 * @param arg Callback argument invoked after the command completes.
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
 * completes.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;

	switch (ctrl) {
	case OCS_HW_PORT_INIT:
	{
		uint8_t	*init_link;
		uint32_t speed = 0;
		uint8_t reset_alpa = 0;

		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
			uint8_t	*cfg_link;

			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
			if (cfg_link == NULL) {
				ocs_log_err(hw->os, "no buffer for command\n");
				return OCS_HW_RTN_NO_MEMORY;
			}

			/* On success, cfg_link is freed by ocs_hw_cb_port_control */
			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
							ocs_hw_cb_port_control, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
				break;
			}
			speed = hw->config.speed;
			reset_alpa = (uint8_t)(value & 0xff);
		} else {
			speed = FC_LINK_SPEED_10G;
		}

		/*
		 * Bring link up, unless FW version is not supported
		 */
		if (hw->workaround.fw_version_too_low) {
			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
				ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
						OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
			} else {
				ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
						OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
			}

			return OCS_HW_RTN_ERROR;
		}

		rc = OCS_HW_RTN_ERROR;

		/* Allocate a new buffer for the init_link command */
		init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (init_link == NULL) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
			rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
						ocs_hw_cb_port_control, NULL);
		}
		/* Free buffer on error, since no callback is coming */
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
			ocs_log_err(hw->os, "INIT_LINK failed\n");
		}
		break;
	}
	case OCS_HW_PORT_SHUTDOWN:
	{
		uint8_t	*down_link;

		down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (down_link == NULL) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
		if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
			rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
						ocs_hw_cb_port_control, NULL);
		}
		/* Free buffer on error, since no callback is coming */
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
			ocs_log_err(hw->os, "DOWN_LINK failed\n");
		}
		break;
	}
	case OCS_HW_PORT_SET_LINK_CONFIG:
		rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
		break;
	default:
		ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
		break;
	}

	return rc;
}

/**
 * @ingroup port
 * @brief Free port resources.
 *
 * @par Description
 * Issue the UNREG_VPI command to free the assigned VPI context.
 *
 * @param hw Hardware context.
 * @param sport SLI port object used to connect to the domain.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (!hw || !sport) {
		ocs_log_err(hw ? hw->os : NULL,
			"bad parameter(s) hw=%p sport=%p\n", hw, sport);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
	return rc;
}

/**
 * @ingroup domain
 * @brief Allocate a fabric domain object.
 *
 * @par Description
 * This function starts a series of commands needed to connect to the domain, including
 *   - REG_FCFI
 *   - INIT_VFI
 *   - READ_SPARMS
 *   .
 * @b Note: Not all SLI interface types use all of the above commands.
 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
 *
 * @param hw Hardware context.
 * @param domain Pointer to the domain object.
 * @param fcf FCF index.
 * @param vlan VLAN ID.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
{
	uint8_t		*cmd = NULL;
	uint32_t	index;

	if (!hw || !domain || !domain->sport) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
				hw, domain, domain ? domain->sport : NULL);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	/* On success, cmd ownership passes to the domain state machine */
	cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (!cmd) {
		ocs_log_err(hw->os, "command memory allocation failed\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	domain->dma = hw->domain_dmem;

	domain->hw = hw;
	domain->sm.app = domain;
	domain->fcf = fcf;
	domain->fcf_indicator = UINT32_MAX;
	domain->vlan_id = vlan;
	domain->indicator = UINT32_MAX;

	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
		ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_ERROR;
	}

	ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup domain
 * @brief Attach a SLI port to a domain.
 *
 * @param hw Hardware context.
 * @param domain Pointer to the domain object.
 * @param fc_id Fibre Channel ID to associate with this port.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
{
	uint8_t	*buf = NULL;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (!hw || !domain) {
		ocs_log_err(hw ? hw->os : NULL,
			"bad parameter(s) hw=%p domain=%p\n", hw, domain);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	/* buf ownership passes to the domain state machine with the event */
	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (!buf) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	domain->sport->fc_id = fc_id;
	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
	return rc;
}

/**
 * @ingroup domain
 * @brief Free a fabric domain object.
 *
 * @par Description
 * Free both the driver and SLI port resources associated with the domain.
 *
 * @param hw Hardware context.
 * @param domain Pointer to the domain object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (!hw || !domain) {
		ocs_log_err(hw ? hw->os : NULL,
			"bad parameter(s) hw=%p domain=%p\n", hw, domain);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
	return rc;
}

/**
 * @ingroup domain
 * @brief Free a fabric domain object.
 *
 * @par Description
 * Free the driver resources associated with the domain. The difference between
 * this call and ocs_hw_domain_free() is that this call assumes resources no longer
 * exist on the SLI port, due to a reset or after some error conditions.
 *
 * @param hw Hardware context.
 * @param domain Pointer to the domain object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
{
	if (!hw || !domain) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
		return OCS_HW_RTN_ERROR;
	}

	/* No mailbox traffic: just release the VFI resource locally */
	sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup node
 * @brief Allocate a remote node object.
 *
 * @param hw Hardware context.
 * @param rnode Allocated remote node object to initialize.
 * @param fc_addr FC address of the remote node.
 * @param sport SLI port used to connect to remote node.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
		ocs_sli_port_t *sport)
{
	/* Check for invalid indicator (a valid one means the RPI is already allocated) */
	if (UINT32_MAX != rnode->indicator) {
		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
				fc_addr, rnode->indicator);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	/* NULL SLI port indicates an unallocated remote node */
	rnode->sport = NULL;

	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
				fc_addr);
		return OCS_HW_RTN_ERROR;
	}

	rnode->fc_id = fc_addr;
	rnode->sport = sport;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup node
 * @brief Update a remote node object with the remote port's service parameters.
 *
 * @param hw Hardware context.
 * @param rnode Allocated remote node object to initialize.
 * @param sparms DMA buffer containing the remote port's service parameters.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
{
	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
	uint8_t		*buf = NULL;
	uint32_t	count = 0;

	if (!hw || !rnode || !sparms) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
			    hw, rnode, sparms);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (!buf) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/*
	 * If the attach count is non-zero, this RPI has already been registered.
	 * Otherwise, register the RPI
	 */
	if (rnode->index == UINT32_MAX) {
		ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_ERROR;
	}
	count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
	if (count) {
		/*
		 * Can't attach multiple FC_ID's to a node unless High Login
		 * Mode is enabled
		 */
		if (sli_get_hlm(&hw->sli) == FALSE) {
			ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
					sli_get_hlm(&hw->sli), count);
			rc = OCS_HW_RTN_SUCCESS;
		} else {
			rnode->node_group = TRUE;
			rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
			/* SYNC means the RPI is already attached; no mailbox round trip needed */
			rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
		}
	} else {
		rnode->node_group = FALSE;

		ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
		if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
					rnode->indicator, rnode->sport->indicator,
					sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
					ocs_hw_cb_node_attach, rnode);
		}
	}

	if (count || rc) {
		/* Roll back the refcount bump on error */
		if (rc < OCS_HW_RTN_SUCCESS) {
			ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
			ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
		}
		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @ingroup node
 * @brief Free a remote node resource.
 *
 * @param hw Hardware context.
 * @param rnode Remote node object to free.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/
ocs_hw_rtn_e
ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
{
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;

	if (!hw || !rnode) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
			    hw, rnode);
		return OCS_HW_RTN_ERROR;
	}

	if (rnode->sport) {
		/* Can only free the RPI if the node is no longer attached */
		if (!rnode->attached) {
			if (rnode->indicator != UINT32_MAX) {
				if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
					ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
						    rnode->indicator, rnode->fc_id);
					rc = OCS_HW_RTN_ERROR;
				} else {
					rnode->node_group = FALSE;
					rnode->indicator = UINT32_MAX;
					rnode->index = UINT32_MAX;
					rnode->free_group = FALSE;
				}
			}
		} else {
			ocs_log_err(hw->os, "Error: rnode is still attached\n");
			rc = OCS_HW_RTN_ERROR;
		}
	}

	return rc;
}

/**
 * @ingroup node
 * @brief Free a remote node object.
 *
 * @param hw Hardware context.
 * @param rnode Remote node object to free.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
{
	uint8_t	*buf = NULL;
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS_SYNC;
	uint32_t	index = UINT32_MAX;

	if (!hw || !rnode) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
			    hw, rnode);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	index = rnode->index;

	if (rnode->sport) {
		uint32_t	count = 0;
		uint32_t	fc_id;

		if (!rnode->attached) {
			return OCS_HW_RTN_SUCCESS_SYNC;
		}

		buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (!buf) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);

		if (count <= 1) {
			/* There are no other references to this RPI
			 * so unregister it and free the resource.
			 */
			fc_id = UINT32_MAX;
			rnode->node_group = FALSE;
			rnode->free_group = TRUE;
		} else {
			if (sli_get_hlm(&hw->sli) == FALSE) {
				ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
						count);
			}
			fc_id = rnode->fc_id & 0x00ffffff;
		}

		rc = OCS_HW_RTN_ERROR;

		if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
					SLI_RSRC_FCOE_RPI, fc_id)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
		}

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "UNREG_RPI failed\n");
			ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
			rc = OCS_HW_RTN_ERROR;
		}
	}

	return rc;
}

/**
 * @ingroup node
 * @brief Free all remote node objects.
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_free_all(ocs_hw_t *hw)
{
	uint8_t	*buf = NULL;
	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Check if the chip is in an error state (UE'd) before proceeding.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
		return OCS_HW_RTN_ERROR;
	}

	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (!buf) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* 0xffff / FCOE_FCFI unregisters every RPI on the function at once */
	if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
				SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
				NULL);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "UNREG_RPI failed\n");
		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
		rc = OCS_HW_RTN_ERROR;
	}

	return rc;
}

/**
 * @brief Allocate an RPI to back a remote node group.
 *
 * @param hw Hardware context.
 * @param ngroup Node group to initialize.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
{
	if (!hw || !ngroup) {
		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
				hw, ngroup);
		return OCS_HW_RTN_ERROR;
	}

	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator, &ngroup->index)) {
		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
				ngroup->indicator);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Attach a remote node to a node group, replacing the node's own RPI
 * with the group's shared RPI.
 *
 * @param hw Hardware context.
 * @param ngroup Node group to attach to.
 * @param rnode Remote node (must not already be attached).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
{
	if (!hw || !ngroup || !rnode) {
		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
			    hw, ngroup, rnode);
		return OCS_HW_RTN_ERROR;
	}

	if (rnode->attached) {
		ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
			    rnode->indicator, rnode->fc_id);
		return OCS_HW_RTN_ERROR;
	}

	/* Release the node's private RPI before adopting the group's */
	if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
		ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
				rnode->indicator);
		return OCS_HW_RTN_ERROR;
	}

	rnode->indicator = ngroup->indicator;
	rnode->index = ngroup->index;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Free a remote node group's RPI and reset its indicators.
 *
 * @param hw Hardware context.
 * @param ngroup Node group to free.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
{
	int	ref;

	if (!hw || !ngroup) {
		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
				hw, ngroup);
		return OCS_HW_RTN_ERROR;
	}

	ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
	if (ref) {
		/* Hmmm, the reference count is non-zero */
		ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
				ref, ngroup->indicator);

		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
			ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", ngroup->indicator);
			return OCS_HW_RTN_ERROR;
		}

		ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
	}

	ngroup->indicator = UINT32_MAX;
	ngroup->index = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Initialize IO fields on each free call.
 *
 * @n @b Note: This is done on each free call (as opposed to each
 * alloc call) because port-owned XRIs are not
 * allocated with ocs_hw_io_alloc() but are freed with this
 * function.
 *
 * @param io Pointer to HW IO.
 */
static inline void
ocs_hw_init_free_io(ocs_hw_io_t *io)
{
	/*
	 * Set io->done to NULL, to avoid any callbacks, should
	 * a completion be received for one of these IOs
	 */
	io->done = NULL;
	io->abort_done = NULL;
	io->status_saved = 0;
	io->abort_in_progress = FALSE;
	io->port_owned_abort_count = 0;
	io->rnode = NULL;
	io->type = 0xFFFF;
	io->wq = NULL;
	io->ul_io = NULL;
	io->tgt_wqe_timeout = 0;
}

/**
 * @ingroup io
 * @brief Lockless allocate a HW IO object.
 *
 * @par Description
 * Assume that hw->ocs_lock is held. This function is only used if
 * use_dif_sec_xri workaround is being used.
 *
 * @param hw Hardware context.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
static inline ocs_hw_io_t *
_ocs_hw_io_alloc(ocs_hw_t *hw)
{
	ocs_hw_io_t	*io = NULL;

	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
		ocs_list_add_tail(&hw->io_inuse, io);
		io->state = OCS_HW_IO_STATE_INUSE;
		io->quarantine = FALSE;
		io->quarantine_first_phase = TRUE;
		io->abort_reqtag = UINT32_MAX;
		/* refcount release routine: ocs_hw_io_free_internal */
		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
	} else {
		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
	}

	return io;
}

/**
 * @ingroup io
 * @brief Allocate a HW IO object.
 *
 * @par Description
 * @n @b Note: This function applies to non-port owned XRIs
 * only.
 *
 * @param hw Hardware context.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
ocs_hw_io_t *
ocs_hw_io_alloc(ocs_hw_t *hw)
{
	ocs_hw_io_t	*io = NULL;

	/* Locked wrapper around the lockless allocator */
	ocs_lock(&hw->io_lock);
		io = _ocs_hw_io_alloc(hw);
	ocs_unlock(&hw->io_lock);

	return io;
}

/**
 * @ingroup io
 * @brief Allocate/Activate a port owned HW IO object.
 *
 * @par Description
 * This function is called by the transport layer when an XRI is
 * allocated by the SLI-Port. This will "activate" the HW IO
 * associated with the XRI received from the SLI-Port to mirror
 * the state of the XRI.
 * @n @n @b Note: This function applies to port owned XRIs only.
 *
 * @param hw Hardware context.
 * @param io Pointer HW IO to activate/allocate.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
ocs_hw_io_t *
ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (ocs_ref_read_count(&io->ref) > 0) {
		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
		return NULL;
	}

	if (io->wq != NULL) {
		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
		return NULL;
	}

	/* refcount release routine: ocs_hw_io_free_port_owned */
	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
	io->xbusy = TRUE;

	return io;
}

/**
 * @ingroup io
 * @brief When an IO is freed, depending on the exchange busy flag, and other
 * workarounds, move it to the correct list.
 *
 * @par Description
 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
 * from the busy or wait_free list.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO object to move.
 */
static void
ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (io->xbusy) {
		/* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
		ocs_list_add_tail(&hw->io_wait_free, io);
		io->state = OCS_HW_IO_STATE_WAIT_FREE;
	} else {
		/* IO not busy, add to free list */
		ocs_list_add_tail(&hw->io_free, io);
		io->state = OCS_HW_IO_STATE_FREE;
	}

	/* BZ 161832 workaround */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_hw_check_sec_hio_list(hw);
	}
}

/**
 * @ingroup io
 * @brief Free a HW IO object. Perform cleanup common to
 * port and host-owned IOs.
 *
 * @param hw Hardware context.
 * @param io Pointer to the HW IO object.
 */
static inline void
ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	/* initialize IO fields */
	ocs_hw_init_free_io(io);

	/* Restore default SGL */
	ocs_hw_io_restore_sgl(hw, io);
}

/**
 * @ingroup io
 * @brief Free a HW IO object associated with a port-owned XRI.
 *
 * @param arg Pointer to the HW IO object.
 */
static void
ocs_hw_io_free_port_owned(void *arg)
{
	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
	ocs_hw_t *hw = io->hw;

	/*
	 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
	 * waiting for buffers.
	 */
	if (io->auto_xfer_rdy_dnrx) {
		ocs_lock(&hw->io_lock);
			/* take a reference count because we still own the IO until the buffer is posted */
			ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
			ocs_list_add_tail(&hw->io_port_dnrx, io);
		ocs_unlock(&hw->io_lock);
	}

	/* perform common cleanup */
	ocs_hw_io_free_common(hw, io);
}

/**
 * @ingroup io
 * @brief Free a previously-allocated HW IO object. Called when
 * IO refcount goes to zero (host-owned IOs only).
 *
 * @param arg Pointer to the HW IO object.
 */
static void
ocs_hw_io_free_internal(void *arg)
{
	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
	ocs_hw_t *hw = io->hw;

	/* perform common cleanup */
	ocs_hw_io_free_common(hw, io);

	ocs_lock(&hw->io_lock);
		/* remove from in-use list */
		ocs_list_remove(&hw->io_inuse, io);
		ocs_hw_io_free_move_correct_list(hw, io);
	ocs_unlock(&hw->io_lock);
}

/**
 * @ingroup io
 * @brief Free a previously-allocated HW IO object.
 *
 * @par Description
 * @n @b Note: This function applies to port and host owned XRIs.
 *
 * @param hw Hardware context.
 * @param io Pointer to the HW IO object.
 *
 * @return Returns a non-zero value if HW IO was freed, 0 if references
 * on the IO still exist, or a negative value if an error occurred.
 */
int32_t
ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	/* just put refcount */
	if (ocs_ref_read_count(&io->ref) <= 0) {
		ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
			    io->indicator, io->reqtag);
		return -1;
	}

	return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
}

/**
 * @ingroup io
 * @brief Check if given HW IO is in-use
 *
 * @par Description
 * This function returns TRUE if the given HW IO has been
 * allocated and is in-use, and FALSE otherwise. It applies to
 * port and host owned XRIs.
 *
 * @param hw Hardware context.
 * @param io Pointer to the HW IO object.
 *
 * @return TRUE if an IO is in use, or FALSE otherwise.
 */
uint8_t
ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	return (ocs_ref_read_count(&io->ref) > 0);
}

/**
 * @brief Write a HW IO to a work queue.
 *
 * @par Description
 * A HW IO is written to a work queue.
 *
 * @param wq Pointer to work queue.
 * @param wqe Pointer to WQ entry.
 *
 * @n @b Note: Assumes the SLI-4 queue lock is held.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
*/ static int32_t _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) { int32_t rc; int32_t queue_rc; /* Every so often, set the wqec bit to generate comsummed completions */ if (wq->wqec_count) { wq->wqec_count--; } if (wq->wqec_count == 0) { sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf; genwqe->wqec = 1; wq->wqec_count = wq->wqec_set_count; } /* Decrement WQ free count */ wq->free_count--; queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf); if (queue_rc < 0) { rc = -1; } else { rc = 0; ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc); } return rc; } /** * @brief Write a HW IO to a work queue. * * @par Description * A HW IO is written to a work queue. * * @param wq Pointer to work queue. * @param wqe Pointer to WQE entry. * * @n @b Note: Takes the SLI-4 queue lock. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) { int32_t rc = 0; sli_queue_lock(wq->queue); if ( ! ocs_list_empty(&wq->pending_list)) { ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { rc = _hw_wq_write(wq, wqe); if (rc < 0) { break; } if (wqe->abort_wqe_submit_needed) { wqe->abort_wqe_submit_needed = 0; sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT ); ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } } else { if (wq->free_count > 0) { rc = _hw_wq_write(wq, wqe); } else { ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } sli_queue_unlock(wq->queue); return rc; } /** * @brief Update free count and submit any pending HW IOs * * @par Description * The WQ free count is updated, and any pending HW IOs are submitted that * will fit in the queue. * * @param wq Pointer to work queue. 
* @param update_free_count Value added to WQs free count. * * @return None. */ static void hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count) { ocs_hw_wqe_t *wqe; sli_queue_lock(wq->queue); /* Update free count with value passed in */ wq->free_count += update_free_count; while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { _hw_wq_write(wq, wqe); if (wqe->abort_wqe_submit_needed) { wqe->abort_wqe_submit_needed = 0; sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT); ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } sli_queue_unlock(wq->queue); } /** * @brief Check to see if there are any BZ 161832 workaround waiting IOs * * @par Description * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try * to allocate a secondary HW io, and dispatch it. * * @n @b Note: hw->io_lock MUST be taken when called. 
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
static void
ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
{
	ocs_hw_io_t *io;
	ocs_hw_io_t *sec_io;
	int rc = 0;

	while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
		uint16_t flags;

		/* Stop draining as soon as no secondary XRI can be allocated */
		sec_io = _ocs_hw_io_alloc(hw);
		if (sec_io == NULL) {
			break;
		}

		io = ocs_list_remove_head(&hw->sec_hio_wait_list);
		ocs_list_add_tail(&hw->io_inuse, io);
		io->state = OCS_HW_IO_STATE_INUSE;
		io->sec_hio = sec_io;

		/* mark secondary XRI for second and subsequent data phase as quarantine */
		if (io->xbusy) {
			sec_io->quarantine = TRUE;
		}

		flags = io->sec_iparam.fcp_tgt.flags;
		if (io->xbusy) {
			flags |= SLI4_IO_CONTINUATION;
		} else {
			flags &= ~SLI4_IO_CONTINUATION;
		}

		io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;

		/* Complete (continue) TRECV IO; parameters were saved in io->sec_iparam
		 * when the IO was parked on sec_hio_wait_list by ocs_hw_io_send() */
		if (io->xbusy) {
			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
				io->first_data_sge,
				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
				io->reqtag, SLI4_CQ_DEFAULT,
				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
				flags,
				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
					break;
			}
		} else {
			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
				io->first_data_sge,
				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
				io->reqtag, SLI4_CQ_DEFAULT,
				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
				flags,
				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
					break;
			}
		}

		if (io->wq == NULL) {
			io->wq = ocs_hw_queue_next_wq(hw, io);
			ocs_hw_assert(io->wq != NULL);
		}
		io->xbusy = TRUE;

		/*
		 * Add IO to active io wqe list before submitting, in case the
		 * wcqe processing preempts this thread.
		 */
		ocs_hw_add_io_timed_wqe(hw, io);
		rc = hw_wq_write(io->wq, &io->wqe);
		if (rc >= 0) {
			/* non-negative return is success */
			rc = 0;
		} else {
			/* failed to write wqe, remove from active wqe list */
			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
			io->xbusy = FALSE;
			ocs_hw_remove_io_timed_wqe(hw, io);
		}
	}
}

/**
 * @ingroup io
 * @brief Send a Single Request/Response Sequence (SRRS).
 *
 * @par Description
 * This routine supports communication sequences consisting of a single
 * request and single response between two endpoints. Examples include:
 *  - Sending an ELS request.
 *  - Sending an ELS response - To send an ELS response, the caller must provide
 * the OX_ID from the received request.
 *  - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
 * the caller must provide the R_CTL, TYPE, and DF_CTL
 * values to place in the FC frame header.
 *  .
 * @n @b Note: The caller is expected to provide both send and receive
 * buffers for requests. In the case of sending a response, no receive buffer
 * is necessary and the caller may pass in a NULL pointer.
 *
 * @param hw Hardware context.
 * @param type Type of sequence (ELS request/response, FC-CT).
 * @param io Previously-allocated HW IO object.
 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
 * @param len Length, in bytes, of data to send.
 * @param receive Optional DMA memory to hold a response.
 * @param rnode Destination of data (that is, a remote node).
 * @param iparam IO parameters (ELS response and FC-CT).
 * @param cb Function call upon completion of sending the data (may be NULL).
 * @param arg Argument to pass to IO completion function.
 *
 * @return Returns 0 on success, or a non-zero on failure.
 */
ocs_hw_rtn_e
ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
		  ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
		  ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
		  ocs_hw_srrs_cb_t cb, void *arg)
{
	sli4_sge_t	*sge = NULL;
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
	uint16_t	local_flags = 0;

	if (!hw || !io || !rnode || !iparam) {
		ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
			    hw, io, send, receive, rnode, iparam);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
		return OCS_HW_RTN_ERROR;
	}

	if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
		/* We must set the XC bit for port owned XRIs */
		local_flags |= SLI4_IO_CONTINUATION;
	}
	/* Save completion callback/argument for the WQE completion path */
	io->rnode = rnode;
	io->type  = type;
	io->done = cb;
	io->arg  = arg;

	sge = io->sgl->virt;

	/* clear both SGE */
	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));

	if (send) {
		sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
		sge[0].buffer_address_low  = ocs_addr32_lo(send->phys);
		sge[0].sge_type = SLI4_SGE_TYPE_DATA;
		sge[0].buffer_length = len;
	}

	/* NOTE(review): for ELS_REQ/FC_CT, receive is dereferenced here and
	 * below (receive->size) without a NULL guard — the bad-parm check does
	 * not cover it; callers must always supply a receive buffer for these
	 * types (see function doc). Confirm against callers. */
	if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
		sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
		sge[1].buffer_address_low  = ocs_addr32_lo(receive->phys);
		sge[1].sge_type = SLI4_SGE_TYPE_DATA;
		sge[1].buffer_length = receive->size;
		sge[1].last = TRUE;
	} else {
		sge[0].last = TRUE;
	}

	switch (type) {
	case OCS_HW_ELS_REQ:
		if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
							*((uint8_t *)(send->virt)), /* req_type */
							len, receive->size,
							iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
			ocs_log_err(hw->os, "REQ WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_ELS_RSP:
		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
					   iparam->els.ox_id,
					   rnode, local_flags, UINT32_MAX)) {
			ocs_log_err(hw->os, "RSP WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_ELS_RSP_SID:
		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
					   iparam->els_sid.ox_id,
					   rnode, local_flags, iparam->els_sid.s_id)) {
			ocs_log_err(hw->os, "RSP (SID) WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_FC_CT:
		if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
					  receive->size, iparam->fc_ct.timeout, io->indicator,
					  io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
					  iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
			ocs_log_err(hw->os, "GEN WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_FC_CT_RSP:
		if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
					  iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
					  io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
					  iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
			ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_BLS_ACC:
	case OCS_HW_BLS_RJT:
	{
		sli_bls_payload_t	bls;

		if (OCS_HW_BLS_ACC == type) {
			bls.type = SLI_BLS_ACC;
			ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
		} else {
			bls.type = SLI_BLS_RJT;
			ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
		}

		bls.ox_id = iparam->bls.ox_id;
		bls.rx_id = iparam->bls.rx_id;

		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
					   io->indicator, io->reqtag,
					   SLI4_CQ_DEFAULT,
					   rnode, UINT32_MAX)) {
			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	}
	case OCS_HW_BLS_ACC_SID:
	{
		sli_bls_payload_t	bls;

		bls.type = SLI_BLS_ACC;
		ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));

		bls.ox_id = iparam->bls_sid.ox_id;
		bls.rx_id = iparam->bls_sid.rx_id;

		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
					   rnode, iparam->bls_sid.s_id)) {
			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	}
	case OCS_HW_BCAST:
		if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
					iparam->bcast.timeout, io->indicator, io->reqtag,
					SLI4_CQ_DEFAULT, rnode,
					iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
			ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
		rc = OCS_HW_RTN_ERROR;
	}

	if (OCS_HW_RTN_SUCCESS == rc) {
		if (io->wq == NULL) {
			io->wq = ocs_hw_queue_next_wq(hw, io);
			ocs_hw_assert(io->wq != NULL);
		}
		io->xbusy = TRUE;

		/*
		 * Add IO to active io wqe list before submitting, in case the
		 * wcqe processing preempts this thread.
		 */
		OCS_STAT(io->wq->use_count++);
		ocs_hw_add_io_timed_wqe(hw, io);
		rc = hw_wq_write(io->wq, &io->wqe);
		if (rc >= 0) {
			/* non-negative return is success */
			rc = 0;
		} else {
			/* failed to write wqe, remove from active wqe list */
			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
			io->xbusy = FALSE;
			ocs_hw_remove_io_timed_wqe(hw, io);
		}
	}

	return rc;
}

/**
 * @ingroup io
 * @brief Send a read, write, or response IO.
 *
 * @par Description
 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
 * as a target or initiator. Examples include:
 *  - Sending read data and good response (target).
 *  - Sending a response (target with no data or after receiving write data).
 *  .
 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
 * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
 *
 * @param hw Hardware context.
 * @param type Type of IO (target read, target response, and so on).
 * @param io Previously-allocated HW IO object.
 * @param len Length, in bytes, of data to send.
 * @param iparam IO parameters.
 * @param rnode Destination of data (that is, a remote node).
 * @param cb Function call upon completion of sending data (may be NULL).
 * @param arg Argument to pass to IO completion function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 *
 * @todo
 *  - Support specifiying relative offset.
 *  - Use a WQ other than 0.
 */
ocs_hw_rtn_e
ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
		uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
		void *cb, void *arg)
{
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
	uint32_t	rpi;
	uint8_t		send_wqe = TRUE;

	CPUTRACE("");

	if (!hw || !io || !rnode || !iparam) {
		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
			    hw, io, iparam, rnode);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
		return OCS_HW_RTN_ERROR;
	}

	rpi = rnode->indicator;

	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
		rpi = hw->workaround.unregistered_rid;
		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
	}

	/*
	 * Save state needed during later stages
	 */
	io->rnode = rnode;
	io->type  = type;
	io->done  = cb;
	io->arg   = arg;

	/*
	 * Format the work queue entry used to send the IO
	 */
	switch (type) {
	case OCS_HW_IO_INITIATOR_READ:
		/*
		 * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
		 * initiator read IO for quarantine
		 */
		/* NOTE(review): this reads iparam->fcp_tgt.dif_oper in the
		 * initiator path — presumably fcp_tgt/fcp_ini overlay in the
		 * iparam union; confirm before changing. */
		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
			io->quarantine = TRUE;
		}

		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
				iparam->fcp_ini.rsp);

		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
					iparam->fcp_ini.timeout)) {
			ocs_log_err(hw->os, "IREAD WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_IO_INITIATOR_WRITE:
		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
				iparam->fcp_ini.rsp);

		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
					 len, iparam->fcp_ini.first_burst,
					 io->indicator, io->reqtag,
					SLI4_CQ_DEFAULT, rpi, rnode,
					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
					iparam->fcp_ini.timeout)) {
			ocs_log_err(hw->os, "IWRITE WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_IO_INITIATOR_NODATA:
		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
				iparam->fcp_ini.rsp);

		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
					rpi, rnode, iparam->fcp_ini.timeout)) {
			ocs_log_err(hw->os, "ICMND WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_IO_TARGET_WRITE: {
		uint16_t flags = iparam->fcp_tgt.flags;
		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;

		/*
		 * Fill in the XFER_RDY for IF_TYPE 0 devices
		 */
		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
		*((uint32_t *)xfer->rsvd) = 0;

		if (io->xbusy) {
			flags |= SLI4_IO_CONTINUATION;
		} else {
			flags &= ~SLI4_IO_CONTINUATION;
		}

		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;

		/*
		 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
		 * then mark the target write IO for quarantine
		 */
		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
			io->quarantine = TRUE;
		}

		/*
		 * BZ 161832 Workaround:
		 * Check for use_dif_sec_xri workaround.  Note, even though the first dataphase
		 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
		 * potential for deadlock where all XRI's are allocated as primaries to IOs that
		 * are on hw->sec_hio_wait_list.  If this secondary XRI is not for the first
		 * data phase, it is marked for quarantine.
		 */
		if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {

			/*
			 * If we have allocated a chained SGL for skyhawk, then
			 * we can re-use this for the sec_hio.
			 */
			if (io->ovfl_io != NULL) {
				io->sec_hio = io->ovfl_io;
				io->sec_hio->quarantine = TRUE;
			} else {
				io->sec_hio = ocs_hw_io_alloc(hw);
			}
			if (io->sec_hio == NULL) {
				/* Failed to allocate, so save full request context and put
				 * this IO on the wait list
				 */
				io->sec_iparam = *iparam;
				io->sec_len = len;
				ocs_lock(&hw->io_lock);
					ocs_list_remove(&hw->io_inuse, io);
					ocs_list_add_tail(&hw->sec_hio_wait_list, io);
					io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
					hw->sec_hio_wait_count++;
				ocs_unlock(&hw->io_lock);
				send_wqe = FALSE;
				/* Done */
				break;
			}
			/* We quarantine the secondary IO if this is the second or subsequent data phase */
			if (io->xbusy) {
				io->sec_hio->quarantine = TRUE;
			}
		}

		/*
		 * If not the first data phase, and io->sec_hio has been allocated, then issue
		 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
		 */
		if (io->xbusy && (io->sec_hio != NULL)) {
			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
						   iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
						   io->reqtag, SLI4_CQ_DEFAULT,
						   iparam->fcp_tgt.ox_id, rpi, rnode,
						   flags,
						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
				rc = OCS_HW_RTN_ERROR;
			}
		} else {
			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
						   iparam->fcp_tgt.offset, len, io->indicator,
						   io->reqtag, SLI4_CQ_DEFAULT,
						   iparam->fcp_tgt.ox_id, rpi, rnode,
						   flags,
						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
				rc = OCS_HW_RTN_ERROR;
			}
		}
		break;
	}
	case OCS_HW_IO_TARGET_READ: {
		uint16_t flags = iparam->fcp_tgt.flags;

		if (io->xbusy) {
			flags |= SLI4_IO_CONTINUATION;
		} else {
			flags &= ~SLI4_IO_CONTINUATION;
		}

		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
					iparam->fcp_tgt.offset, len, io->indicator,
					io->reqtag, SLI4_CQ_DEFAULT,
					iparam->fcp_tgt.ox_id, rpi, rnode,
					flags,
					iparam->fcp_tgt.dif_oper,
					iparam->fcp_tgt.blk_size,
					iparam->fcp_tgt.cs_ctl,
					iparam->fcp_tgt.app_id)) {
			ocs_log_err(hw->os, "TSEND WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		} else if (hw->workaround.retain_tsend_io_length) {
			io->length = len;
		}
		break;
	}
	case OCS_HW_IO_TARGET_RSP: {
		uint16_t flags = iparam->fcp_tgt.flags;

		if (io->xbusy) {
			flags |= SLI4_IO_CONTINUATION;
		} else {
			flags &= ~SLI4_IO_CONTINUATION;
		}

		/* post a new auto xfer ready buffer */
		if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
			if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
				flags |= SLI4_IO_DNRX;
			}
		}

		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
					&io->def_sgl,
					len,
					io->indicator, io->reqtag,
					SLI4_CQ_DEFAULT,
					iparam->fcp_tgt.ox_id,
					rpi, rnode,
					flags, iparam->fcp_tgt.cs_ctl,
					io->is_port_owned,
					iparam->fcp_tgt.app_id)) {
			ocs_log_err(hw->os, "TRSP WQE error\n");
			rc = OCS_HW_RTN_ERROR;
		}

		break;
	}
	default:
		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
		rc = OCS_HW_RTN_ERROR;
	}

	/* send_wqe is FALSE only when the IO was parked on sec_hio_wait_list */
	if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
		if (io->wq == NULL) {
			io->wq = ocs_hw_queue_next_wq(hw, io);
			ocs_hw_assert(io->wq != NULL);
		}

		io->xbusy = TRUE;

		/*
		 * Add IO to active io wqe list before submitting, in case the
		 * wcqe processing preempts this thread.
		 */
		OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
		OCS_STAT(io->wq->use_count++);
		ocs_hw_add_io_timed_wqe(hw, io);
		rc = hw_wq_write(io->wq, &io->wqe);
		if (rc >= 0) {
			/* non-negative return is success */
			rc = 0;
		} else {
			/* failed to write wqe, remove from active wqe list */
			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
			io->xbusy = FALSE;
			ocs_hw_remove_io_timed_wqe(hw, io);
		}
	}

	return rc;
}

/**
 * @brief Send a raw frame
 *
 * @par Description
 * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
 *
 * @param hw Pointer to HW object.
 * @param hdr Pointer to a little endian formatted FC header.
 * @param sof Value to use as the frame SOF.
 * @param eof Value to use as the frame EOF.
 * @param payload Pointer to payload DMA buffer.
 * @param ctx Pointer to caller provided send frame context.
 * @param callback Callback function.
 * @param arg Callback function argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
ocs_hw_rtn_e
ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
		   ocs_hw_send_frame_context_t *ctx,
		   void (*callback)(void *arg, uint8_t *cqe, int32_t status),
		   void *arg)
{
	int32_t rc;
	ocs_hw_wqe_t *wqe;
	uint32_t xri;
	hw_wq_t *wq;

	wqe = &ctx->wqe;

	/* populate the callback object */
	ctx->hw = hw;

	/* Fetch and populate request tag */
	ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
	if (ctx->wqcb == NULL) {
		ocs_log_err(hw->os, "can't allocate request tag\n");
		return OCS_HW_RTN_NO_RESOURCES;
	}

	/* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
	wq = ocs_varray_iter_next(hw->wq_class_array[1]);
	if (wq == NULL) {
		wq = hw->hw_wq[0];
	}

	/* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
	xri = wq->send_frame_io->indicator;

	/* Build the send frame WQE */
	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
				payload->len,
OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index); if (rc) { ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); return OCS_HW_RTN_ERROR; } /* Write to WQ */ rc = hw_wq_write(wq, wqe); if (rc) { ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc); return OCS_HW_RTN_ERROR; } OCS_STAT(wq->use_count++); - return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS; + return OCS_HW_RTN_SUCCESS; } ocs_hw_rtn_e ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count) { if (sli_get_sgl_preregister(&hw->sli)) { ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n"); return OCS_HW_RTN_ERROR; } io->ovfl_sgl = sgl; io->ovfl_sgl_count = sgl_count; io->ovfl_io = NULL; return OCS_HW_RTN_SUCCESS; } static void ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) { /* Restore the default */ io->sgl = &io->def_sgl; io->sgl_count = io->def_sgl_count; /* * For skyhawk, we need to free the IO allocated for the chained * SGL. For all devices, clear the overflow fields on the IO. * * Note: For DIF IOs, we may be using the same XRI for the sec_hio and * the chained SGLs. If so, then we clear the ovfl_io field * when the sec_hio is freed. */ if (io->ovfl_io != NULL) { ocs_hw_io_free(hw, io->ovfl_io); io->ovfl_io = NULL; } /* Clear the overflow SGL */ io->ovfl_sgl = NULL; io->ovfl_sgl_count = 0; io->ovfl_lsp = NULL; } /** * @ingroup io * @brief Initialize the scatter gather list entries of an IO. * * @param hw Hardware context. * @param io Previously-allocated HW IO object. * @param type Type of IO (target read, target response, and so on). * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type) { sli4_sge_t *data = NULL; uint32_t i = 0; uint32_t skips = 0; if (!hw || !io) { ocs_log_err(hw ? 
			    hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io);
		return OCS_HW_RTN_ERROR;
	}

	/* Clear / reset the scatter-gather list */
	io->sgl = &io->def_sgl;
	io->sgl_count = io->def_sgl_count;
	io->first_data_sge = 0;

	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
	io->n_sge = 0;
	io->sge_offset = 0;

	io->type = type;

	data = io->sgl->virt;

	/*
	 * Some IO types have underlying hardware requirements on the order
	 * of SGEs. Process all special entries here.
	 */
	switch (type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
		/*
		 * No skips, 2 special for initiator I/Os
		 * The addresses and length are written later
		 */
		/* setup command pointer */
		data->sge_type = SLI4_SGE_TYPE_DATA;
		data++;

		/* setup response pointer */
		data->sge_type = SLI4_SGE_TYPE_DATA;

		if (OCS_HW_IO_INITIATOR_NODATA == type) {
			data->last = TRUE;
		}
		data++;

		io->n_sge = 2;
		break;
	case OCS_HW_IO_TARGET_WRITE:
#define OCS_TARGET_WRITE_SKIPS	2
		skips = OCS_TARGET_WRITE_SKIPS;

		/* populate host resident XFER_RDY buffer */
		data->sge_type = SLI4_SGE_TYPE_DATA;
		data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
		data->buffer_address_low  = ocs_addr32_lo(io->xfer_rdy.phys);
		data->buffer_length = io->xfer_rdy.size;
		data++;

		/* the XFER_RDY SGE consumes one of the skip slots */
		skips--;

		io->n_sge = 1;
		break;
	case OCS_HW_IO_TARGET_READ:
		/*
		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
		 */
#define OCS_TARGET_READ_SKIPS	2
		skips = OCS_TARGET_READ_SKIPS;
		break;
	case OCS_HW_IO_TARGET_RSP:
		/*
		 * No skips, etc. for FCP_TRSP64
		 */
		break;
	default:
		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Write skip entries
	 */
	for (i = 0; i < skips; i++) {
		data->sge_type = SLI4_SGE_TYPE_SKIP;
		data++;
	}

	io->n_sge += skips;

	/*
	 * Set last
	 */
	data->last = TRUE;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup io
 * @brief Add a T10 PI seed scatter gather list entry.
 *
 * @param hw Hardware context.
 * @param io Previously-allocated HW IO object.
 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
{
	sli4_sge_t	*data = NULL;
	sli4_diseed_sge_t *dif_seed;

	/* If no dif_info, or dif_oper is disabled, then just return success */
	if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
		return OCS_HW_RTN_SUCCESS;
	}

	if (!hw || !io) {
		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
			    hw, io, dif_info);
		return OCS_HW_RTN_ERROR;
	}

	data = io->sgl->virt;
	data += io->n_sge;

	/* If we are doing T10 DIF add the DIF Seed SGE */
	ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
	dif_seed = (sli4_diseed_sge_t *)data;
	dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
	dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
	dif_seed->app_tag_repl = dif_info->app_tag_repl;
	dif_seed->repl_app_tag = dif_info->repl_app_tag;
	if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
		dif_seed->atrt = dif_info->disable_app_ref_ffff;
		dif_seed->at = dif_info->disable_app_ffff;
	}
	dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
	/* Workaround for SKH (BZ157233) */
	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
		dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
	}

	dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
	dif_seed->dif_blk_size = dif_info->blk_size;
	dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
	dif_seed->check_app_tag = dif_info->check_app_tag;
	dif_seed->check_ref_tag = dif_info->check_ref_tag;
	dif_seed->check_crc = dif_info->check_guard;
	dif_seed->new_ref_tag = dif_info->repl_ref_tag;

	/* Map the HW-level DIF operation onto the SLI-4 rx/tx op pair */
	switch(dif_info->dif_oper) {
	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
		break;
	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
		break;
	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
		break;
	case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
		break;
	default:
		ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
			    dif_info->dif_oper);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Set last, clear previous last
	 */
	data->last = TRUE;
	if (io->n_sge) {
		data[-1].last = FALSE;
	}

	io->n_sge++;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Chain an overflow SGL onto an IO whose current SGL is full.
 *
 * The last SGE of the current SGL is moved to the overflow SGL and
 * replaced with a link (LSP) SGE pointing at the overflow SGL.
 *
 * @param hw Hardware context.
 * @param io Previously-allocated HW IO object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	sli4_lsp_sge_t *lsp;

	/* fail if we're already pointing to the overflow SGL */
	if (io->sgl == io->ovfl_sgl) {
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * For skyhawk, we can use another SGL to extend the SGL list. The
	 * Chained entry must not be in the first 4 entries.
	 *
	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
*/ if (sli_get_sgl_preregister(&hw->sli) && io->def_sgl_count > 4 && io->ovfl_io == NULL && ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) { io->ovfl_io = ocs_hw_io_alloc(hw); if (io->ovfl_io != NULL) { /* * Note: We can't call ocs_hw_io_register_sgl() here * because it checks that SGLs are not pre-registered * and for shyhawk, preregistered SGLs are required. */ io->ovfl_sgl = &io->ovfl_io->def_sgl; io->ovfl_sgl_count = io->ovfl_io->def_sgl_count; } } /* fail if we don't have an overflow SGL registered */ - if (io->ovfl_sgl == NULL) { + if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) { return OCS_HW_RTN_ERROR; } /* * Overflow, we need to put a link SGE in the last location of the current SGL, after * copying the the last SGE to the overflow SGL */ ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1]; lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1]; ocs_memset(lsp, 0, sizeof(*lsp)); if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { sli_skh_chain_sge_build(&hw->sli, (sli4_sge_t*)lsp, io->ovfl_io->indicator, 0, /* frag_num */ 0); /* offset */ } else { lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys); lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys); lsp->sge_type = SLI4_SGE_TYPE_LSP; lsp->last = 0; io->ovfl_lsp = lsp; io->ovfl_lsp->segment_length = sizeof(sli4_sge_t); } /* Update the current SGL pointer, and n_sgl */ io->sgl = io->ovfl_sgl; io->sgl_count = io->ovfl_sgl_count; io->n_sge = 1; return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Add a scatter gather list entry to an IO. * * @param hw Hardware context. * @param io Previously-allocated HW IO object. * @param addr Physical address. * @param length Length of memory pointed to by @c addr. * * @return Returns 0 on success, or a non-zero value on failure. 
 */
ocs_hw_rtn_e
ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
{
	sli4_sge_t	*data = NULL;

	if (!hw || !io || !addr || !length) {
		ocs_log_err(hw ? hw->os : NULL,
			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
			    hw, io, addr, length);
		return OCS_HW_RTN_ERROR;
	}

	/* Chain an overflow SGL when the current SGL is full */
	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
			return OCS_HW_RTN_ERROR;
		}
	}

	if (length > sli_get_max_sge(&hw->sli)) {
		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
			    length, sli_get_max_sge(&hw->sli));
		return OCS_HW_RTN_ERROR;
	}

	data = io->sgl->virt;
	data += io->n_sge;

	data->sge_type = SLI4_SGE_TYPE_DATA;
	data->buffer_address_high = ocs_addr32_hi(addr);
	data->buffer_address_low  = ocs_addr32_lo(addr);
	data->buffer_length = length;
	data->data_offset = io->sge_offset;
	/*
	 * Always assume this is the last entry and mark as such.
	 * If this is not the first entry unset the "last SGE"
	 * indication for the previous entry
	 */
	data->last = TRUE;
	if (io->n_sge) {
		data[-1].last = FALSE;
	}

	/* Set first_data_bde if not previously set */
	if (io->first_data_sge == 0) {
		io->first_data_sge = io->n_sge;
	}

	io->sge_offset += length;
	io->n_sge++;

	/* Update the linked segment length (only executed after overflow has begun) */
	if (io->ovfl_lsp != NULL) {
		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup io
 * @brief Add a T10 DIF scatter gather list entry to an IO.
 *
 * @param hw Hardware context.
 * @param io Previously-allocated HW IO object.
 * @param addr DIF physical address.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
{
	sli4_dif_sge_t	*data = NULL;

	if (!hw || !io || !addr) {
		ocs_log_err(hw ?
hw->os : NULL, "bad parameter hw=%p io=%p addr=%lx\n", hw, io, addr); return OCS_HW_RTN_ERROR; } if ((io->n_sge + 1) > hw->config.n_sgl) { if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_ERROR) { ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge); return OCS_HW_RTN_ERROR; } } data = io->sgl->virt; data += io->n_sge; data->sge_type = SLI4_SGE_TYPE_DIF; /* Workaround for SKH (BZ157233) */ if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) { data->sge_type = SLI4_SGE_TYPE_SKIP; } data->buffer_address_high = ocs_addr32_hi(addr); data->buffer_address_low = ocs_addr32_lo(addr); /* * Always assume this is the last entry and mark as such. * If this is not the first entry unset the "last SGE" * indication for the previous entry */ data->last = TRUE; if (io->n_sge) { data[-1].last = FALSE; } io->n_sge++; return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Abort a previously-started IO. * * @param hw Hardware context. * @param io_to_abort The IO to abort. * @param send_abts Boolean to have the hardware automatically * generate an ABTS. * @param cb Function call upon completion of the abort (may be NULL). * @param arg Argument to pass to abort completion function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg) { sli4_abort_type_e atype = SLI_ABORT_MAX; uint32_t id = 0, mask = 0; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; hw_wq_callback_t *wqcb; if (!hw || !io_to_abort) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io_to_abort); return OCS_HW_RTN_ERROR; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n", hw->state); return OCS_HW_RTN_ERROR; } /* take a reference on IO being aborted */ if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) { /* command no longer active */ ocs_log_test(hw ? 
hw->os : NULL, "io not active xri=0x%x tag=0x%x\n", io_to_abort->indicator, io_to_abort->reqtag); return OCS_HW_RTN_IO_NOT_ACTIVE; } /* non-port owned XRI checks */ /* Must have a valid WQ reference */ if (io_to_abort->wq == NULL) { ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n", io_to_abort->indicator); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ return OCS_HW_RTN_IO_NOT_ACTIVE; } /* Validation checks complete; now check to see if already being aborted */ ocs_lock(&hw->io_abort_lock); if (io_to_abort->abort_in_progress) { ocs_unlock(&hw->io_abort_lock); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ ocs_log_debug(hw ? hw->os : NULL, "io already being aborted xri=0x%x tag=0x%x\n", io_to_abort->indicator, io_to_abort->reqtag); return OCS_HW_RTN_IO_ABORT_IN_PROGRESS; } /* * This IO is not already being aborted. Set flag so we won't try to * abort it again. After all, we only have one abort_done callback. */ io_to_abort->abort_in_progress = 1; ocs_unlock(&hw->io_abort_lock); /* * If we got here, the possibilities are: * - host owned xri * - io_to_abort->wq_index != UINT32_MAX * - submit ABORT_WQE to same WQ * - port owned xri: * - rxri: io_to_abort->wq_index == UINT32_MAX * - submit ABORT_WQE to any WQ * - non-rxri * - io_to_abort->index != UINT32_MAX * - submit ABORT_WQE to same WQ * - io_to_abort->index == UINT32_MAX * - submit ABORT_WQE to any WQ */ io_to_abort->abort_done = cb; io_to_abort->abort_arg = arg; atype = SLI_ABORT_XRI; id = io_to_abort->indicator; /* Allocate a request tag for the abort portion of this IO */ wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort); if (wqcb == NULL) { ocs_log_err(hw->os, "can't allocate request tag\n"); return OCS_HW_RTN_NO_RESOURCES; } io_to_abort->abort_reqtag = wqcb->instance_index; /* * If the wqe is on the pending list, then set this wqe to be * aborted when the IO's wqe is removed from the list. 
*/ if (io_to_abort->wq != NULL) { sli_queue_lock(io_to_abort->wq->queue); if (ocs_list_on_list(&io_to_abort->wqe.link)) { io_to_abort->wqe.abort_wqe_submit_needed = 1; io_to_abort->wqe.send_abts = send_abts; io_to_abort->wqe.id = id; io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag; sli_queue_unlock(io_to_abort->wq->queue); return 0; } sli_queue_unlock(io_to_abort->wq->queue); } if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask, io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) { ocs_log_err(hw->os, "ABORT WQE error\n"); io_to_abort->abort_reqtag = UINT32_MAX; ocs_hw_reqtag_free(hw, wqcb); rc = OCS_HW_RTN_ERROR; } if (OCS_HW_RTN_SUCCESS == rc) { if (io_to_abort->wq == NULL) { io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort); ocs_hw_assert(io_to_abort->wq != NULL); } /* ABORT_WQE does not actually utilize an XRI on the Port, * therefore, keep xbusy as-is to track the exchange's state, * not the ABORT_WQE's state */ rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe); if (rc > 0) { /* non-negative return is success */ rc = 0; /* can't abort an abort so skip adding to timed wqe list */ } } if (OCS_HW_RTN_SUCCESS != rc) { ocs_lock(&hw->io_abort_lock); io_to_abort->abort_in_progress = 0; ocs_unlock(&hw->io_abort_lock); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ } return rc; } /** * @ingroup io * @brief Return the OX_ID/RX_ID of the IO. * * @param hw Hardware context. * @param io HW IO object. * * @return Returns X_ID on success, or -1 on failure. */ int32_t ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io) { if (!hw || !io) { ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io); return -1; } return io->indicator; } typedef struct ocs_hw_fw_write_cb_arg { ocs_hw_fw_cb_t cb; void *arg; } ocs_hw_fw_write_cb_arg_t; typedef struct ocs_hw_sfp_cb_arg { ocs_hw_sfp_cb_t cb; void *arg; ocs_dma_t payload; } ocs_hw_sfp_cb_arg_t; typedef struct ocs_hw_temp_cb_arg { ocs_hw_temp_cb_t cb; void *arg; } ocs_hw_temp_cb_arg_t; typedef struct ocs_hw_link_stat_cb_arg { ocs_hw_link_stat_cb_t cb; void *arg; } ocs_hw_link_stat_cb_arg_t; typedef struct ocs_hw_host_stat_cb_arg { ocs_hw_host_stat_cb_t cb; void *arg; } ocs_hw_host_stat_cb_arg_t; typedef struct ocs_hw_dump_get_cb_arg { ocs_hw_dump_get_cb_t cb; void *arg; void *mbox_cmd; } ocs_hw_dump_get_cb_arg_t; typedef struct ocs_hw_dump_clear_cb_arg { ocs_hw_dump_clear_cb_t cb; void *arg; void *mbox_cmd; } ocs_hw_dump_clear_cb_arg_t; /** * @brief Write a portion of a firmware image to the device. * * @par Description * Calls the correct firmware write function based on the device type. * * @param hw Hardware context. * @param dma DMA structure containing the firmware image chunk. * @param size Size of the firmware image chunk. * @param offset Offset, in bytes, from the beginning of the firmware image. * @param last True if this is the last chunk of the image. * Causes the image to be committed to flash. * @param cb Pointer to a callback function that is called when the command completes. * The callback function prototype is * void cb(int32_t status, uint32_t bytes_written, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. 
*/
ocs_hw_rtn_e
ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
{
	/* Only Lancer (XE201, IF_TYPE 2) has a firmware-write implementation */
	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
	}

	/* Write firmware_write for BE3/Skyhawk not supported */
	return -1;
}

/**
 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
 *
 * @par Description
 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
 * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
 * and to signal the caller that the write has completed.
 *
 * @param hw Hardware context.
 * @param dma DMA structure containing the firmware image chunk.
 * @param size Size of the firmware image chunk.
 * @param offset Offset, in bytes, from the beginning of the firmware image.
 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
 * @param cb Pointer to a callback function that is called when the command completes.
 * The callback function prototype is
 * void cb(int32_t status, uint32_t bytes_written, void *arg).
 * @param arg Pointer to be passed to the callback function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/
ocs_hw_rtn_e
ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_fw_write_cb_arg_t *cb_arg;
	uint8_t *mbxdata;
	int noc = 0;	/* No Commit bit - set to 1 for testing */

	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/* mailbox buffer for the WRITE_OBJECT command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* context handed to ocs_hw_cb_fw_write() on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}
	cb_arg->cb = cb;
	cb_arg->arg = arg;

	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
			size, offset, "/prg/", dma)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
		if (rc == OCS_HW_RTN_SUCCESS) {
			/* completion callback now owns mbxdata and cb_arg */
			return rc;
		}
	}

	/* command could not be formatted or submitted: release everything here */
	ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
	ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
	return rc;
}

/**
 * @brief Called when the WRITE OBJECT command completes.
 *
 * @par Description
 * Get the number of bytes actually written out of the response, free the mailbox
 * that was malloc'd by ocs_hw_firmware_write(),
 * then call the callback and pass the status and bytes written.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is void cb(int32_t status, uint32_t bytes_written).
 *
 * @return Returns 0.
*/
static int32_t
ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_sli_config_t *sli_rsp = (sli4_cmd_sli_config_t *)mqe;
	sli4_res_common_write_object_t *wr_obj =
		(sli4_res_common_write_object_t *)&(sli_rsp->payload.embed);
	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
	/* capture the response fields before the mailbox buffer is freed */
	uint32_t bytes_written = wr_obj->actual_write_length;
	uint16_t mbox_status = sli_rsp->hdr.status;
	uint32_t change_status = wr_obj->change_status;

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	if (cb_arg == NULL) {
		return 0;
	}

	if (cb_arg->cb != NULL) {
		/* a zero transport status is overridden by a mailbox-level error */
		if ((status == 0) && mbox_status) {
			status = mbox_status;
		}
		cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
	}

	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
	return 0;
}

/**
 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
 *
 * @par Description
 * Get the number of bytes read out of the response, free the mailbox that was malloc'd
 * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is
 * void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg).
 *
 * @return Returns 0.
*/
static int32_t
ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_sfp_cb_arg_t *cb_arg = arg;

	if (cb_arg != NULL) {
		if (cb_arg->cb != NULL) {
			/* the non-embedded response lives in the DMA payload */
			sli4_res_common_read_transceiver_data_t *xcvr_rsp =
				(sli4_res_common_read_transceiver_data_t *)cb_arg->payload.virt;
			uint32_t bytes_written = xcvr_rsp->hdr.response_length;

			/* a zero transport status is overridden by a mailbox-level error */
			if ((status == 0) && xcvr_rsp->hdr.status) {
				status = xcvr_rsp->hdr.status;
			}
			cb_arg->cb(hw->os, status, bytes_written, xcvr_rsp->page_data, cb_arg->arg);
		}

		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @ingroup io
 * @brief Function to retrieve the SFP information.
 *
 * @param hw Hardware context.
 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
 * @param cb Function call upon completion of sending the data (may be NULL).
 * @param arg Argument to pass to IO completion function.
 *
 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
*/
ocs_hw_rtn_e
ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_sfp_cb_arg_t *cb_arg;
	uint8_t *mbxdata;

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}
	cb_arg->cb = cb;
	cb_arg->arg = arg;

	/* payload holds the non-embedded portion */
	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
			  OCS_MIN_DMA_ALIGNMENT)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* Send the HW command */
	if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
	    &cb_arg->payload)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
		if (rc == OCS_HW_RTN_SUCCESS) {
			/* ocs_hw_cb_sfp() now owns cb_arg, payload, and mbxdata */
			return rc;
		}
	}

	ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n", rc);
	ocs_dma_free(hw->os, &cb_arg->payload);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
	ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	return rc;
}

/**
 * @brief Function to retrieve the temperature information.
 *
 * @param hw Hardware context.
 * @param cb Function call upon completion of sending the data (may be NULL).
 * @param arg Argument to pass to IO completion function.
 *
 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
*/
ocs_hw_rtn_e
ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_temp_cb_arg_t *cb_arg;
	uint8_t *mbxdata;

	/* mailbox buffer for the DUMP type-4 command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* context handed to ocs_hw_cb_temp() on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}
	cb_arg->cb = cb;
	cb_arg->arg = arg;

	if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE, SLI4_WKI_TAG_SAT_TEM)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
		if (rc == OCS_HW_RTN_SUCCESS) {
			/* completion callback now owns mbxdata and cb_arg */
			return rc;
		}
	}

	ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
	ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
	return rc;
}

/**
 * @brief Called when the DUMP command completes.
 *
 * @par Description
 * Get the temperature data out of the response, free the mailbox that was malloc'd
 * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is defined by ocs_hw_temp_cb_t.
 *
 * @return Returns 0.
*/
static int32_t
ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_dump4_t *dump_rsp = (sli4_cmd_dump4_t *)mqe;
	ocs_hw_temp_cb_arg_t *cb_arg = arg;
	/* capture the six response words before the mailbox buffer is freed */
	uint32_t curr_temp        = dump_rsp->resp_data[0];	/* word 5 */
	uint32_t crit_temp_thrshld = dump_rsp->resp_data[1];	/* word 6 */
	uint32_t warn_temp_thrshld = dump_rsp->resp_data[2];	/* word 7 */
	uint32_t norm_temp_thrshld = dump_rsp->resp_data[3];	/* word 8 */
	uint32_t fan_off_thrshld   = dump_rsp->resp_data[4];	/* word 9 */
	uint32_t fan_on_thrshld    = dump_rsp->resp_data[5];	/* word 10 */

	if (cb_arg != NULL) {
		if (cb_arg->cb != NULL) {
			/* a zero transport status is overridden by a mailbox-level error */
			if ((status == 0) && dump_rsp->hdr.status) {
				status = dump_rsp->hdr.status;
			}
			cb_arg->cb(status, curr_temp, crit_temp_thrshld,
				   warn_temp_thrshld, norm_temp_thrshld,
				   fan_off_thrshld, fan_on_thrshld, cb_arg->arg);
		}
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
	}
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return 0;
}

/**
 * @brief Function to retrieve the link statistics.
 *
 * @param hw Hardware context.
 * @param req_ext_counters If TRUE, then the extended counters will be requested.
 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
 * @param clear_all_counters If TRUE, the counters will be cleared.
 * @param cb Function call upon completion of sending the data (may be NULL).
 * @param arg Argument to pass to IO completion function.
 *
 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
*/
ocs_hw_rtn_e
ocs_hw_get_link_stats(ocs_hw_t *hw, uint8_t req_ext_counters, uint8_t clear_overflow_flags, uint8_t clear_all_counters, ocs_hw_link_stat_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_link_stat_cb_arg_t *cb_arg;
	uint8_t *mbxdata;

	/* mailbox buffer for the READ_LINK_STATS command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* context handed to ocs_hw_cb_link_stat() on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;

	if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE, req_ext_counters,
				    clear_overflow_flags, clear_all_counters)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
	}

	/* on failure the completion handler never runs; free both buffers here */
	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
	}

	return rc;
}

/**
 * @brief Called when the READ_LINK_STAT command completes.
 *
 * @par Description
 * Get the counters out of the response, free the mailbox that was malloc'd
 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
	ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
	ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
	/* 20 counters are valid when extended counters were requested (gec), else 13 */
	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);

	ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
		   OCS_HW_LINK_STAT_MAX);

	/* per-counter overflow indicators (response words 2-21) */
	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
	counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;

	/* counter values */
	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
	counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;

	if (cb_arg) {
		if (cb_arg->cb) {
			/* a zero transport status is overridden by a mailbox-level error */
			if ((status == 0) && mbox_rsp->hdr.status) {
				status = mbox_rsp->hdr.status;
			}
			cb_arg->cb(status, num_counters, counts, cb_arg->arg);
		}
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
	}
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return 0;
}

/**
 * @brief Function to retrieve the link and host statistics.
 *
 * @param hw Hardware context.
* @param cc clear counters, if TRUE all counters will be cleared. * @param cb Function call upon completion of receiving the data. * @param arg Argument to pass to pointer fc hosts statistics structure. * * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. */ ocs_hw_rtn_e ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_host_stat_cb_arg_t *cb_arg; uint8_t *mbxdata; mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; /* Send the HW command to get the host stats */ if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_HOST_STATS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t)); } return rc; } /** * @brief Called when the READ_STATUS command completes. * * @par Description * Get the counters out of the response, free the mailbox that was malloc'd * by ocs_hw_get_host_stats(), then call the callback and pass * the status and data. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is defined by * ocs_hw_host_stat_cb_t. * * @return Returns 0. 
*/
static int32_t
ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
	ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
	ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
	uint32_t num_counters = OCS_HW_HOST_STAT_MAX;

	ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
		   OCS_HW_HOST_STAT_MAX);

	/* copy the READ_STATUS response words into the generic counter array */
	counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
	counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
	counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
	counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
	counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
	counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
	/* NOTE(review): "HOSY" looks like a typo for "HOST" in this enum name,
	 * but it is the name declared elsewhere in the project — left as-is */
	counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
	counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
	counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
	counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;

	if (cb_arg) {
		if (cb_arg->cb) {
			/* a zero transport status is overridden by a mailbox-level error */
			if ((status == 0) && mbox_rsp->hdr.status) {
				status = mbox_rsp->hdr.status;
			}
			cb_arg->cb(status, num_counters, counts, cb_arg->arg);
		}
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
	}
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return 0;
}

/**
 * @brief HW link configuration enum to the CLP string value mapping.
* * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the CLP string that is used * in the DMTF_CLP_CMD mailbox command. */ typedef struct ocs_hw_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; const char *clp_str; } ocs_hw_linkcfg_map_t; /** * @brief Mapping from the HW linkcfg enum to the CLP command value * string. */ static ocs_hw_linkcfg_map_t linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"}, {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"}, {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"}, {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"}, {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"}, {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"}, {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}}; /** * @brief HW link configuration enum to Skyhawk link config ID mapping. * * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the link config ID numbers used by Skyhawk */ typedef struct ocs_hw_skyhawk_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; uint32_t config_id; } ocs_hw_skyhawk_linkcfg_map_t; /** * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs */ static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, 0x0a}, {OCS_HW_LINKCFG_1X40G, 0x09}, }; /** * @brief Helper function for getting the HW linkcfg enum from the CLP * string value * * @param clp_str CLP string value from OEMELX_LinkConfig. * * @return Returns the HW linkcfg enum corresponding to clp_str. */ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_clp(const char *clp_str) { uint32_t i; for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) { return linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Helper function for getting the CLP string value from the HW * linkcfg enum. * * @param linkcfg HW linkcfg enum. 
* * @return Returns the OEMELX_LinkConfig CLP string value corresponding to * given linkcfg. */ static const char * ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { if (linkcfg_map[i].linkcfg == linkcfg) { return linkcfg_map[i].clp_str; } } return NULL; } /** * @brief Helper function for getting a Skyhawk link config ID from the HW * linkcfg enum. * * @param linkcfg HW linkcfg enum. * * @return Returns the Skyhawk link config ID corresponding to * given linkcfg. */ static uint32_t ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) { return skyhawk_linkcfg_map[i].config_id; } } return 0; } /** * @brief Helper function for getting the HW linkcfg enum from a * Skyhawk config ID. * * @param config_id Skyhawk link config ID. * * @return Returns the HW linkcfg enum corresponding to config_id. */ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_config_id(const uint32_t config_id) { uint32_t i; for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { if (skyhawk_linkcfg_map[i].config_id == config_id) { return skyhawk_linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Link configuration callback argument. */ typedef struct ocs_hw_linkcfg_cb_arg_s { ocs_hw_port_control_cb_t cb; void *arg; uint32_t opts; int32_t status; ocs_dma_t dma_cmd; ocs_dma_t dma_resp; uint32_t result_len; } ocs_hw_linkcfg_cb_arg_t; /** * @brief Set link configuration. * * @param hw Hardware context. * @param value Link configuration enum to which the link configuration is * set. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
*/
static ocs_hw_rtn_e
ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	if (!sli_link_is_configurable(&hw->sli)) {
		ocs_log_debug(hw->os, "Function not supported\n");
		return OCS_HW_RTN_ERROR;
	}

	/* dispatch on ASIC family */
	switch (sli_get_if_type(&hw->sli)) {
	case SLI4_IF_TYPE_LANCER_FC_ETH:
		return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
	case SLI4_IF_TYPE_BE3_SKH_PF:
	case SLI4_IF_TYPE_BE3_SKH_VF:
		return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
	default:
		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
		return OCS_HW_RTN_ERROR;
	}
}

/**
 * @brief Set link configuration for Lancer
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
*/
static ocs_hw_rtn_e
ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	const char *value_str = NULL;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* translate ocs_hw_linkcfg_e to CLP string */
	value_str = ocs_hw_clp_from_linkcfg(value);

	/*
	 * FIX: ocs_hw_clp_from_linkcfg() returns NULL for an enum value with
	 * no table entry; the original formatted it with "%s" unconditionally
	 * (NULL dereference / undefined behavior in ocs_snprintf).
	 */
	if (value_str == NULL) {
		ocs_log_test(hw->os, "CLP translation failed for linkcfg %d\n", value);
		return OCS_HW_RTN_ERROR;
	}

	/* allocate memory for callback argument */
	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg");
		return OCS_HW_RTN_NO_MEMORY;
	}

	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
	/* allocate DMA for command  */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		return OCS_HW_RTN_NO_MEMORY;
	}
	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));

	/* allocate DMA for response */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		return OCS_HW_RTN_NO_MEMORY;
	}
	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->opts = opts;

	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);

	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
		/* if failed, or polling, free memory here; if success and not
		 * polling, will free in callback function
		 */
		if (rc) {
			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
					(char *)cb_arg->dma_cmd.virt);
		}
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_dma_free(hw->os, &cb_arg->dma_resp);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
	return rc;
}

/**
 * @brief Callback for ocs_hw_set_linkcfg_skyhawk
 *
 * @param hw Hardware context.
 * @param status Status from the RECONFIG_GET_LINK_INFO command.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback argument.
 *
 * @return none
 */
static void
ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;

	if (status) {
		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, 0, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Set link configuration for a Skyhawk
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	uint8_t *mbxdata;
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t config_id;

	config_id = ocs_hw_config_id_from_linkcfg(value);

	if (config_id == 0) {
		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
		return OCS_HW_RTN_ERROR;
	}

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;

	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata,
			SLI4_BMBX_SIZE, NULL, 0, config_id)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else if (opts == OCS_CMD_POLL) {
		/* if we're polling we have to call the callback here. */
		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else {
		/* We weren't polling, so the callback got called */
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @brief Get link configuration.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	if (!sli_link_is_configurable(&hw->sli)) {
		ocs_log_debug(hw->os, "Function not supported\n");
		return OCS_HW_RTN_ERROR;
	}

	/* Same ASIC-family dispatch as ocs_hw_set_linkcfg(). */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
		return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
		return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
	} else {
		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
		return OCS_HW_RTN_ERROR;
	}
}

/**
 * @brief Get link configuration for a Lancer
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* allocate memory for callback argument */
	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg");
		return OCS_HW_RTN_NO_MEMORY;
	}

	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");

	/* allocate DMA for command */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* copy CLP command to DMA command */
	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));

	/* allocate DMA for response */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		return OCS_HW_RTN_NO_MEMORY;
	}
	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->opts = opts;

	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
				      opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);

	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
		/* if failed or polling, free memory here; if not polling and
		 * success, will free in callback function */
		if (rc) {
			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
				     (char *)cb_arg->dma_cmd.virt);
		}
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_dma_free(hw->os, &cb_arg->dma_resp);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
	return rc;
}

/**
 * @brief Get the link configuration callback.
 *
 * @param hw Hardware context.
 * @param status Status from the RECONFIG_GET_LINK_INFO command.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback argument.
 *
 * @return none
 */
static void
ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
	/* non-embedded response was DMA'd into dma_cmd by the mailbox command */
	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;

	if (status) {
		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
	} else {
		/* Call was successful */
		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, value, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Get link configuration for a Skyhawk.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	uint8_t *mbxdata;
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->opts = opts;

	/* dma_mem holds the non-embedded portion */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else if (opts == OCS_CMD_POLL) {
		/* if we're polling we have to call the callback here. */
		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else {
		/* We weren't polling, so the callback got called */
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @brief Sets the DIF seed value.
* * @param hw Hardware context. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_dif_seed_t seed_param; ocs_memset(&seed_param, 0, sizeof(seed_param)); seed_param.seed = hw->config.dif_seed; /* send set_features command */ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_DIF_SEED, 4, (uint32_t*)&seed_param)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_debug(hw->os, "DIF seed set to 0x%x\n", hw->config.dif_seed); } } else { ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n"); rc = OCS_HW_RTN_ERROR; } return rc; } /** * @brief Sets the DIF mode value. * * @param hw Hardware context. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_t10_pi_mem_model_t mode_param; ocs_memset(&mode_param, 0, sizeof(mode_param)); mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1); /* send set_features command */ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_DIF_MEMORY_MODE, sizeof(mode_param), (uint32_t*)&mode_param)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "DIF mode set to %s\n", (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 
"inline" : "separate")); } } else { ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n"); rc = OCS_HW_RTN_ERROR; } return rc; } static void ocs_hw_watchdog_timer_cb(void *arg) { ocs_hw_t *hw = (ocs_hw_t *)arg; ocs_hw_config_watchdog_timer(hw); return; } static void ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { uint16_t timeout = hw->watchdog_timeout; if (status != 0) { ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status); } else { if(timeout != 0) { /* keeping callback 500ms before timeout to keep heartbeat alive */ ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) ); }else { ocs_del_timer(&hw->watchdog_timer); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return; } /** * @brief Set configuration parameters for watchdog timer feature. * * @param hw Hardware context. * @param timeout Timeout for watchdog timer in seconds * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); + if (!buf) { + ocs_log_err(hw->os, "no buffer for command\n"); + return OCS_HW_RTN_NO_MEMORY; + } + sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout); rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL); if (rc) { ocs_free(hw->os, buf, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc); } return rc; } /** * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature. * * @param hw Hardware context. * @param buf Pointer to a mailbox buffer area. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
*/ static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; sli4_req_common_set_features_xfer_rdy_t10pi_t param; ocs_memset(¶m, 0, sizeof(param)); param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1); param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0); param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1); param.app_tag = hw->config.auto_xfer_rdy_app_tag_value; param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip; switch (hw->config.auto_xfer_rdy_p_type) { case 1: param.p_type = 0; break; case 3: param.p_type = 2; break; default: ocs_log_err(hw->os, "unsupported p_type %d\n", hw->config.auto_xfer_rdy_p_type); return OCS_HW_RTN_ERROR; } /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI, sizeof(param), ¶m); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n", param.rtc, param.atv, param.p_type, param.app_tag, param.blk_size); } return rc; } /** * @brief enable sli port health check * * @param hw Hardware context. * @param buf Pointer to a mailbox buffer area. * @param query current status of the health check feature enabled/disabled * @param enable if 1: enable 0: disable * @param buf Pointer to a mailbox buffer area. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
*/ static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_health_check_t param; ocs_memset(¶m, 0, sizeof(param)); param.hck = enable; param.qry = query; /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), ¶m); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "SLI Port Health Check is enabled \n"); } return rc; } /** * @brief Set FTD transfer hint feature * * @param hw Hardware context. * @param fdt_xfer_hint size in bytes where read requests are segmented. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_set_fdt_xfer_hint_t param; ocs_memset(¶m, 0, sizeof(param)); param.fdt_xfer_hint = fdt_xfer_hint; /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), ¶m); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc); } else { ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint); } return rc; } /** * @brief Get the link configuration callback. * * @param hw Hardware context. * @param status Status from the DMTF CLP command. * @param result_len Length, in bytes, of the DMTF CLP result. * @param arg Pointer to a callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
 */
static void
ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
{
	int32_t rval;
	char retdata_str[64];
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;

	if (status) {
		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
	} else {
		/* parse CLP response to get return data */
		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
						 sizeof(retdata_str),
						 cb_arg->dma_resp.virt,
						 result_len);

		if (rval <= 0) {
			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
		} else {
			/* translate string into hw enum */
			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
		}
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, linkcfg, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_dma_free(hw->os, &cb_arg->dma_resp);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Set the Lancer dump location
 * @par Description
 * This function tells a Lancer chip to use a specific DMA
 * buffer as a dump location rather than the internal flash.
 *
 * @param hw Hardware context.
 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
 * @param dump_buffers DMA buffers to hold the dump.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
ocs_hw_rtn_e
ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
{
	uint8_t bus, dev, func;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint8_t buf[SLI4_BMBX_SIZE];

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* This command is only valid for physical port 0 */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (fdb == 0 && func != 0) {
		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
			     func);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * If a single buffer is used, then it may be passed as is to the chip.
	 * For multiple buffers, we must allocate a SGL list and then pass the
	 * address of the list to the chip.
	 */
	if (num_buffers > 1) {
		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
		sli4_sge_t *sge;
		uint32_t i;

		/* grow the cached SGE list only when too small for this dump */
		if (hw->dump_sges.size < sge_size) {
			ocs_dma_free(hw->os, &hw->dump_sges);
			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
				return OCS_HW_RTN_NO_MEMORY;
			}
		}
		/* build the SGE list */
		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
		hw->dump_sges.len = sge_size;
		sge = hw->dump_sges.virt;
		for (i = 0; i < num_buffers; i++) {
			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
			sge[i].buffer_length = dump_buffers[i].size;
		}
		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
						      SLI4_BMBX_SIZE, FALSE, TRUE,
						      &hw->dump_sges, fdb);
	} else {
		dump_buffers->len = dump_buffers->size;
		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
						      SLI4_BMBX_SIZE, FALSE, FALSE,
						      dump_buffers, fdb);
	}

	if (rc) {
		/* sli_cmd_* returning non-zero means the command was built */
		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc) {
			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
		}
	} else {
		ocs_log_err(hw->os, "sli_cmd_common_set_dump_location failed\n");
		rc = OCS_HW_RTN_ERROR;
	}

	return rc;
}

/**
 * @brief Set the Ethernet license.
 *
 * @par Description
 * This function sends the appropriate mailbox command (DMTF
 * CLP) to set the Ethernet license to the given license value.
 * Since it is used during the time of ocs_hw_init(), the mailbox
 * command is sent via polling (the BMBX route).
 *
 * @param hw Hardware context.
 * @param license 32-bit license value.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
	ocs_dma_t dma_cmd;
	ocs_dma_t dma_resp;

	/* only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
	/* allocate DMA for command */
	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		return OCS_HW_RTN_NO_MEMORY;
	}
	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));

	/* allocate DMA for response */
	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_dma_free(hw->os, &dma_cmd);
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* send DMTF CLP command mbx and poll */
	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
		rc = OCS_HW_RTN_ERROR;
	}

	ocs_dma_free(hw->os, &dma_cmd);
	ocs_dma_free(hw->os, &dma_resp);
	return rc;
}

/**
 * @brief Callback argument structure for the DMTF CLP commands.
 */
typedef struct ocs_hw_clp_cb_arg_s {
	ocs_hw_dmtf_clp_cb_t cb;	/* user callback (may be NULL) */
	ocs_dma_t *dma_resp;		/* DMA buffer holding the CLP response */
	int32_t status;			/* condensed status, read by pollers */
	uint32_t opts;			/* OCS_CMD_POLL or OCS_CMD_NOWAIT */
	void *arg;			/* user callback argument */
} ocs_hw_clp_cb_arg_t;

/**
 * @brief Execute the DMTF CLP command.
 *
 * @param hw Hardware context.
 * @param dma_cmd DMA buffer containing the CLP command.
 * @param dma_resp DMA buffer that will contain the response (if successful).
 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
 * @param cb Callback function.
 * @param arg Callback argument.
 *
 * @return Returns the number of bytes written to the response
 * buffer on success, or a negative value if failed.
 */
static ocs_hw_rtn_e
ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_clp_cb_arg_t *cb_arg;
	uint8_t *mbxdata;

	/* allocate DMA for mailbox */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* allocate memory for callback argument */
	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->dma_resp = dma_resp;
	cb_arg->opts = opts;

	/* Send the HW command */
	if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE, dma_cmd, dma_resp)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);

		if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
			/* if we're polling, copy response and invoke callback to
			 * parse result */
			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
			ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);

			/* set rc to resulting or "parsed" status */
			rc = cb_arg->status;
		}

		/* if failed, or polling, free memory here */
		if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "ocs_hw_command failed\n");
			}
			ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
			ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		}
	} else {
		ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
		rc = OCS_HW_RTN_ERROR;
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}

	return rc;
}

/**
 * @brief Called when the DMTF CLP command completes.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback argument.
 *
 * @return None.
 *
 */
static void
ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	int32_t cb_status = 0;
	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
	ocs_hw_clp_cb_arg_t *cb_arg = arg;
	uint32_t result_len = 0;
	int32_t stat_len;
	char stat_str[8];

	/* there are several status codes here, check them all and condense
	 * into a single callback status (priority: mbox, then SLI header,
	 * then CLP-level status) */
	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
		ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
			status, mbox_rsp->hdr.status, clp_rsp->hdr.status,
			clp_rsp->hdr.additional_status, clp_rsp->clp_status,
			clp_rsp->clp_detailed_status);
		if (status) {
			cb_status = status;
		} else if (mbox_rsp->hdr.status) {
			cb_status = mbox_rsp->hdr.status;
		} else {
			cb_status = clp_rsp->clp_status;
		}
	} else {
		result_len = clp_rsp->resp_length;
	}

	if (cb_status) {
		goto ocs_hw_cb_dmtf_clp_done;
	}

	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
			     cb_arg->dma_resp->size, result_len);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

	/* parse CLP response to get status */
	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
					     sizeof(stat_str),
					     cb_arg->dma_resp->virt,
					     result_len);
	if (stat_len <= 0) {
		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

	if (ocs_strcmp(stat_str, "0") != 0) {
		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

ocs_hw_cb_dmtf_clp_done:
	/* save status in cb_arg for callers with NULL cb's + polling */
	cb_arg->status = cb_status;
	if (cb_arg->cb) {
		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
	}
	/* if polling, caller will free memory */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	}
}

/**
 * @brief Parse the CLP result and get the value corresponding to the given
 * keyword.
 *
 * @param hw Hardware context.
 * @param keyword CLP keyword for which the value is returned.
 * @param value Location to which the resulting value is copied.
 * @param value_len Length of the value parameter.
 * @param resp Pointer to the response buffer that is searched
 * for the keyword and value.
 * @param resp_len Length of response buffer passed in.
 *
 * @return Returns the number of bytes written to the value
 * buffer on success, or a negative vaue on failure.
 */
static int32_t
ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
{
	char *start = NULL;
	char *end = NULL;

	/* look for specified keyword in string */
	start = ocs_strstr(resp, keyword);
	if (start == NULL) {
		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
			     keyword);
		return -1;
	}

	/* now look for '=' and go one past */
	start = ocs_strchr(start, '=');
	if (start == NULL) {
		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
			     keyword);
		return -1;
	}
	start++;

	/* \r\n terminates value */
	end = ocs_strstr(start, "\r\n");
	if (end == NULL) {
		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
			     keyword);
		return -1;
	}

	/* make sure given result array is big enough */
	if ((end - start + 1) > value_len) {
		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
			     value_len, (end-start));
		return -1;
	}

	ocs_strncpy(value, start, (end - start));
	value[end-start] = '\0';
	return (end-start+1);
}

/**
 * @brief Cause chip to enter an unrecoverable error state.
 *
 * @par Description
 * Cause chip to enter an unrecoverable error state.
 * This is
 * used when detecting unexpected FW behavior so that the FW can be
 * halted from the driver as soon as the error is detected.
 *
 * @param hw Hardware context.
 * @param dump Generate dump as part of reset.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 *
 */
ocs_hw_rtn_e
ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (sli_raise_ue(&hw->sli, dump) != 0) {
		rc = OCS_HW_RTN_ERROR;
	} else {
		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
		}
	}

	return rc;
}

/**
 * @brief Called when the OBJECT_GET command completes.
 *
 * @par Description
 * Get the number of bytes actually written out of the response, free the mailbox
 * that was malloc'd by ocs_hw_dump_get(), then call the callback
 * and pass the status and bytes read.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is void cb(int32_t status, uint32_t bytes_read).
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
	sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
	ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
	uint32_t bytes_read;
	uint8_t eof;

	bytes_read = rd_obj_rsp->actual_read_length;
	eof = rd_obj_rsp->eof;

	if (cb_arg) {
		if (cb_arg->cb) {
			/* prefer the SLI header status when the mbox-level
			 * status is clean */
			if ((status == 0) && mbox_rsp->hdr.status) {
				status = mbox_rsp->hdr.status;
			}
			cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
		}

		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
	}

	return 0;
}

/**
 * @brief Read a dump image to the host.
 *
 * @par Description
 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
 * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
 * and signal the caller that the read has completed.
 *
 * @param hw Hardware context.
 * @param dma DMA structure to transfer the dump chunk into.
 * @param size Size of the dump chunk.
 * @param offset Offset, in bytes, from the beginning of the dump.
 * @param cb Pointer to a callback function that is called when the command completes.
 * The callback function prototype is
 * void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg).
 * @param arg Pointer to be passed to the callback function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	uint8_t *mbxdata;
	ocs_hw_dump_get_cb_arg_t *cb_arg;
	/* must poll if the HW isn't fully active yet */
	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);

	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	if (1 != sli_dump_is_present(&hw->sli)) {
		ocs_log_test(hw->os, "No dump is present\n");
		return OCS_HW_RTN_ERROR;
	}

	if (1 == sli_reset_required(&hw->sli)) {
		ocs_log_test(hw->os, "device reset required\n");
		return OCS_HW_RTN_ERROR;
	}

	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->mbox_cmd = mbxdata;

	if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
			size, offset, "/dbg/dump.bin", dma)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
		if (rc == 0 && opts == OCS_CMD_POLL) {
			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
			rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
		}
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
	}

	return rc;
}

/**
 * @brief Called when the OBJECT_DELETE command completes.
 *
 * @par Description
 * Free the mailbox that was malloc'd
 * by ocs_hw_dump_clear(), then call the callback and pass the status.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is void cb(int32_t status, void *arg).
 *
 * @return Returns 0.
*/ static int32_t ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_dump_clear_cb_arg_t *cb_arg = arg; sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); } return 0; } /** * @brief Clear a dump image from the device. * * @par Description * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear * the dump, then sends the command with ocs_hw_command(). On completion, * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox * and to signal the caller that the write has completed. * * @param hw Hardware context. * @param cb Pointer to a callback function that is called when the command completes. * The callback function prototype is * void cb(int32_t status, uint32_t bytes_written, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; uint8_t *mbxdata; ocs_hw_dump_clear_cb_arg_t *cb_arg; uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? 
OCS_CMD_NOWAIT : OCS_CMD_POLL); if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->mbox_cmd = mbxdata; if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, "/dbg/dump.bin")) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg); if (rc == 0 && opts == OCS_CMD_POLL) { ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg); } } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_port_protocol_cb_arg_s { ocs_get_port_protocol_cb_t cb; void *arg; uint32_t pci_func; ocs_dma_t payload; } ocs_hw_get_port_protocol_cb_arg_t; /** * @brief Called for the completion of get_port_profile for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; ocs_hw_port_protocol_e port_protocol; int num_descriptors; sli4_resource_descriptor_v1_t *desc_p; sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; int i; port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; num_descriptors = response->desc_count; desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; idescriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; if (pcie_desc_p->pf_number == cb_arg->pci_func) { switch(pcie_desc_p->pf_type) { case 0x02: port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; break; case 0x04: port_protocol = OCS_HW_PORT_PROTOCOL_FCOE; break; case 0x10: port_protocol = OCS_HW_PORT_PROTOCOL_FC; break; default: port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; break; } } } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } if (cb_arg->cb) { cb_arg->cb(status, port_protocol, cb_arg->arg); } ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @ingroup io * @brief Get the current port protocol. * @par Description * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param pci_func PCI function to query for current protocol. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. 
* - OCS_HW_RTN_ERROR on any other error. */ ocs_hw_rtn_e ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func, ocs_get_port_protocol_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_port_protocol_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; cb_arg->pci_func = pci_func; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); ocs_dma_free(hw->os, &cb_arg->payload); } return rc; } typedef struct ocs_hw_set_port_protocol_cb_arg_s { ocs_set_port_protocol_cb_t cb; void *arg; ocs_dma_t payload; uint32_t new_protocol; uint32_t pci_func; } ocs_hw_set_port_protocol_cb_arg_t; /** * @brief Called for the completion of set_port_profile for a * user request. 
* * @par Description * This is the second of two callbacks for the set_port_protocol * function. The set operation is a read-modify-write. This * callback is called when the write (SET_PROFILE_CONFIG) * completes. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return 0 on success, non-zero otherwise */ static int32_t ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb( status, cb_arg->arg); } ocs_dma_free(hw->os, &(cb_arg->payload)); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); return 0; } /** * @brief Called for the completion of set_port_profile for a * user request. * * @par Description * This is the first of two callbacks for the set_port_protocol * function. The set operation is a read-modify-write. This * callback is called when the read completes * (GET_PROFILE_CONFG). It will updated the resource * descriptors, then queue the write (SET_PROFILE_CONFIG). * * On entry there are three memory areas that were allocated by * ocs_hw_set_port_protocol. If a failure is detected in this * function those need to be freed. If this function succeeds * it allocates three more areas. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value otherwise. 
*/ static int32_t ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; int num_descriptors; sli4_resource_descriptor_v1_t *desc_p; sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; int i; ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg; ocs_hw_port_protocol_e new_protocol; uint8_t *dst; sli4_isap_resouce_descriptor_v1_t *isap_desc_p; uint8_t *mbxdata; int pci_descriptor_count; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; int num_fcoe_ports = 0; int num_iscsi_ports = 0; new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol; num_descriptors = response->desc_count; /* Count PCI descriptors */ pci_descriptor_count = 0; desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; idescriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { ++pci_descriptor_count; } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (new_cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } new_cb_arg->cb = cb_arg->cb; new_cb_arg->arg = cb_arg->arg; /* Allocate memory for the descriptors we're going to send. This is * one for each PCI descriptor plus one ISAP descriptor. 
*/ if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) + (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) + sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &new_cb_arg->payload, 0, pci_descriptor_count+1, 1); /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */ dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc); /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor * that we're trying to change also set its pf_type. */ desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; idescriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; if (pcie_desc_p->pf_number == cb_arg->pci_func) { /* This is the PCIe descriptor for this OCS instance. 
* Update it with the new pf_type */ switch(new_protocol) { case OCS_HW_PORT_PROTOCOL_FC: pcie_desc_p->pf_type = SLI4_PROTOCOL_FC; break; case OCS_HW_PORT_PROTOCOL_FCOE: pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE; break; case OCS_HW_PORT_PROTOCOL_ISCSI: pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI; break; default: pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT; break; } } if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) { ++num_fcoe_ports; } if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) { ++num_iscsi_ports; } ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t)); dst += sizeof(sli4_pcie_resource_descriptor_v1_t); } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } /* Create an ISAP resource descriptor */ isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst; isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP; isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t); if (num_iscsi_ports > 0) { isap_desc_p->iscsi_tgt = 1; isap_desc_p->iscsi_ini = 1; isap_desc_p->iscsi_dif = 1; } if (num_fcoe_ports > 0) { isap_desc_p->fcoe_tgt = 1; isap_desc_p->fcoe_ini = 1; isap_desc_p->fcoe_dif = 1; } /* At this point we're done with the memory allocated by ocs_port_set_protocol */ ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg); if (rc) { ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n"); /* Call the upper level callback to report a failure */ if (new_cb_arg->cb) { new_cb_arg->cb( rc, new_cb_arg->arg); } /* Free the memory allocated by this function */ ocs_dma_free(hw->os, &new_cb_arg->payload); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); } return 
rc; } /** * @ingroup io * @brief Set the port protocol. * @par Description * Setting the port protocol is a read-modify-write operation. * This function submits a GET_PROFILE_CONFIG command to read * the current settings. The callback function will modify the * settings and issue the write. * * On successful completion this function will have allocated * two regular memory areas and one dma area which will need to * get freed later in the callbacks. * * @param hw Hardware context. * @param new_protocol New protocol to use. * @param pci_func PCI function to configure. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. */ ocs_hw_rtn_e ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol, uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg) { uint8_t *mbxdata; ocs_hw_set_port_protocol_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; cb_arg->new_protocol = new_protocol; cb_arg->pci_func = pci_func; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, 
&cb_arg->payload, 4096, 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); ocs_dma_free(hw->os, &cb_arg->payload); } return rc; } typedef struct ocs_hw_get_profile_list_cb_arg_s { ocs_get_profile_list_cb_t cb; void *arg; ocs_dma_t payload; } ocs_hw_get_profile_list_cb_arg_t; /** * @brief Called for the completion of get_profile_list for a * user request. * @par Description * This function is called when the COMMMON_GET_PROFILE_LIST * mailbox completes. The response will be in * ctx->non_embedded_mem.virt. This function parses the * response and creates a ocs_hw_profile_list, then calls the * mgmt_cb callback function and passes that list to it. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_profile_list_t *list; ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt; int i; int num_descriptors; list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO); list->num_descriptors = response->profile_descriptor_count; num_descriptors = list->num_descriptors; if (num_descriptors > OCS_HW_MAX_PROFILES) { num_descriptors = OCS_HW_MAX_PROFILES; } for (i=0; idescriptors[i].profile_id = response->profile_descriptor[i].profile_id; list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index; ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description); } if (cb_arg->cb) { cb_arg->cb(status, list, cb_arg->arg); } else { ocs_free(hw->os, list, sizeof(*list)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); return 0; } /** * @ingroup io * @brief Get a list of available profiles. * @par Description * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
*/ ocs_hw_rtn_e ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_profile_list_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_active_profile_cb_arg_s { ocs_get_active_profile_cb_t cb; void *arg; } ocs_hw_get_active_profile_cb_arg_t; /** * @brief Called for the completion of get_active_profile for a * user request. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. 
* @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg; sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed; uint32_t active_profile; active_profile = response->active_profile_id; if (cb_arg->cb) { cb_arg->cb(status, active_profile, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); return 0; } /** * @ingroup io * @brief Get the currently active profile. * @par Description * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
*/ int32_t ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_active_profile_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_nvparms_cb_arg_s { ocs_get_nvparms_cb_t cb; void *arg; } ocs_hw_get_nvparms_cb_arg_t; /** * @brief Called for the completion of get_nvparms for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. 
* * @return 0 on success, non-zero otherwise */ static int32_t ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg; sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe; if (cb_arg->cb) { cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa, mbox_rsp->preferred_d_id, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); return 0; } /** * @ingroup io * @brief Read non-volatile parms. * @par Description * Issues a SLI-4 READ_NVPARMS mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
*/ int32_t ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_nvparms_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_NVPARMS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); } return rc; } typedef struct ocs_hw_set_nvparms_cb_arg_s { ocs_set_nvparms_cb_t cb; void *arg; } ocs_hw_set_nvparms_cb_arg_t; /** * @brief Called for the completion of set_nvparms for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); return 0; } /** * @ingroup io * @brief Write non-volatile parms. * @par Description * Issues a SLI-4 WRITE_NVPARMS mailbox. 
 When the
 * command completes the provided mgmt callback function is
 * called.
 *
 * @param hw Hardware context.
 * @param cb Callback function to be called when the
 *	     command completes.
 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
 * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
 * @param hard_alpa A hard AL_PA address setting used during loop
 * initialization. If no hard AL_PA is required, set to 0.
 * @param preferred_d_id A preferred D_ID address setting
 * that may be overridden with the CONFIG_LINK mailbox command.
 * If there is no preference, set to 0.
 * @param ul_arg An argument that is passed to the callback
 * function.
 *
 * @return
 * - OCS_HW_RTN_SUCCESS on success.
 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
 *   context.
 * - OCS_HW_RTN_ERROR on any other error.
 */
int32_t
ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
{
	uint8_t *mbxdata;
	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = ul_arg;

	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		/* NOTE(review): if sli_cmd_write_nvparms() fails to build the
		 * command, rc stays OCS_HW_RTN_SUCCESS and mbxdata/cb_arg leak
		 * — pre-existing behavior; verify before changing. */
		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
	}

	return rc;
}

/**
 * @brief Called to obtain the count for the specified type.
 *
 * @param hw Hardware context.
 * @param io_count_type IO count type (inuse, free, wait_free).
 *
 * @return Returns the number of IOs on the specified list type.
 */
uint32_t
ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
{
	ocs_hw_io_t *io = NULL;
	uint32_t count = 0;

	/* io_lock guards all of the per-state IO lists walked below. */
	ocs_lock(&hw->io_lock);

	switch (io_count_type) {
	case OCS_HW_IO_INUSE_COUNT :
		ocs_list_foreach(&hw->io_inuse, io) {
			count++;
		}
		break;
	case OCS_HW_IO_FREE_COUNT :
		ocs_list_foreach(&hw->io_free, io) {
			count++;
		}
		break;
	case OCS_HW_IO_WAIT_FREE_COUNT :
		ocs_list_foreach(&hw->io_wait_free, io) {
			count++;
		}
		break;
	case OCS_HW_IO_PORT_OWNED_COUNT:
		ocs_list_foreach(&hw->io_port_owned, io) {
			count++;
		}
		break;
	case OCS_HW_IO_N_TOTAL_IO_COUNT :
		/* Total is a config value, not a list walk. */
		count = hw->config.n_io;
		break;
	}

	ocs_unlock(&hw->io_lock);

	return count;
}

/**
 * @brief Called to obtain the count of produced RQs.
 *
 * @param hw Hardware context.
 *
 * @return Returns the number of RQs produced.
 */
uint32_t
ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
{
	uint32_t count = 0;
	uint32_t i;
	uint32_t j;

	/* Count RQ-tracker slots that currently hold a posted buffer. */
	for (i = 0; i < hw->hw_rq_count; i++) {
		hw_rq_t *rq = hw->hw_rq[i];
		if (rq->rq_tracker != NULL) {
			for (j = 0; j < rq->entry_count; j++) {
				if (rq->rq_tracker[j] != NULL) {
					count++;
				}
			}
		}
	}

	return count;
}

typedef struct ocs_hw_set_active_profile_cb_arg_s {
	ocs_set_active_profile_cb_t cb;
	void *arg;
} ocs_hw_set_active_profile_cb_arg_t;

/**
 * @brief Called for the completion of set_active_profile for a
 *        user request.
 *
 * @param hw Hardware context.
 * @param status The status from the MQE
 * @param mqe Pointer to mailbox command buffer.
 * @param arg Pointer to a callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ static int32_t ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); return 0; } /** * @ingroup io * @brief Set the currently active profile. * @par Description * Issues a SLI4 COMMON_GET_ACTIVE_PROFILE mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param profile_id Profile ID to activate. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. */ int32_t ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg) { uint8_t *mbxdata; ocs_hw_set_active_profile_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) { rc = ocs_hw_command(hw, mbxdata, 
				    OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
	}

	return rc;
}

/*
 * Private functions
 */

/**
 * @brief Update the queue hash with the ID and index.
 *
 * Open-addressed hash with linear probing: starting at the slot given by the
 * low bits of @c id, walk forward (wrapping at OCS_HW_Q_HASH_SIZE) until a
 * free slot is found, then claim it. OCS_HW_Q_HASH_SIZE must be a power of
 * two for the mask below to work, and the table is sized larger than the
 * number of queues so a free slot always exists.
 *
 * @param hash Pointer to hash table.
 * @param id ID that was created.
 * @param index The index into the hash object.
 */
static void
ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
{
	uint32_t	hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);

	/*
	 * Since the hash is always bigger than the number of queues, then we
	 * never have to worry about an infinite loop.
	 */
	while(hash[hash_index].in_use) {
		hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
	}

	/* not used, claim the entry */
	hash[hash_index].id = id;
	hash[hash_index].in_use = 1;
	hash[hash_index].index = index;
}

/**
 * @brief Find index given queue ID.
 *
 * Probes the same linear sequence used by ocs_hw_queue_hash_add(); the scan
 * stops at the first unused slot, so entries are only found if the table has
 * never been probed past a deleted slot (entries are not removed in this
 * scheme).
 *
 * @param hash Pointer to hash table.
 * @param id ID to find.
 *
 * @return Returns the index into the HW cq array or -1 if not found.
 */
int32_t
ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
{
	int32_t	rc = -1;
	int32_t	index = id & (OCS_HW_Q_HASH_SIZE - 1);

	/*
	 * Since the hash is always bigger than the maximum number of Qs, then we
	 * never have to worry about an infinite loop. We will always find an
	 * unused entry.
*/ do { if (hash[index].in_use && hash[index].id == id) { rc = hash[index].index; } else { index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1); } } while(rc == -1 && hash[index].in_use); return rc; } static int32_t ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain) { int32_t rc = OCS_HW_RTN_ERROR; uint16_t fcfi = UINT16_MAX; if ((hw == NULL) || (domain == NULL)) { ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } fcfi = domain->fcf_indicator; if (fcfi < SLI4_MAX_FCFI) { uint16_t fcf_index = UINT16_MAX; ocs_log_debug(hw->os, "adding domain %p @ %#x\n", domain, fcfi); hw->domains[fcfi] = domain; /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { if (hw->first_domain_idx < 0) { hw->first_domain_idx = fcfi; } } fcf_index = domain->fcf; if (fcf_index < SLI4_MAX_FCF_INDEX) { ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n", fcf_index, fcfi); hw->fcf_index_fcfi[fcf_index] = fcfi; rc = OCS_HW_RTN_SUCCESS; } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); hw->domains[fcfi] = NULL; } } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); } return rc; } static int32_t ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain) { int32_t rc = OCS_HW_RTN_ERROR; uint16_t fcfi = UINT16_MAX; if ((hw == NULL) || (domain == NULL)) { ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } fcfi = domain->fcf_indicator; if (fcfi < SLI4_MAX_FCFI) { uint16_t fcf_index = UINT16_MAX; ocs_log_debug(hw->os, "deleting domain %p @ %#x\n", domain, fcfi); if (domain != hw->domains[fcfi]) { ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n", domain, hw->domains[fcfi]); return OCS_HW_RTN_ERROR; } hw->domains[fcfi] = NULL; /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { if (hw->first_domain_idx == fcfi) { hw->first_domain_idx = -1; } } fcf_index = 
domain->fcf; if (fcf_index < SLI4_MAX_FCF_INDEX) { if (hw->fcf_index_fcfi[fcf_index] == fcfi) { hw->fcf_index_fcfi[fcf_index] = 0; rc = OCS_HW_RTN_SUCCESS; } else { ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n", hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index); } } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); } } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); } return rc; } ocs_domain_t * ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi) { if (hw == NULL) { ocs_log_err(NULL, "bad parameter hw=%p\n", hw); return NULL; } if (fcfi < SLI4_MAX_FCFI) { return hw->domains[fcfi]; } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); return NULL; } } static ocs_domain_t * ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index) { if (hw == NULL) { ocs_log_err(NULL, "bad parameter hw=%p\n", hw); return NULL; } if (fcf_index < SLI4_MAX_FCF_INDEX) { return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]); } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); return NULL; } } /** * @brief Quaratine an IO by taking a reference count and adding it to the * quarantine list. When the IO is popped from the list then the * count is released and the IO MAY be freed depending on whether * it is still referenced by the IO. * * @n @b Note: BZ 160124 - If this is a target write or an initiator read using * DIF, then we must add the XRI to a quarantine list until we receive * 4 more completions of this same type. * * @param hw Hardware context. * @param wq Pointer to the WQ associated with the IO object to quarantine. * @param io Pointer to the io object to quarantine. 
*/ static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io) { ocs_quarantine_info_t *q_info = &wq->quarantine_info; uint32_t index; ocs_hw_io_t *free_io = NULL; /* return if the QX bit was clear */ if (!io->quarantine) { return; } /* increment the IO refcount to prevent it from being freed before the quarantine is over */ if (ocs_ref_get_unless_zero(&io->ref) == 0) { /* command no longer active */ ocs_log_debug(hw ? hw->os : NULL, "io not active xri=0x%x tag=0x%x\n", io->indicator, io->reqtag); return; } sli_queue_lock(wq->queue); index = q_info->quarantine_index; free_io = q_info->quarantine_ios[index]; q_info->quarantine_ios[index] = io; q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH; sli_queue_unlock(wq->queue); if (free_io != NULL) { ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */ } } /** * @brief Process entries on the given completion queue. * * @param hw Hardware context. * @param cq Pointer to the HW completion queue object. * * @return None. */ void ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq) { uint8_t cqe[sizeof(sli4_mcqe_t)]; uint16_t rid = UINT16_MAX; sli4_qentry_e ctype; /* completion type */ int32_t status; uint32_t n_processed = 0; time_t tstart; time_t telapsed; tstart = ocs_msectime(); while (!sli_queue_read(&hw->sli, cq->queue, cqe)) { status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); /* * The sign of status is significant. If status is: * == 0 : call completed correctly and the CQE indicated success * > 0 : call completed correctly and the CQE indicated an error * < 0 : call failed and no information is available about the CQE */ if (status < 0) { if (status == -2) { /* Notification that an entry was consumed, but not completed */ continue; } break; } switch (ctype) { case SLI_QENTRY_ASYNC: CPUTRACE("async"); sli_cqe_async(&hw->sli, cqe); break; case SLI_QENTRY_MQ: /* * Process MQ entry. Note there is no way to determine * the MQ_ID from the completion entry. 
*/ CPUTRACE("mq"); ocs_hw_mq_process(hw, status, hw->mq); break; case SLI_QENTRY_OPT_WRITE_CMD: ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe); break; case SLI_QENTRY_OPT_WRITE_DATA: ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe); break; case SLI_QENTRY_WQ: CPUTRACE("wq"); ocs_hw_wq_process(hw, cq, cqe, status, rid); break; case SLI_QENTRY_WQ_RELEASE: { uint32_t wq_id = rid; - uint32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id); + int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id); + + if (unlikely(index < 0)) { + ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n", + index, rid); + break; + } + hw_wq_t *wq = hw->hw_wq[index]; /* Submit any HW IOs that are on the WQ pending list */ hw_wq_submit_pending(wq, wq->wqec_set_count); break; } case SLI_QENTRY_RQ: CPUTRACE("rq"); ocs_hw_rqpair_process_rq(hw, cq, cqe); break; case SLI_QENTRY_XABT: { CPUTRACE("xabt"); ocs_hw_xabt_process(hw, cq, cqe, rid); break; } default: ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid); break; } n_processed++; if (n_processed == cq->queue->proc_limit) { break; } if (cq->queue->n_posted >= (cq->queue->posted_limit)) { sli_queue_arm(&hw->sli, cq->queue, FALSE); } } sli_queue_arm(&hw->sli, cq->queue, TRUE); if (n_processed > cq->queue->max_num_processed) { cq->queue->max_num_processed = n_processed; } telapsed = ocs_msectime() - tstart; if (telapsed > cq->queue->max_process_time) { cq->queue->max_process_time = telapsed; } } /** * @brief Process WQ completion queue entries. * * @param hw Hardware context. * @param cq Pointer to the HW completion queue object. * @param cqe Pointer to WQ completion queue. * @param status Completion status. * @param rid Resource ID (IO tag). 
* * @return none */ void ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid) { hw_wq_callback_t *wqcb; ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id, ((cq->queue->index - 1) & (cq->queue->length - 1))); if(rid == OCS_HW_REQUE_XRI_REGTAG) { if(status) { ocs_log_err(hw->os, "reque xri failed, status = %d \n", status); } return; } wqcb = ocs_hw_reqtag_get_instance(hw, rid); if (wqcb == NULL) { ocs_log_err(hw->os, "invalid request tag: x%x\n", rid); return; } if (wqcb->callback == NULL) { ocs_log_err(hw->os, "wqcb callback is NULL\n"); return; } (*wqcb->callback)(wqcb->arg, cqe, status); } /** * @brief Process WQ completions for IO requests * * @param arg Generic callback argument * @param cqe Pointer to completion queue entry * @param status Completion status * * @par Description * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool. * * @return None. */ static void ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status) { ocs_hw_io_t *io = arg; ocs_hw_t *hw = io->hw; sli4_fc_wcqe_t *wcqe = (void *)cqe; uint32_t len = 0; uint32_t ext = 0; uint8_t out_of_order_axr_cmd = 0; uint8_t out_of_order_axr_data = 0; uint8_t lock_taken = 0; #if defined(OCS_DISC_SPIN_DELAY) uint32_t delay = 0; char prop_buf[32]; #endif /* * For the primary IO, this will also be used for the * response. So it is important to only set/clear this * flag on the first data phase of the IO because * subsequent phases will be done on the secondary XRI. 
*/ if (io->quarantine && io->quarantine_first_phase) { io->quarantine = (wcqe->qx == 1); ocs_hw_io_quarantine(hw, io->wq, io); } io->quarantine_first_phase = FALSE; /* BZ 161832 - free secondary HW IO */ if (io->sec_hio != NULL && io->sec_hio->quarantine) { /* * If the quarantine flag is set on the * IO, then set it on the secondary IO * based on the quarantine XRI (QX) bit * sent by the FW. */ io->sec_hio->quarantine = (wcqe->qx == 1); /* use the primary io->wq because it is not set on the secondary IO. */ ocs_hw_io_quarantine(hw, io->wq, io->sec_hio); } ocs_hw_remove_io_timed_wqe(hw, io); /* clear xbusy flag if WCQE[XB] is clear */ if (io->xbusy && wcqe->xb == 0) { io->xbusy = FALSE; } /* get extended CQE status */ switch (io->type) { case OCS_HW_BLS_ACC: case OCS_HW_BLS_ACC_SID: break; case OCS_HW_ELS_REQ: sli_fc_els_did(&hw->sli, cqe, &ext); len = sli_fc_response_length(&hw->sli, cqe); break; case OCS_HW_ELS_RSP: case OCS_HW_ELS_RSP_SID: case OCS_HW_FC_CT_RSP: break; case OCS_HW_FC_CT: len = sli_fc_response_length(&hw->sli, cqe); break; case OCS_HW_IO_TARGET_WRITE: len = sli_fc_io_length(&hw->sli, cqe); #if defined(OCS_DISC_SPIN_DELAY) if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) { delay = ocs_strtoul(prop_buf, 0, 0); ocs_udelay(delay); } #endif break; case OCS_HW_IO_TARGET_READ: len = sli_fc_io_length(&hw->sli, cqe); /* * if_type == 2 seems to return 0 "total length placed" on * FCP_TSEND64_WQE completions. If this appears to happen, * use the CTIO data transfer length instead. 
*/ if (hw->workaround.retain_tsend_io_length && !len && !status) { len = io->length; } break; case OCS_HW_IO_TARGET_RSP: if(io->is_port_owned) { ocs_lock(&io->axr_lock); lock_taken = 1; if(io->axr_buf->call_axr_cmd) { out_of_order_axr_cmd = 1; } if(io->axr_buf->call_axr_data) { out_of_order_axr_data = 1; } } break; case OCS_HW_IO_INITIATOR_READ: len = sli_fc_io_length(&hw->sli, cqe); break; case OCS_HW_IO_INITIATOR_WRITE: len = sli_fc_io_length(&hw->sli, cqe); break; case OCS_HW_IO_INITIATOR_NODATA: break; case OCS_HW_IO_DNRX_REQUEUE: /* release the count for re-posting the buffer */ //ocs_hw_io_free(hw, io); break; default: ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n", io->type, io->indicator); break; } if (status) { ext = sli_fc_ext_status(&hw->sli, cqe); /* Emulate IAAB=0 for initiator WQEs only; i.e. automatically * abort exchange if an error occurred and exchange is still busy. */ if (hw->config.i_only_aab && (ocs_hw_iotype_is_originator(io->type)) && (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) { ocs_hw_rtn_e rc; ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); /* * Because the initiator will not issue another IO phase, then it is OK to to issue the * callback on the abort completion, but for consistency with the target, wait for the * XRI_ABORTED CQE to issue the IO callback. */ rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL); if (rc == OCS_HW_RTN_SUCCESS) { /* latch status to return after abort is complete */ io->status_saved = 1; io->saved_status = status; io->saved_ext = ext; io->saved_len = len; goto exit_ocs_hw_wq_process_io; } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { /* * Already being aborted by someone else (ABTS * perhaps). Just fall through and return original * error. 
*/ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", io->indicator, io->reqtag); } else { /* Failed to abort for some other reason, log error */ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", io->indicator, io->reqtag, rc); } } /* * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW */ if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) { ocs_hw_rtn_e rc; ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); /* * Because targets may send a response when the IO completes using the same XRI, we must * wait for the XRI_ABORTED CQE to issue the IO callback */ rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); if (rc == OCS_HW_RTN_SUCCESS) { /* latch status to return after abort is complete */ io->status_saved = 1; io->saved_status = status; io->saved_ext = ext; io->saved_len = len; goto exit_ocs_hw_wq_process_io; } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { /* * Already being aborted by someone else (ABTS * perhaps). Just fall through and return original * error. 
*/ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", io->indicator, io->reqtag); } else { /* Failed to abort for some other reason, log error */ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", io->indicator, io->reqtag, rc); } } } /* BZ 161832 - free secondary HW IO */ if (io->sec_hio != NULL) { ocs_hw_io_free(hw, io->sec_hio); io->sec_hio = NULL; } if (io->done != NULL) { ocs_hw_done_t done = io->done; void *arg = io->arg; io->done = NULL; if (io->status_saved) { /* use latched status if exists */ status = io->saved_status; len = io->saved_len; ext = io->saved_ext; io->status_saved = 0; } /* Restore default SGL */ ocs_hw_io_restore_sgl(hw, io); done(io, io->rnode, len, status, ext, arg); } if(out_of_order_axr_cmd) { /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ if (hw->config.bounce) { fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt; uint32_t s_id = fc_be24toh(hdr->s_id); uint32_t d_id = fc_be24toh(hdr->d_id); uint32_t ox_id = ocs_be16toh(hdr->ox_id); if (hw->callback.bounce != NULL) { (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id); } }else { hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq); } if(out_of_order_axr_data) { /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ if (hw->config.bounce) { fc_header_t *hdr = io->axr_buf->seq.header->dma.virt; uint32_t s_id = fc_be24toh(hdr->s_id); uint32_t d_id = fc_be24toh(hdr->d_id); uint32_t ox_id = ocs_be16toh(hdr->ox_id); if (hw->callback.bounce != NULL) { (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id); } }else { hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq); } } } exit_ocs_hw_wq_process_io: if(lock_taken) { ocs_unlock(&io->axr_lock); } } /** * @brief Process WQ completions for abort requests. * * @param arg Generic callback argument. * @param cqe Pointer to completion queue entry. 
 * @param status Completion status.
 *
 * @return None.
 */
static void
ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
{
	ocs_hw_io_t *io = arg;
	ocs_hw_t *hw = io->hw;
	uint32_t ext = 0;
	uint32_t len = 0;
	hw_wq_callback_t *wqcb;

	/*
	 * For IOs that were aborted internally, we may need to issue the callback here depending
	 * on whether a XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
	 * issue the callback now.
	*/
	ext = sli_fc_ext_status(&hw->sli, cqe);
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
		io->done != NULL) {
		ocs_hw_done_t  done = io->done;
		void		*arg = io->arg;

		/* Clear io->done before invoking it so a re-entrant path
		 * cannot fire the callback twice. */
		io->done = NULL;

		/*
		 * Use latched status as this is always saved for an internal abort
		 *
		 * Note: We won't have both a done and abort_done function, so don't worry about
		 *       clobbering the len, status and ext fields.
		 */
		status = io->saved_status;
		len = io->saved_len;
		ext = io->saved_ext;
		io->status_saved = 0;
		done(io, io->rnode, len, status, ext, arg);
	}

	if (io->abort_done != NULL) {
		ocs_hw_done_t  done = io->abort_done;
		void		*arg = io->abort_arg;

		io->abort_done = NULL;

		done(io, io->rnode, len, status, ext, arg);
	}
	/* abort_in_progress is shared with the submit path; update it under
	 * the HW abort lock. */
	ocs_lock(&hw->io_abort_lock);
		/* clear abort bit to indicate abort is complete */
		io->abort_in_progress = 0;
	ocs_unlock(&hw->io_abort_lock);

	/* Free the WQ callback */
	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
	ocs_hw_reqtag_free(hw, wqcb);

	/*
	 * Call ocs_hw_io_free() because this releases the WQ reservation as
	 * well as doing the refcount put. Don't duplicate the code here.
	 */
	(void)ocs_hw_io_free(hw, io);
}

/**
 * @brief Process XABT completions
 *
 * @param hw Hardware context.
 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to WQ completion queue.
 * @param rid Resource ID (IO tag).
 *
 *
 * @return None.
*/ void ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid) { /* search IOs wait free list */ ocs_hw_io_t *io = NULL; io = ocs_hw_io_lookup(hw, rid); ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id, ((cq->queue->index - 1) & (cq->queue->length - 1))); if (io == NULL) { /* IO lookup failure should never happen */ ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid); return; } if (!io->xbusy) { ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); } else { /* mark IO as no longer busy */ io->xbusy = FALSE; } if (io->is_port_owned) { ocs_lock(&hw->io_lock); /* Take reference so that below callback will not free io before reque */ ocs_ref_get(&io->ref); ocs_unlock(&hw->io_lock); } /* For IOs that were aborted internally, we need to issue any pending callback here. */ if (io->done != NULL) { ocs_hw_done_t done = io->done; void *arg = io->arg; /* Use latched status as this is always saved for an internal abort */ int32_t status = io->saved_status; uint32_t len = io->saved_len; uint32_t ext = io->saved_ext; io->done = NULL; io->status_saved = 0; done(io, io->rnode, len, status, ext, arg); } /* Check to see if this is a port owned XRI */ if (io->is_port_owned) { ocs_lock(&hw->io_lock); ocs_hw_reque_xri(hw, io); ocs_unlock(&hw->io_lock); /* Not hanlding reque xri completion, free io */ ocs_hw_io_free(hw, io); return; } ocs_lock(&hw->io_lock); if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) { /* if on wait_free list, caller has already freed IO; * remove from wait_free list and add to free list. * if on in-use list, already marked as no longer busy; * just leave there and wait for caller to free. */ if (io->state == OCS_HW_IO_STATE_WAIT_FREE) { io->state = OCS_HW_IO_STATE_FREE; ocs_list_remove(&hw->io_wait_free, io); ocs_hw_io_free_move_correct_list(hw, io); } } ocs_unlock(&hw->io_lock); } /** * @brief Adjust the number of WQs and CQs within the HW. 
* * @par Description * Calculates the number of WQs and associated CQs needed in the HW based on * the number of IOs. Calculates the starting CQ index for each WQ, RQ and * MQ. * * @param hw Hardware context allocated by the caller. */ static void ocs_hw_adjust_wqs(ocs_hw_t *hw) { uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ); uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ]; uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ]; /* * possibly adjust the the size of the WQs so that the CQ is twice as * big as the WQ to allow for 2 completions per IO. This allows us to * handle multi-phase as well as aborts. */ if (max_cq_entries < max_wq_entries * 2) { max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2; } /* * Calculate the number of WQs to use base on the number of IOs. * * Note: We need to reserve room for aborts which must be sent down * the same WQ as the IO. So we allocate enough WQ space to * handle 2 times the number of IOs. Half of the space will be * used for normal IOs and the other hwf is reserved for aborts. */ hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries; /* * For performance reasons, it is best to use use a minimum of 4 WQs * for BE3 and Skyhawk. */ if (hw->config.n_wq < 4 && SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { hw->config.n_wq = 4; } /* * For dual-chute support, we need to have at least one WQ per chute. 
*/ if (hw->config.n_wq < 2 && ocs_hw_get_num_chutes(hw) > 1) { hw->config.n_wq = 2; } /* make sure we haven't exceeded the max supported in the HW */ if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) { hw->config.n_wq = OCS_HW_MAX_NUM_WQ; } /* make sure we haven't exceeded the chip maximum */ if (hw->config.n_wq > max_wq_num) { hw->config.n_wq = max_wq_num; } /* * Using Queue Topology string, we divide by number of chutes */ hw->config.n_wq /= ocs_hw_get_num_chutes(hw); } static int32_t ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size) { ocs_command_ctx_t *ctx = NULL; ocs_lock(&hw->cmd_lock); if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) { ocs_log_err(hw->os, "XXX no command context?!?\n"); ocs_unlock(&hw->cmd_lock); return -1; } hw->cmd_head_count--; /* Post any pending requests */ ocs_hw_cmd_submit_pending(hw); ocs_unlock(&hw->cmd_lock); if (ctx->cb) { if (ctx->buf) { ocs_memcpy(ctx->buf, mqe, size); } ctx->cb(hw, status, ctx->buf, ctx->arg); } ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t)); ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t)); return 0; } /** * @brief Process entries on the given mailbox queue. * * @param hw Hardware context. * @param status CQE status. * @param mq Pointer to the mailbox queue object. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq) { uint8_t mqe[SLI4_BMBX_SIZE]; if (!sli_queue_read(&hw->sli, mq, mqe)) { ocs_hw_command_process(hw, status, mqe, mq->size); } return 0; } /** * @brief Read a FCF table entry. * * @param hw Hardware context. * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first * read and the next_index field from the FCOE_READ_FCF_TABLE command * for subsequent reads. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static ocs_hw_rtn_e ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index) { uint8_t *buf = NULL; int32_t rc = OCS_HW_RTN_ERROR; buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem, index)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); } return rc; } /** * @brief Callback function for the FCOE_READ_FCF_TABLE command. * * @par Description * Note that the caller has allocated: * - DMA memory to hold the table contents * - DMA memory structure * - Command/results buffer * . * Each of these must be freed here. * * @param hw Hardware context. * @param status Hardware status. * @param mqe Pointer to the mailbox command/results buffer. * @param arg Pointer to the DMA memory structure. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_dma_t *dma = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; if (status || hdr->status) { ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); } else if (dma->virt) { sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt; /* if FC or FCOE and FCF entry valid, process it */ if (read_fcf->fcf_entry.fc || (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) { if (hw->callback.domain != NULL) { ocs_domain_record_t drec = {0}; if (read_fcf->fcf_entry.fc) { /* * This is a pseudo FCF entry. 
Create a domain * record based on the read topology information */ drec.speed = hw->link.speed; drec.fc_id = hw->link.fc_id; drec.is_fc = TRUE; if (SLI_LINK_TOPO_LOOP == hw->link.topology) { drec.is_loop = TRUE; ocs_memcpy(drec.map.loop, hw->link.loop_map, sizeof(drec.map.loop)); } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) { drec.is_nport = TRUE; } } else { drec.index = read_fcf->fcf_entry.fcf_index; drec.priority = read_fcf->fcf_entry.fip_priority; /* copy address, wwn and vlan_bitmap */ ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address, sizeof(drec.address)); ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id, sizeof(drec.wwn)); ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap, sizeof(drec.map.vlan)); drec.is_ethernet = TRUE; drec.is_nport = TRUE; } hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_FOUND, &drec); } } else { /* if FCOE and FCF is not valid, ignore it */ ocs_log_test(hw->os, "ignore invalid FCF entry\n"); } if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) { ocs_hw_read_fcf(hw, read_fcf->next_index); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); //ocs_dma_free(hw->os, dma); //ocs_free(hw->os, dma, sizeof(ocs_dma_t)); return 0; } /** * @brief Callback function for the SLI link events. * * @par Description * This function allocates memory which must be freed in its callback. * * @param ctx Hardware context pointer (that is, ocs_hw_t *). * @param e Event structure pointer (that is, sli4_link_event_t *). * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t ocs_hw_cb_link(void *ctx, void *e) { ocs_hw_t *hw = ctx; sli4_link_event_t *event = e; ocs_domain_t *d = NULL; uint32_t i = 0; int32_t rc = OCS_HW_RTN_ERROR; ocs_t *ocs = hw->os; ocs_hw_link_event_init(hw); switch (event->status) { case SLI_LINK_STATUS_UP: hw->link = *event; if (SLI_LINK_TOPO_NPORT == event->topology) { device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed); ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); } else if (SLI_LINK_TOPO_LOOP == event->topology) { uint8_t *buf = NULL; device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed); buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); break; } if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_TOPOLOGY failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); } } else { device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n", event->topology, event->speed); } break; case SLI_LINK_STATUS_DOWN: device_printf(ocs->dev, "Link Down\n"); hw->link.status = event->status; - for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) { + for (i = 0; i < SLI4_MAX_FCFI; i++) { + d = hw->domains[i]; if (d != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d); } } break; default: ocs_log_test(hw->os, "unhandled link status %#x\n", event->status); break; } return 0; } static int32_t ocs_hw_cb_fip(void *ctx, void *e) { ocs_hw_t *hw = ctx; ocs_domain_t *domain = NULL; sli4_fip_event_t *event = e; + ocs_hw_assert(event); + ocs_hw_assert(hw); + /* Find the associated domain object */ if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) { ocs_domain_t *d = NULL; uint32_t i = 0; /* Clear VLINK is different from the other FIP events as it passes back * a VPI instead of a FCF index. 
Check all attached SLI ports for a * matching VPI */ - for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) { + for (i = 0; i < SLI4_MAX_FCFI; i++) { + d = hw->domains[i]; if (d != NULL) { ocs_sport_t *sport = NULL; ocs_list_foreach(&d->sport_list, sport) { if (sport->indicator == event->index) { domain = d; break; } } if (domain != NULL) { break; } } } } else { domain = ocs_hw_domain_get_indexed(hw, event->index); } switch (event->type) { case SLI4_FCOE_FIP_FCF_DISCOVERED: ocs_hw_read_fcf(hw, event->index); break; case SLI4_FCOE_FIP_FCF_DEAD: if (domain != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } break; case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: if (domain != NULL && hw->callback.domain != NULL) { /* * We will want to issue rediscover FCF when this domain is free'd in order * to invalidate the FCF table */ domain->req_rediscover_fcf = TRUE; hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } break; case SLI4_FCOE_FIP_FCF_MODIFIED: if (domain != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } ocs_hw_read_fcf(hw, event->index); break; default: ocs_log_test(hw->os, "unsupported event %#x\n", event->type); } return 0; } static int32_t ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_remote_node_t *rnode = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = 0; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); rnode->attached = FALSE; ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); evt = OCS_HW_NODE_ATTACH_FAIL; } else { rnode->attached = TRUE; ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1); evt = OCS_HW_NODE_ATTACH_OK; } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, rnode); } 
ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } static int32_t ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_remote_node_t *rnode = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; int32_t rc = 0; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); /* * In certain cases, a non-zero MQE status is OK (all must be true): * - node is attached * - if High Login Mode is enabled, node is part of a node group * - status is 0x1400 */ if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) || (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) { rc = -1; } } if (rc == 0) { rnode->node_group = FALSE; rnode->attached = FALSE; if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) { ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); } evt = OCS_HW_NODE_FREE_OK; } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, rnode); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return rc; } static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; int32_t rc = 0; uint32_t i; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); } else { evt = OCS_HW_NODE_FREE_ALL_OK; } if (evt == OCS_HW_NODE_FREE_ALL_OK) { for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) { ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0); } if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) { ocs_log_test(hw->os, "FCOE_RPI free all failure\n"); rc = -1; } } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, NULL); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return rc; } /** * @brief Initialize the pool of HW IO objects. * * @param hw Hardware context. 
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_setup_io(ocs_hw_t *hw)
{
	uint32_t i = 0;
	ocs_hw_io_t *io = NULL;
	uintptr_t xfer_virt = 0;
	uintptr_t xfer_phys = 0;
	uint32_t index;
	uint8_t new_alloc = TRUE;	/* FALSE when re-initializing existing IOs */

	if (NULL == hw->io) {
		/* first-time setup: allocate the IO pointer array and the IOs */
		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);

		if (NULL == hw->io) {
			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
					hw->config.n_io,
					sizeof(ocs_hw_io_t *));
			return OCS_HW_RTN_NO_MEMORY;
		}
		for (i = 0; i < hw->config.n_io; i++) {
			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
						OCS_M_ZERO | OCS_M_NOWAIT);
			if (hw->io[i] == NULL) {
				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
				goto error;
			}
		}

		/* Create WQE buffs for IO */
		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
				OCS_M_ZERO | OCS_M_NOWAIT);
		if (NULL == hw->wqe_buffs) {
			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t));
			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
					__func__, hw->config.n_io, hw->sli.config.wqe_size);
			return OCS_HW_RTN_NO_MEMORY;
		}

	} else {
		/* re-use existing IOs, including SGLs */
		new_alloc = FALSE;
	}

	if (new_alloc) {
		/* one XFER_RDY IU slot per IO, carved out of a single DMA buffer */
		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
					sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
					4/*XXX what does this need to be? */)) {
			ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
	}
	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
	xfer_phys = hw->xfer_rdy.phys;

	for (i = 0; i < hw->config.n_io; i++) {
		hw_wq_callback_t *wqcb;

		io = hw->io[i];

		/* initialize IO fields */
		io->hw = hw;

		/* Assign a WQE buff */
		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];

		/* Allocate the request tag for this IO */
		wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
		if (wqcb == NULL) {
			ocs_log_err(hw->os, "can't allocate request tag\n");
			return OCS_HW_RTN_NO_RESOURCES;
		}
		io->reqtag = wqcb->instance_index;

		/* Now for the fields that are initialized on each free */
		ocs_hw_init_free_io(io);

		/* The XB flag isn't cleared on IO free, so initialize it to zero here */
		io->xbusy = 0;

		if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
			ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
			return OCS_HW_RTN_NO_MEMORY;
		}

		if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
			ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
			ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
			return OCS_HW_RTN_NO_MEMORY;
		}
		io->def_sgl_count = hw->config.n_sgl;
		io->sgl = &io->def_sgl;
		io->sgl_count = io->def_sgl_count;

		if (hw->xfer_rdy.size) {
			/* hand this IO its slice of the shared XFER_RDY buffer */
			io->xfer_rdy.virt = (void *)xfer_virt;
			io->xfer_rdy.phys = xfer_phys;
			io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);

			xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
			xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
		}
	}

	return OCS_HW_RTN_SUCCESS;
error:
	/* unwind the per-IO allocations made so far */
	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
		hw->io[i] = NULL;
	}

	return OCS_HW_RTN_NO_MEMORY;
}

/*
 * Post the pre-allocated IOs to the free list, optionally pre-registering
 * their SGLs with the hardware (FCOE_POST_SGL_PAGES) in batches of
 * contiguous XRIs.
 */
static ocs_hw_rtn_e
ocs_hw_init_io(ocs_hw_t *hw)
{
	uint32_t i = 0, io_index = 0;
	uint32_t prereg = 0;
	ocs_hw_io_t *io = NULL;
	uint8_t cmd[SLI4_BMBX_SIZE];
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t nremaining;
	uint32_t n = 0;
	uint32_t sgls_per_request = 256;
	ocs_dma_t **sgls = NULL;
	ocs_dma_t reqbuf = { 0 };

	prereg = sli_get_sgl_preregister(&hw->sli);

	if (prereg) {
		/* scratch buffers used only for SGL pre-registration commands */
		sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
		if (sgls == NULL) {
			ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
			ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	io = hw->io[io_index];
	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
		if (prereg) {
			/* Copy address of SGL's into local sgls[] array, break out if the xri
			 * is not contiguous.
			 */
			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
				/* Check that we have contiguous xri values */
				if (n > 0) {
					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
						break;
					}
				}
				sgls[n] = hw->io[io_index + n]->sgl;
			}

			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
						io->indicator, n, sgls, NULL, &reqbuf)) {
				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
					rc = OCS_HW_RTN_ERROR;
					ocs_log_err(hw->os, "SGL post failed\n");
					break;
				}
			}
		} else {
			/* no pre-registration: take the whole remainder in one pass */
			n = nremaining;
		}

		/* Add to tail if successful */
		for (i = 0; i < n; i ++) {
			io->is_port_owned = 0;
			io->state = OCS_HW_IO_STATE_FREE;
			ocs_list_add_tail(&hw->io_free, io);
			io = hw->io[io_index+1];
			io_index++;
		}
	}

	if (prereg) {
		ocs_dma_free(hw->os, &reqbuf);
		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
	}

	return rc;
}

/* Drain all EQs so any remaining completions are processed. */
static int32_t
ocs_hw_flush(ocs_hw_t *hw)
{
	uint32_t i = 0;

	/* Process any remaining completions */
	for (i = 0; i < hw->eq_count; i++) {
		ocs_hw_process(hw, i, ~0);
	}

	return 0;
}

/*
 * Fail out every mailbox command still on cmd_head (used on reset/teardown).
 * Each iteration drops cmd_lock around ocs_hw_command_process(), which
 * invokes the command's completion with a bad status and also drains the
 * cmd_pending list.
 */
static int32_t
ocs_hw_command_cancel(ocs_hw_t *hw)
{

	ocs_lock(&hw->cmd_lock);

	/*
	 * Manually clean up remaining commands. Note: since this calls
	 * ocs_hw_command_process(), we'll also process the cmd_pending
	 * list, so no need to manually clean that out.
	 */
	while (!ocs_list_empty(&hw->cmd_head)) {
		uint8_t		mqe[SLI4_BMBX_SIZE] = { 0 };
		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);

		ocs_log_test(hw->os, "hung command %08x\n",
				NULL == ctx ? UINT32_MAX :
				(NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
		ocs_unlock(&hw->cmd_lock);
		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
		ocs_lock(&hw->cmd_lock);
	}

	ocs_unlock(&hw->cmd_lock);

	return 0;
}

/**
 * @brief Find IO given indicator (xri).
 *
 * @param hw Hal context.
 * @param indicator Indicator (xri) to look for.
 *
 * @return Returns io if found, NULL otherwise.
 */
ocs_hw_io_t *
ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
{
	uint32_t ioindex;
	/* XRIs are allocated from a contiguous extent; index is the offset */
	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
	return hw->io[ioindex];
}

/**
 * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
 *
 * @param hw Hal context.
 * @param io Pointer to the IO to cleanup.
 *
 * Called with io_lock held; the lock is dropped around each callback.
 */
static void
ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	ocs_hw_done_t  done = io->done;
	ocs_hw_done_t  abort_done = io->abort_done;

	/* first check active_wqe list and remove if there */
	if (ocs_list_on_list(&io->wqe_link)) {
		ocs_list_remove(&hw->io_timed_wqe, io);
	}

	/* Remove from WQ pending list */
	if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
		ocs_list_remove(&io->wq->pending_list, io);
	}

	if (io->done) {
		void		*arg = io->arg;

		io->done = NULL;
		ocs_unlock(&hw->io_lock);
		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
		ocs_lock(&hw->io_lock);
	}

	if (io->abort_done != NULL) {
		void		*abort_arg = io->abort_arg;

		io->abort_done = NULL;
		ocs_unlock(&hw->io_lock);
		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
		ocs_lock(&hw->io_lock);
	}
}

/*
 * Cancel all in-flight IOs during reset/shutdown: issue SHUTDOWN-status
 * callbacks for every inuse and port-owned IO, then wait (bounded) for the
 * backend to return them.
 */
static int32_t
ocs_hw_io_cancel(ocs_hw_t *hw)
{
	ocs_hw_io_t	*io = NULL;
	ocs_hw_io_t	*tmp_io = NULL;
	uint32_t	iters = 100; /* One second limit */

	/*
	 * Manually clean up outstanding IO.
	 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
	 */
	ocs_lock(&hw->io_lock);
	ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
		ocs_hw_done_t  done = io->done;
		ocs_hw_done_t  abort_done = io->abort_done;

		ocs_hw_io_cancel_cleanup(hw, io);

		/*
		 * Since this is called in a reset/shutdown
		 * case, If there is no callback, then just
		 * free the IO.
		 *
		 * Note: A port owned XRI cannot be on
		 *       the in use list. We cannot call
		 *       ocs_hw_io_free() because we already
		 *       hold the io_lock.
		 */
		if (done == NULL &&
		    abort_done == NULL) {
			/*
			 * Since this is called in a reset/shutdown
			 * case, If there is no callback, then just
			 * free the IO.
			 */
			ocs_hw_io_free_common(hw, io);
			ocs_list_remove(&hw->io_inuse, io);
			ocs_hw_io_free_move_correct_list(hw, io);
		}
	}

	/*
	 * For port owned XRIs, they are not on the in use list, so
	 * walk though XRIs and issue any callbacks.
	 */
	ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
		/* check  list and remove if there */
		if (ocs_list_on_list(&io->dnrx_link)) {
			ocs_list_remove(&hw->io_port_dnrx, io);
			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
		}
		ocs_hw_io_cancel_cleanup(hw, io);
		ocs_list_remove(&hw->io_port_owned, io);
		ocs_hw_io_free_common(hw, io);
	}
	ocs_unlock(&hw->io_lock);

	/* Give time for the callbacks to complete */
	do {
		ocs_udelay(10000);
		iters--;
	} while (!ocs_list_empty(&hw->io_inuse) && iters);

	/* Leave a breadcrumb that cleanup is not yet complete. */
	if (!ocs_list_empty(&hw->io_inuse)) {
		ocs_log_test(hw->os, "io_inuse list is not empty\n");
	}

	return 0;
}

/*
 * Fill the first two SGEs of the IO's default SGL with the command and
 * response buffers (used for initiator non-data commands).
 */
static int32_t
ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
		ocs_dma_t *rsp)
{
	sli4_sge_t	*data = NULL;

	if (!hw || !io) {
		ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
		return OCS_HW_RTN_ERROR;
	}

	data = io->def_sgl.virt;

	/* setup command pointer */
	data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
	data->buffer_address_low  = ocs_addr32_lo(cmnd->phys);
	data->buffer_length = cmnd_size;
	data++;

	/* setup response pointer */
	data->buffer_address_high = ocs_addr32_hi(rsp->phys);
	data->buffer_address_low  = ocs_addr32_lo(rsp->phys);
	data->buffer_length = rsp->size;

	return 0;
}

/*
 * READ_TOPOLOGY mailbox completion: decode link state, topology and speed
 * into hw->link, then kick off the FCF table read.
 */
static int32_t
__ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;

	if (status || read_topo->hdr.status) {
		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
				status, read_topo->hdr.status);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		return -1;
	}

	switch (read_topo->attention_type) {
	case SLI4_READ_TOPOLOGY_LINK_UP:
		hw->link.status = SLI_LINK_STATUS_UP;
		break;
	case SLI4_READ_TOPOLOGY_LINK_DOWN:
		hw->link.status = SLI_LINK_STATUS_DOWN;
		break;
	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
		hw->link.status = SLI_LINK_STATUS_NO_ALPA;
		break;
	default:
		hw->link.status = SLI_LINK_STATUS_MAX;
		break;
	}

	switch (read_topo->topology) {
	case SLI4_READ_TOPOLOGY_NPORT:
		hw->link.topology = SLI_LINK_TOPO_NPORT;
		break;
	case SLI4_READ_TOPOLOGY_FC_AL:
		hw->link.topology = SLI_LINK_TOPO_LOOP;
		if (SLI_LINK_STATUS_UP == hw->link.status) {
			hw->link.loop_map = hw->loop_map.virt;
		}
		hw->link.fc_id = read_topo->acquired_al_pa;
		break;
	default:
		hw->link.topology = SLI_LINK_TOPO_MAX;
		break;
	}

	hw->link.medium = SLI_LINK_MEDIUM_FC;

	/* speeds are reported in Mb/s */
	switch (read_topo->link_current.link_speed) {
	case SLI4_READ_TOPOLOGY_SPEED_1G:
		hw->link.speed =  1 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_2G:
		hw->link.speed =  2
				* 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_4G:
		hw->link.speed =  4 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_8G:
		hw->link.speed =  8 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_16G:
		hw->link.speed = 16 * 1000;
		hw->link.loop_map = NULL;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_32G:
		hw->link.speed = 32 * 1000;
		hw->link.loop_map = NULL;
		break;
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);

	return 0;
}

/*
 * Default/common handler for the sport state machine: frees the mailbox
 * buffer on REQ_FREE/REQ_ATTACH and logs any otherwise-unhandled event.
 */
static int32_t
__ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_EXIT:
		/* ignore */
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
	case OCS_EVT_HW_PORT_REQ_ATTACH:
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}
		/* fall through */
	default:
		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
		break;
	}

	return 0;
}

/* Terminal state: report OCS_HW_PORT_FREE_FAIL to the registered callback. */
static void *
__ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_FREE_FAIL, sport);
		}
		break;
	default:
		break;
	}

	return NULL;
}

/* Terminal state: release the VPI resource and report OCS_HW_PORT_FREE_OK. */
static void *
__ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free SLI resource */
		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
			ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
		}

		/* free mailbox buffer */
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_FREE_OK, sport);
		}
		break;
	default:
		break;
	}

	return NULL;
}

/*
 * Attach failed: release the VPI, report OCS_HW_PORT_ATTACH_FAIL, and honor
 * any free request that arrived while the attach was outstanding.
 */
static void *
__ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free SLI resource */
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);

		/* free mailbox buffer */
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_ATTACH_FAIL, sport);
		}

		if (sport->sm_free_req_pending) {
			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		}
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Free a port by issuing UNREG_VPI; transitions to freed/report-fail on the
 * mailbox response.
 */
static void *
__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;
	uint8_t		*cmd = NULL;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* allocate memory and send unreg_vpi */
		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
		if (!cmd) {
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
					   SLI4_UNREG_TYPE_PORT)) {
			ocs_log_err(hw->os, "UNREG_VPI format failure\n");
			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
			ocs_log_err(hw->os, "UNREG_VPI command failure\n");
			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Free path for adapters that never issued REG_VPI: defer the free into
 * mailbox-completion context via an async no-op.
 */
static void *
__ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* Forward to execute in mailbox completion processing context */
		if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
			ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
		break;
	default:
		break;
	}

	return NULL;
}

/* Steady state after a successful attach; handles deferred/new free requests. */
static void *
__ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_ATTACH_OK, sport);
		}

		if (sport->sm_free_req_pending) {
			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		}
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* virtual/physical port request free */
		ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Issue REG_VPI to attach the port; wait for the mailbox response. */
static void *
__ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
			ocs_log_err(hw->os, "REG_VPI format failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
			ocs_log_err(hw->os, "REG_VPI command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* Wait for attach response and then free */
		sport->sm_free_req_pending = 1;
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return
			NULL;
}

/* Cleanup state: release the VPI resource and free the mailbox buffer. */
static void *
__ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free SLI resource */
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);

		/* free mailbox buffer */
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Steady state after a successful alloc: report ALLOC_OK and dispatch
 * subsequent attach/free requests.
 */
static void *
__ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_ALLOC_OK, sport);
		}
		/* If there is a pending free request, then handle it now */
		if (sport->sm_free_req_pending) {
			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		}
		break;
	case OCS_EVT_HW_PORT_REQ_ATTACH:
		/* virtual port requests attach */
		ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
		break;
	case OCS_EVT_HW_PORT_ATTACH_OK:
		/* physical port attached (as part of attaching domain) */
		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* virtual port request free */
		if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		} else {
			/*
			 * Note: BE3/Skyhawk will respond with a status of 0x20
			 *       unless the reg_vpi has been issued, so we can
			 *       skip the unreg_vpi for these adapters.
			 *
			 * Send a nop to make sure that free doesn't occur in
			 * same context
			 */
			ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
		}
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Alloc failed: release the VPI, report ALLOC_FAIL, and honor any pending
 * free request.
 */
static void *
__ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free SLI resource */
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);

		/* free mailbox buffer */
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}

		if (hw->callback.port != NULL) {
			hw->callback.port(hw->args.port,
					OCS_HW_PORT_ALLOC_FAIL, sport);
		}

		/* If there is a pending free request, then handle it now */
		if (sport->sm_free_req_pending) {
			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
		}
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Read the service parameters (READ_SPARM64) for the port, extract WWPN/WWNN,
 * then proceed to INIT_VPI.
 */
static void *
__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;
	uint8_t		*payload = NULL;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* allocate memory for the service parameters */
		if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
			ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}

		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
					&sport->dma, sport->indicator)) {
			ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n");
			ocs_dma_free(hw->os, &sport->dma);
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
			ocs_dma_free(hw->os, &sport->dma);
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		payload = sport->dma.virt;

		ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);

		ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
				sizeof(sport->sli_wwpn));
		ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
				sizeof(sport->sli_wwnn));

		ocs_dma_free(hw->os, &sport->dma);
		ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
		break;
	case OCS_EVT_ERROR:
		ocs_dma_free(hw->os, &sport->dma);
		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* Wait for attach response and then free */
		sport->sm_free_req_pending = 1;
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Initial alloc state: waits for the ALLOC_OK/ALLOC_FAIL decision. */
static void *
__ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* no-op */
		break;
	case OCS_EVT_HW_PORT_ALLOC_OK:
		ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
		break;
	case OCS_EVT_HW_PORT_ALLOC_FAIL:
		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* Wait for attach response and then free */
		sport->sm_free_req_pending = 1;
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Issue INIT_VPI for the port, then transition to allocated on response. */
static void *
__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_sli_port_t	*sport = ctx->app;
	ocs_hw_t	*hw = sport->hw;

	smtrace("port");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* If there is a pending free request, then handle it now */
		if (sport->sm_free_req_pending) {
			ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
			return NULL;
		}

		/* TODO XXX transitioning to done only works if this is called
		 * directly from ocs_hw_port_alloc BUT not if called from
		 * read_sparm64. In the later case, we actually want to go
		 * through report_ok/fail
		 */
		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
					sport->indicator, sport->domain->indicator)) {
			ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
			ocs_log_err(hw->os, "INIT_VPI command failure\n");
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* Wait for attach response and then free */
		sport->sm_free_req_pending = 1;
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Generic mailbox completion for port SM commands: translate status into an
 * SM event and post it (the mailbox buffer travels with the event).
 */
static int32_t
__ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_sli_port_t *sport = arg;
	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
	ocs_sm_event_t	evt;

	if (status || hdr->status) {
		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
			      sport->indicator, status, hdr->status);
		evt = OCS_EVT_ERROR;
	} else {
		evt = OCS_EVT_RESPONSE;
	}

	ocs_sm_post_event(&sport->ctx, evt, mqe);

	return 0;
}

/*
 * Like __ocs_hw_port_cb, but posts a heap copy of the MQE because the SM
 * event handler will eventually free it.
 */
static int32_t
__ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_sli_port_t *sport = arg;
	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
	ocs_sm_event_t	evt;
	uint8_t *mqecpy;

	if (status || hdr->status) {
		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
			      sport->indicator, status, hdr->status);
		evt = OCS_EVT_ERROR;
	} else {
		evt = OCS_EVT_RESPONSE;
	}

	/*
	 * In this case we have to malloc a mailbox command buffer, as it is reused
	 * in the state machine post event call, and eventually freed
	 */
	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mqecpy == NULL) {
		ocs_log_err(hw->os, "malloc
mqecpy failed\n");
		return -1;
	}
	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);

	ocs_sm_post_event(&sport->ctx, evt, mqecpy);

	return 0;
}

/***************************************************************************
 * Domain state machine
 */

/* Default/common handler for the domain state machine: log unhandled events. */
static int32_t
__ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_EXIT:
		/* ignore */
		break;
	default:
		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
		break;
	}

	return 0;
}

/*
 * Domain alloc failed: free the command buffer and VFI resource, then report
 * OCS_HW_DOMAIN_ALLOC_FAIL.
 */
static void *
__ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free command buffer */
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}
		/* free SLI resources */
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
		/* TODO how to free FCFI (or do we at all)? */

		if (hw->callback.domain != NULL) {
			hw->callback.domain(hw->args.domain,
					OCS_HW_DOMAIN_ALLOC_FAIL,
					domain);
		}
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Steady state after a successful domain attach: informs the physical sport
 * (PORT_ATTACH_OK) and registered callbacks; handles a free request.
 */
static void *
__ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free mailbox buffer and send alloc ok to physical sport */
		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);

		/* now inform registered callbacks */
		if (hw->callback.domain != NULL) {
			hw->callback.domain(hw->args.domain,
					OCS_HW_DOMAIN_ATTACH_OK,
					domain);
		}
		break;
	case OCS_EVT_HW_DOMAIN_REQ_FREE:
		ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Domain attach failed: free the buffer and VFI resource, then report
 * OCS_HW_DOMAIN_ATTACH_FAIL.
 */
static void *
__ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (data != NULL) {
			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		}
		/* free SLI resources */
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
		/* TODO how to free FCFI (or do we at all)? */

		if (hw->callback.domain != NULL) {
			hw->callback.domain(hw->args.domain,
					OCS_HW_DOMAIN_ATTACH_FAIL,
					domain);
		}
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Issue REG_VFI to attach the domain; wait for the mailbox response. */
static void *
__ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:

		ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt);

		if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
			ocs_log_err(hw->os, "REG_VFI format failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
			ocs_log_err(hw->os, "REG_VFI command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Steady state after a successful domain alloc: notify the sport and the
 * registered callbacks; handles attach and free requests.
 */
static void *
__ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free mailbox buffer and send alloc ok to physical sport */
		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);

		ocs_hw_domain_add(hw, domain);

		/* now inform registered callbacks */
		if (hw->callback.domain != NULL) {
			hw->callback.domain(hw->args.domain,
					OCS_HW_DOMAIN_ALLOC_OK,
					domain);
		}
		break;
	case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
		ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
		break;
	case OCS_EVT_HW_DOMAIN_REQ_FREE:
		/* unreg_fcfi/vfi */
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
		} else {
			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
		}
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Read the domain's default service parameters, then move to allocated. */
static void *
__ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
					&domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	case OCS_EVT_EXIT:
		break;
	case OCS_EVT_RESPONSE:
		ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);

		ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/* Issue INIT_VFI for the domain, then continue with READ_SPARM64. */
static void *
__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_sli_port_t	*sport = domain->sport;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
					domain->fcf_indicator, sport->indicator)) {
			ocs_log_err(hw->os, "INIT_VFI format failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
			ocs_log_err(hw->os, "INIT_VFI command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	case OCS_EVT_EXIT:
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx,
				__ocs_hw_domain_alloc_report_fail, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Issue REG_FCFI (or REG_FCFI_MRQ when multiple RQs are configured) using the
 * hw filter definitions; on response, record the FCFI and continue the
 * allocation (IF_TYPE 0 skips INIT_VFI).
 */
static void *
__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER: {
		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
		uint32_t i;

		/* Set the filter match/mask values from hw's filter_def values */
		for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
			rq_cfg[i].rq_id = 0xffff;
			rq_cfg[i].r_ctl_mask = (uint8_t)  hw->config.filter_def[i];
			rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
			rq_cfg[i].type_mask = (uint8_t)   (hw->config.filter_def[i] >> 16);
			rq_cfg[i].type_match = (uint8_t)  (hw->config.filter_def[i] >> 24);
		}

		/* Set the rq_id for each, in order of RQ definition */
		for (i = 0; i < hw->hw_rq_count; i++) {
			if (i >= ARRAY_SIZE(rq_cfg)) {
				ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
				break;
			}
			rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
		}

		if (!data) {
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (hw->hw_mrq_count) {
			if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
				 domain->vlan_id, domain->fcf)) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
				break;
			}

		} else {
			if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
						rq_cfg, domain->vlan_id)) {
				ocs_log_err(hw->os, "REG_FCFI format failure\n");
				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
				break;
			}
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
			ocs_log_err(hw->os, "REG_FCFI command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	}
	case OCS_EVT_EXIT:
		break;
	case OCS_EVT_RESPONSE:
		if (!data) {
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;

		/*
		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
		 * and instead rely on implicit initialization during VFI registration.
		 * Short circuit normal processing here for those devices.
		 */
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
		} else {
			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
		}
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Entry point of the domain alloc SM: FC media already has an FCFI, so go
 * straight to INIT_VFI; FCoE must register an FCFI first.
 */
static void *
__ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
			/*
			 * For FC, the HW alread registered a FCFI
			 * Copy FCF information into the domain and jump to INIT_VFI
			 */
			domain->fcf_indicator = hw->fcf_indicator;
			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
		} else {
			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
		}
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Domain free failed: remove the domain from tracking and report
 * OCS_HW_DOMAIN_FREE_FAIL.  Tolerates a NULL domain.
 */
static void *
__ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (domain != NULL) {
			ocs_hw_t	*hw = domain->hw;

			ocs_hw_domain_del(hw, domain);

			if (hw->callback.domain != NULL) {
				hw->callback.domain(hw->args.domain,
						OCS_HW_DOMAIN_FREE_FAIL,
						domain);
			}
		}

		/* free command buffer */
		if (data != NULL) {
			ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
		}
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Domain freed: release the VFI resource, remove the domain, and report
 * OCS_HW_DOMAIN_FREE_OK.  Tolerates a NULL domain.
 */
static void *
__ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* Free DMA and mailbox buffer */
		if (domain != NULL) {
			ocs_hw_t *hw = domain->hw;

			/* free VFI resource */
			sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
					  domain->indicator);

			ocs_hw_domain_del(hw, domain);

			/* inform registered callbacks */
			if (hw->callback.domain != NULL) {
				hw->callback.domain(hw->args.domain,
						OCS_HW_DOMAIN_FREE_OK,
						domain);
			}
		}
		if (data != NULL) {
			ocs_free(NULL, data, SLI4_BMBX_SIZE);
		}
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

/*
 * Issue FCOE_REDISCOVER_FCF after a domain free (skipped during teardown);
 * both success and failure proceed to the freed state since the command can
 * legitimately fail when no FCFs exist.
 */
static void *
__ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;
	ocs_hw_t	*hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* if we're in the middle of a teardown, skip sending rediscover */
		if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
			break;
		}
		if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
			ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
			ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
		}
		break;
	case OCS_EVT_RESPONSE:
	case OCS_EVT_ERROR:
		/* REDISCOVER_FCF can fail if none exist */
		ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

static void *
__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (data == NULL) { data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!data) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) { ocs_log_err(hw->os, "UNREG_FCFI format failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "UNREG_FCFI command failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_RESPONSE: if (domain->req_rediscover_fcf) { domain->req_rediscover_fcf = FALSE; ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); } break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; uint8_t is_fc = FALSE; smtrace("domain"); is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC); switch (evt) { case OCS_EVT_ENTER: if (data == NULL) { data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!data) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain, SLI4_UNREG_TYPE_DOMAIN)) { ocs_log_err(hw->os, "UNREG_VFI format failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "UNREG_VFI command failure\n"); ocs_free(hw->os, 
data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_ERROR: if (is_fc) { ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); } break; case OCS_EVT_RESPONSE: if (is_fc) { ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); } break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } /* callback for domain alloc/attach/free */ static int32_t __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_domain_t *domain = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_sm_event_t evt; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n", domain->indicator, status, hdr->status); evt = OCS_EVT_ERROR; } else { evt = OCS_EVT_RESPONSE; } ocs_sm_post_event(&domain->sm, evt, mqe); return 0; } static int32_t target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_io_t *io = NULL; ocs_hw_io_t *io_next = NULL; uint64_t ticks_current = ocs_get_os_ticks(); uint32_t sec_elapsed; + ocs_hw_rtn_e rc; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n", status, hdr->status); /* go ahead and proceed with wqe timer checks... */ } /* loop through active WQE list and check for timeouts */ ocs_lock(&hw->io_lock); - ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) { - sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq()); + ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) { + sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq()); - /* - * If elapsed time > timeout, abort it. 
No need to check type since - * it wouldn't be on this list unless it was a target WQE - */ - if (sec_elapsed > io->tgt_wqe_timeout) { - ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n", - io->indicator, io->reqtag, io->type); + /* + * If elapsed time > timeout, abort it. No need to check type since + * it wouldn't be on this list unless it was a target WQE + */ + if (sec_elapsed > io->tgt_wqe_timeout) { + ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n", + io->indicator, io->reqtag, io->type); - /* remove from active_wqe list so won't try to abort again */ - ocs_list_remove(&hw->io_timed_wqe, io); + /* remove from active_wqe list so won't try to abort again */ + ocs_list_remove(&hw->io_timed_wqe, io); - /* save status of "timed out" for when abort completes */ - io->status_saved = 1; - io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT; - io->saved_ext = 0; - io->saved_len = 0; + /* save status of "timed out" for when abort completes */ + io->status_saved = 1; + io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT; + io->saved_ext = 0; + io->saved_len = 0; - /* now abort outstanding IO */ - ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); + /* now abort outstanding IO */ + rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); + if (rc) { + ocs_log_test(hw->os, + "abort failed xri=%#x tag=%#x rc=%d\n", + io->indicator, io->reqtag, rc); } - /* - * need to go through entire list since each IO could have a - * different timeout value - */ } + /* + * need to go through entire list since each IO could have a + * different timeout value + */ + } ocs_unlock(&hw->io_lock); /* if we're not in the middle of shutting down, schedule next timer */ if (!hw->active_wqe_timer_shutdown) { ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); } hw->in_active_wqe_timer = FALSE; return 0; } static void target_wqe_timer_cb(void *arg) { ocs_hw_t *hw = (ocs_hw_t *)arg; /* delete existing timer; will kick off new timer 
after checking wqe timeouts */ hw->in_active_wqe_timer = TRUE; ocs_del_timer(&hw->wqe_timer); /* Forward timer callback to execute in the mailbox completion processing context */ if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) { ocs_log_test(hw->os, "ocs_hw_async_call failed\n"); } } static void shutdown_target_wqe_timer(ocs_hw_t *hw) { uint32_t iters = 100; if (hw->config.emulate_tgt_wqe_timeout) { /* request active wqe timer shutdown, then wait for it to complete */ hw->active_wqe_timer_shutdown = TRUE; /* delete WQE timer and wait for timer handler to complete (if necessary) */ ocs_del_timer(&hw->wqe_timer); /* now wait for timer handler to complete (if necessary) */ while (hw->in_active_wqe_timer && iters) { /* * if we happen to have just sent NOP mailbox command, make sure * completions are being processed */ ocs_hw_flush(hw); iters--; } if (iters == 0) { ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n"); } } } /** * @brief Determine if HW IO is owned by the port. * * @par Description * Determines if the given HW IO has been posted to the chip. * * @param hw Hardware context allocated by the caller. * @param io HW IO. * * @return Returns TRUE if given HW IO is port-owned. */ uint8_t ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) { /* Check to see if this is a port owned XRI */ return io->is_port_owned; } /** * @brief Return TRUE if exchange is port-owned. * * @par Description * Test to see if the xri is a port-owned xri. * * @param hw Hardware context. * @param xri Exchange indicator. * * @return Returns TRUE if XRI is a port owned XRI. */ uint8_t ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri) { ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri); return (io == NULL ? FALSE : io->is_port_owned); } /** * @brief Returns an XRI from the port owned list to the host. * * @par Description * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes. * * @param hw Hardware context. 
* @param xri_base The starting XRI number. * @param xri_count The number of XRIs to free from the base. */ static void ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count) { ocs_hw_io_t *io; uint32_t i; for (i = 0; i < xri_count; i++) { io = ocs_hw_io_lookup(hw, xri_base + i); /* * if this is an auto xfer rdy XRI, then we need to release any * buffer attached to the XRI before moving the XRI back to the free pool. */ if (hw->auto_xfer_rdy_enabled) { ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io); } ocs_lock(&hw->io_lock); ocs_list_remove(&hw->io_port_owned, io); io->is_port_owned = 0; ocs_list_add_tail(&hw->io_free, io); ocs_unlock(&hw->io_lock); } } /** * @brief Called when the POST_XRI command completes. * * @par Description * Free the mailbox command buffer and reclaim the XRIs on failure. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * * @return Returns 0. */ static int32_t ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe; /* Reclaim the XRIs as host owned if the command fails */ if (status != 0) { ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt =x%x\n", status, post_xri->xri_base, post_xri->xri_count); ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port. * * @param hw Hardware context. * @param xri_start The starting XRI to post. * @param num_to_post The number of XRIs to post. * * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS. 
*/ static ocs_hw_rtn_e ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post) { uint8_t *post_xri; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* Since we need to allocate for mailbox queue, just always allocate */ post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (post_xri == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } /* Register the XRIs */ if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE, xri_start, num_to_post)) { rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "post_xri failed\n"); } } return rc; } /** * @brief Move XRIs from the host-controlled pool to the port. * * @par Description * Removes IOs from the free list and moves them to the port. * * @param hw Hardware context. * @param num_xri The number of XRIs being requested to move to the chip. * * @return Returns the number of XRIs that were moved. */ uint32_t ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri) { ocs_hw_io_t *io; uint32_t i; uint32_t num_posted = 0; /* * Note: We cannot use ocs_hw_io_alloc() because that would place the * IO on the io_inuse list. We need to move from the io_free to * the io_port_owned list. */ ocs_lock(&hw->io_lock); for (i = 0; i < num_xri; i++) { if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { ocs_hw_rtn_e rc; /* * if this is an auto xfer rdy XRI, then we need to attach a * buffer to the XRI before submitting it to the chip. If a * buffer is unavailable, then we cannot post it, so return it * to the free pool. 
*/ if (hw->auto_xfer_rdy_enabled) { /* Note: uses the IO lock to get the auto xfer rdy buffer */ ocs_unlock(&hw->io_lock); rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io); ocs_lock(&hw->io_lock); if (rc != OCS_HW_RTN_SUCCESS) { ocs_list_add_head(&hw->io_free, io); break; } } ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator); io->is_port_owned = 1; ocs_list_add_tail(&hw->io_port_owned, io); /* Post XRI */ if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) { ocs_hw_reclaim_xri(hw, io->indicator, i); break; } num_posted++; } else { /* no more free XRIs */ break; } } ocs_unlock(&hw->io_lock); return num_posted; } /** * @brief Called when the RELEASE_XRI command completes. * * @par Description * Move the IOs back to the free pool on success. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * * @return Returns 0. */ static int32_t ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe; uint8_t i; /* Reclaim the XRIs as host owned if the command fails */ if (status != 0) { ocs_log_err(hw->os, "Status 0x%x\n", status); } else { for (i = 0; i < release_xri->released_xri_count; i++) { uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 : release_xri->xri_tbl[i/2].xri_tag1); ocs_hw_reclaim_xri(hw, xri, 1); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Move XRIs from the port-controlled pool to the host. * * Requests XRIs from the FW to return to the host-owned pool. * * @param hw Hardware context. * @param num_xri The number of XRIs being requested to moved from the chip. * * @return Returns 0 for success, or a negative error code value for failure. 
*/ ocs_hw_rtn_e ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri) { uint8_t *release_xri; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* non-local buffer required for mailbox queue */ release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (release_xri == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } /* release the XRIs */ if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) { rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "release_xri failed\n"); } } /* If we are polling or an error occurred, then free the mailbox buffer */ if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE); } return rc; } /** * @brief Allocate an ocs_hw_rx_buffer_t array. * * @par Description * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory. * * @param hw Pointer to HW object. * @param rqindex RQ index for this buffer. * @param count Count of buffers in array. * @param size Size of buffer. * * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array. */ static ocs_hw_rq_buffer_t * ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size) { ocs_t *ocs = hw->os; ocs_hw_rq_buffer_t *rq_buf = NULL; ocs_hw_rq_buffer_t *prq; uint32_t i; if (count != 0) { rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO); if (rq_buf == NULL) { ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n"); return NULL; } for (i = 0, prq = rq_buf; i < count; i ++, prq++) { prq->rqindex = rqindex; if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "DMA allocation failed\n"); ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); rq_buf = NULL; break; } } } return rq_buf; } /** * @brief Free an ocs_hw_rx_buffer_t array. 
* * @par Description * The ocs_hw_rx_buffer_t array is freed, along with allocated DMA memory. * * @param hw Pointer to HW object. * @param rq_buf Pointer to ocs_hw_rx_buffer_t array. * @param count Count of buffers in array. * * @return None. */ static void ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count) { ocs_t *ocs = hw->os; uint32_t i; ocs_hw_rq_buffer_t *prq; if (rq_buf != NULL) { for (i = 0, prq = rq_buf; i < count; i++, prq++) { ocs_dma_free(ocs, &prq->dma); } ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); } } /** * @brief Allocate the RQ data buffers. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_rx_allocate(ocs_hw_t *hw) { ocs_t *ocs = hw->os; uint32_t i; int32_t rc = OCS_HW_RTN_SUCCESS; uint32_t rqindex = 0; hw_rq_t *rq; uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR; uint32_t payload_size = hw->config.rq_default_buffer_size; rqindex = 0; for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; /* Allocate header buffers */ rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size); if (rq->hdr_buf == NULL) { ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n"); rc = OCS_HW_RTN_ERROR; break; } ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id, rq->entry_count, hdr_size); rqindex++; /* Allocate payload buffers */ rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size); if (rq->payload_buf == NULL) { ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n"); rc = OCS_HW_RTN_ERROR; break; } ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id, rq->entry_count, payload_size); rqindex++; } return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS; } /** * @brief Post the RQ data buffers to the chip. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ ocs_hw_rtn_e ocs_hw_rx_post(ocs_hw_t *hw) { uint32_t i; uint32_t idx; uint32_t rq_idx; int32_t rc = 0; /* * In RQ pair mode, we MUST post the header and payload buffer at the * same time. */ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { hw_rq_t *rq = hw->hw_rq[rq_idx]; for (i = 0; i < rq->entry_count-1; i++) { ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++); ocs_hw_assert(seq != NULL); seq->header = &rq->hdr_buf[i]; seq->payload = &rq->payload_buf[i]; rc = ocs_hw_sequence_free(hw, seq); if (rc) { break; } } if (rc) { break; } } return rc; } /** * @brief Free the RQ data buffers. * * @param hw Pointer to HW object. * */ void ocs_hw_rx_free(ocs_hw_t *hw) { hw_rq_t *rq; uint32_t i; /* Free hw_rq buffers */ for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; if (rq != NULL) { ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count); rq->hdr_buf = NULL; ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count); rq->payload_buf = NULL; } } } /** * @brief HW async call context structure. */ typedef struct { ocs_hw_async_cb_t callback; void *arg; uint8_t cmd[SLI4_BMBX_SIZE]; } ocs_hw_async_call_ctx_t; /** * @brief HW async callback handler * * @par Description * This function is called when the NOP mailbox command completes. The callback stored * in the requesting context is invoked. * * @param hw Pointer to HW object. * @param status Completion status. * @param mqe Pointer to mailbox completion queue entry. * @param arg Caller-provided argument. * * @return None. */ static void ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_async_call_ctx_t *ctx = arg; if (ctx != NULL) { if (ctx->callback != NULL) { (*ctx->callback)(hw, status, mqe, ctx->arg); } ocs_free(hw->os, ctx, sizeof(*ctx)); } } /** * @brief Make an async callback using NOP mailbox command * * @par Description * Post a NOP mailbox command; the callback with argument is invoked upon completion * while in the event processing context. 
* * @param hw Pointer to HW object. * @param callback Pointer to callback function. * @param arg Caller-provided callback. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg) { int32_t rc = 0; ocs_hw_async_call_ctx_t *ctx; /* * Allocate a callback context (which includes the mailbox command buffer), we need * this to be persistent as the mailbox command submission may be queued and executed later * execution. */ ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT); if (ctx == NULL) { ocs_log_err(hw->os, "failed to malloc async call context\n"); return OCS_HW_RTN_NO_MEMORY; } ctx->callback = callback; ctx->arg = arg; /* Build and send a NOP mailbox command */ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) { ocs_log_err(hw->os, "COMMON_NOP format failure\n"); ocs_free(hw->os, ctx, sizeof(*ctx)); rc = -1; } if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) { ocs_log_err(hw->os, "COMMON_NOP command failure\n"); ocs_free(hw->os, ctx, sizeof(*ctx)); rc = -1; } return rc; } /** * @brief Initialize the reqtag pool. * * @par Description * The WQ request tag pool is initialized. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a negative error code value on failure. */ ocs_hw_rtn_e ocs_hw_reqtag_init(ocs_hw_t *hw) { if (hw->wq_reqtag_pool == NULL) { hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE); if (hw->wq_reqtag_pool == NULL) { ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n"); return OCS_HW_RTN_NO_MEMORY; } } ocs_hw_reqtag_reset(hw); return OCS_HW_RTN_SUCCESS; } /** * @brief Allocate a WQ request tag. * * Allocate and populate a WQ request tag from the WQ request tag pool. * * @param hw Pointer to HW object. * @param callback Callback function. * @param arg Pointer to callback argument. 
* * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated. */ hw_wq_callback_t * ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) { hw_wq_callback_t *wqcb; ocs_hw_assert(callback != NULL); wqcb = ocs_pool_get(hw->wq_reqtag_pool); if (wqcb != NULL) { ocs_hw_assert(wqcb->callback == NULL); wqcb->callback = callback; wqcb->arg = arg; } return wqcb; } /** * @brief Free a WQ request tag. * * Free the passed in WQ request tag. * * @param hw Pointer to HW object. * @param wqcb Pointer to WQ request tag object to free. * * @return None. */ void ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb) { ocs_hw_assert(wqcb->callback != NULL); wqcb->callback = NULL; wqcb->arg = NULL; ocs_pool_put(hw->wq_reqtag_pool, wqcb); } /** * @brief Return WQ request tag by index. * * @par Description * Return pointer to WQ request tag object given an index. * * @param hw Pointer to HW object. * @param instance_index Index of WQ request tag to return. * * @return Pointer to WQ request tag, or NULL. */ hw_wq_callback_t * ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index) { hw_wq_callback_t *wqcb; wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index); if (wqcb == NULL) { ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index); } return wqcb; } /** * @brief Reset the WQ request tag pool. * * @par Description * Reset the WQ request tag pool, returning all to the free list. * * @param hw pointer to HW object. * * @return None. 
*/ void ocs_hw_reqtag_reset(ocs_hw_t *hw) { hw_wq_callback_t *wqcb; uint32_t i; /* Remove all from freelist */ while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) { ; } /* Put them all back */ for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) { wqcb->instance_index = i; wqcb->callback = NULL; wqcb->arg = NULL; ocs_pool_put(hw->wq_reqtag_pool, wqcb); } } /** * @brief Handle HW assertion * * HW assert, display diagnostic message, and abort. * * @param cond string describing failing assertion condition * @param filename file name * @param linenum line number * * @return none */ void _ocs_hw_assert(const char *cond, const char *filename, int linenum) { ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond); ocs_abort(); /* no return */ } /** * @brief Handle HW verify * * HW verify, display diagnostic message, dump stack and return. * * @param cond string describing failing verify condition * @param filename file name * @param linenum line number * * @return none */ void _ocs_hw_verify(const char *cond, const char *filename, int linenum) { ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond); ocs_print_stack(); } /** * @brief Reque XRI * * @par Description * Reque XRI * * @param hw Pointer to HW object. 
* @param io Pointer to HW IO * * @return Return 0 if successful else returns -1 */ int32_t ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io ) { int32_t rc = 0; rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1); if (rc) { ocs_list_add_tail(&hw->io_port_dnrx, io); rc = -1; goto exit_ocs_hw_reque_xri; } io->auto_xfer_rdy_dnrx = 0; io->type = OCS_HW_IO_DNRX_REQUEUE; if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) { /* Clear buffer from XRI */ ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf); io->axr_buf = NULL; ocs_log_err(hw->os, "requeue_xri WQE error\n"); ocs_list_add_tail(&hw->io_port_dnrx, io); rc = -1; goto exit_ocs_hw_reque_xri; } if (io->wq == NULL) { io->wq = ocs_hw_queue_next_wq(hw, io); ocs_hw_assert(io->wq != NULL); } /* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread. */ OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++); OCS_STAT(io->wq->use_count++); rc = hw_wq_write(io->wq, &io->wqe); if (rc < 0) { ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc); rc = -1; } exit_ocs_hw_reque_xri: return 0; } uint32_t ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn) { sli4_t *sli4 = &ocs->hw.sli; ocs_dma_t dma; uint8_t *payload = NULL; int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan; /* allocate memory for the service parameters */ if (ocs_dma_alloc(ocs, &dma, 112, 4)) { ocs_log_err(ocs, "Failed to allocate DMA memory\n"); return 1; } if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &dma, indicator)) { ocs_log_err(ocs, "READ_SPARM64 allocation failure\n"); ocs_dma_free(ocs, &dma); return 1; } if (sli_bmbx_command(sli4)) { ocs_log_err(ocs, "READ_SPARM64 command failure\n"); ocs_dma_free(ocs, &dma); return 1; } payload = dma.virt; ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn)); ocs_memcpy(wwnn, payload + 
SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn)); ocs_dma_free(ocs, &dma); return 0; } /** * @page fc_hw_api_overview HW APIs * - @ref devInitShutdown * - @ref domain * - @ref port * - @ref node * - @ref io * - @ref interrupt * *
* The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4 * message details, but the higher level code must still manage domains, ports, * IT nexuses, and IOs. The HW API is designed to help the higher level manage * these objects.

* * The HW uses function callbacks to notify the higher-level code of events * that are received from the chip. There are currently three types of * functions that may be registered: * *
  • domain – This function is called whenever a domain event is generated * within the HW. Examples include a new FCF is discovered, a connection * to a domain is disrupted, and allocation callbacks.
  • *
  • unsolicited – This function is called whenever new data is received in * the SLI-4 receive queue.
  • *
  • rnode – This function is called for remote node events, such as attach status * and allocation callbacks.
* * Upper layer functions may be registered by using the ocs_hw_callback() function. * * FC/FCoE HW *

FC/FCoE HW API

* The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible * interface for creating the necessary common objects and sending I/Os. It may be used * “as is” in customer implementations or it can serve as an example of typical interactions * between a driver and the SLI-4 hardware. The broad categories of functionality include: * *
  • Setting-up and tearing-down of the HW.
  • *
  • Allocating and using the common objects (SLI Port, domain, remote node).
  • *
  • Sending and receiving I/Os.
* *

HW Setup

* To set up the HW: * *
    *
  1. Set up the HW object using ocs_hw_setup().
    * This step performs a basic configuration of the SLI-4 component and the HW to * enable querying the hardware for its capabilities. At this stage, the HW is not * capable of general operations (such as, receiving events or sending I/Os).


 *
  3. Configure the HW according to the driver requirements.
* The HW provides functions to discover hardware capabilities (ocs_hw_get()), as * well as configure the amount of resources required (ocs_hw_set()). The driver * must also register callback functions (ocs_hw_callback()) to receive notification of * various asynchronous events.

    * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This * step creates the underlying queues, commits resources to the hardware, and * prepares the hardware for operation. While the hardware is operational, the * port is not online, and cannot send or receive data.


  5. Finally, the driver can bring the port online (ocs_hw_port_control()).
    * When the link comes up, the HW determines if a domain is present and notifies the * driver using the domain callback function. This is the starting point of the driver's * interaction with the common objects.

    * @b Note: For FCoE, there may be more than one domain available and, therefore, more than one callback.
* *

Allocating and Using Common Objects

* Common objects provide a mechanism through which the various OneCore Storage * driver components share and track information. These data structures are primarily * used to track SLI component information but can be extended by other components, if * needed. The main objects are: * *
  • DMA – the ocs_dma_t object describes a memory region suitable for direct * memory access (DMA) transactions.
  • *
  • SCSI domain – the ocs_domain_t object represents the SCSI domain, including * any infrastructure devices such as FC switches and FC forwarders. The domain * object contains both an FCFI and a VFI.
  • *
  • SLI Port (sport) – the ocs_sli_port_t object represents the connection between * the driver and the SCSI domain. The SLI Port object contains a VPI.
  • *
  • Remote node – the ocs_remote_node_t represents a connection between the SLI * Port and another device in the SCSI domain. The node object contains an RPI.
* * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote * node common objects and establish the connections between them. The goal is to * connect the driver to the SCSI domain to exchange I/Os with other devices. These * common object connections are shown in the following figure, FC Driver Common Objects: * FC Driver Common Objects * * The first step is to create a connection to the domain by allocating an SLI Port object. * The SLI Port object represents a particular FC ID and must be initialized with one. With * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying * a domain, the driver allocates a domain object and attaches to it using the previous SLI * port object.

* * @b Note: In some cases, the driver may need to negotiate service parameters (that is, * FLOGI) with the domain before attaching.

* * Once attached to the domain, the driver can discover and attach to other devices * (remote nodes). The exact discovery method depends on the driver, but it typically * includes using a position map, querying the fabric name server, or an out-of-band * method. In most cases, it is necessary to log in with devices before performing I/Os. * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port * before exchanging FCP I/O.

* * @b Note: The HW manages both the well known fabric address and the name server as * nodes in the domain. Therefore, the driver must allocate node objects prior to * communicating with either of these entities. * *

Sending and Receiving I/Os

* The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the * commands are conceptually similar. Since the commands complete asynchronously, * the caller must provide a HW I/O object that maintains the I/O state, as well as * provide a callback function. The driver may use the same callback function for all I/O * operations, but each operation must use a unique HW I/O object. In the SLI-4 * architecture, there is a direct association between the HW I/O object and the SGL used * to describe the data. Therefore, a driver typically performs the following operations: * *
  • Allocates a HW I/O object (ocs_hw_io_alloc()).
  • *
  • Formats the SGL, specifying both the HW I/O object and the SGL. * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).
  • *
  • Sends the HW I/O (ocs_hw_io_send()).
* *

HW Tear Down

* To tear-down the HW: * *
  1. Take the port offline (ocs_hw_port_control()) to prevent receiving further data and events.
  3. Destroy the HW object (ocs_hw_teardown()).
  5. Free any memory used by the HW, such as buffers for unsolicited data.
*
*
* */ /** * This contains all hw runtime workaround code. Based on the asic type, * asic revision, and range of fw revisions, a particular workaround may be enabled. * * A workaround may consist of overriding a particular HW/SLI4 value that was initialized * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to * control specific runtime behavior. * * It is intended that the controls in ocs_hw_workaround_t be defined functionally. So we * would have the driver look like: "if (hw->workaround.enable_xxx) then ...", rather than * what we might previously see as "if this is a BE3, then do xxx" * */ #define HW_FWREV_ZERO (0ull) #define HW_FWREV_MAX (~0ull) #define SLI4_ASIC_TYPE_ANY 0 #define SLI4_ASIC_REV_ANY 0 /** * @brief Internal definition of workarounds */ typedef enum { HW_WORKAROUND_TEST = 1, HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */ HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */ HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, HW_WORKAROUND_WQE_COUNT_METHOD, HW_WORKAROUND_RQE_COUNT_METHOD, HW_WORKAROUND_USE_UNREGISTERD_RPI, HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */ HW_WORKAROUND_DISABLE_SET_DUMP_LOC, HW_WORKAROUND_USE_DIF_QUARANTINE, HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */ HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */ HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */ HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */ HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, /**< Don't use SEND_FRAME capable if FW version is too old */ } hw_workaround_e; /** * @brief Internal workaround structure instance */ typedef struct { sli4_asic_type_e asic_type; sli4_asic_rev_e asic_rev; uint64_t fwrev_low; uint64_t fwrev_high; 
hw_workaround_e workaround; uint32_t value; } hw_workaround_t; static hw_workaround_t hw_workarounds[] = { {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_TEST, 999}, /* Bug: 127585: if_type == 2 returns 0 for total length placed on * FCP_TSEND64_WQE completions. Note, original driver code enables this * workaround for all asic types */ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0}, /* Bug: unknown, Lancer A0 has mis-reported max queue depth */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_MAX_QUEUE, 2048}, /* Bug: 143399, BE3 has mis-reported max RQ queue depth */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0), HW_WORKAROUND_MAX_RQ, 2048}, /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0), HW_WORKAROUND_MAX_RQ, 2048}, /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0), HW_WORKAROUND_WQE_COUNT_METHOD, 1}, /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0), HW_WORKAROUND_RQE_COUNT_METHOD, 1}, /* Bug: 142968, BE3 UE with RPI == 0xffff */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_UNREGISTERD_RPI, 0}, /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0}, {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0), HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0}, /* Bug: 160124, Skyhawk quarantine DIF XRIs */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_DIF_QUARANTINE, 0}, /* Bug: 161832, 
Skyhawk use secondary XRI for multiple data phase TRECV */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_DIF_SEC_XRI, 0}, /* Bug: xxxxxx, FCFI reported in SRB not corrrect */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0}, #if 0 /* Bug: 165642, FW version check for driver */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER), HW_WORKAROUND_FW_VERSION_TOO_LOW, 0}, #endif {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK), HW_WORKAROUND_FW_VERSION_TOO_LOW, 0}, /* Bug 177061, Lancer FW does not set the SGLC bit */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_SGLC_MISREPORTED, 0}, /* BZ 181208/183914, enable this workaround for ALL revisions */ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0}, }; /** * @brief Function prototypes */ static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w); /** * @brief Parse the firmware version (name) * * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined * by the HW_FWREV() macro * * @param fwrev_string pointer to the firmware string * * @return packed firmware revision value */ static uint64_t parse_fw_version(const char *fwrev_string) { int v[4] = {0}; const char *p; int i; for (p = fwrev_string, i = 0; *p && (i < 4); i ++) { v[i] = ocs_strtoul(p, 0, 0); while(*p && *p != '.') { p ++; } if (*p) { p ++; } } /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */ if (v[2] == 9999) { return HW_FWREV_MAX; } else { return HW_FWREV(v[0], v[1], v[2], v[3]); } } /** * @brief Test for a workaround match * * Looks at the asic type, asic revision, and fw revision, and returns TRUE if match. 
* * @param hw Pointer to the HW structure * @param w Pointer to a workaround structure entry * * @return Return TRUE for a match */ static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w) { return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) && ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) && (w->fwrev_low <= hw->workaround.fwrev) && ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high))); } /** * @brief Setup HW runtime workarounds * * The function is called at the end of ocs_hw_setup() to setup any runtime workarounds * based on the HW/SLI setup. * * @param hw Pointer to HW structure * * @return none */ void ocs_hw_workaround_setup(struct ocs_hw_s *hw) { hw_workaround_t *w; sli4_t *sli4 = &hw->sli; uint32_t i; /* Initialize the workaround settings */ ocs_memset(&hw->workaround, 0, sizeof(hw->workaround)); /* If hw_war_version is non-null, then its a value that was set by a module parameter * (sorry for the break in abstraction, but workarounds are ... 
well, workarounds) */ if (hw->hw_war_version) { hw->workaround.fwrev = parse_fw_version(hw->hw_war_version); } else { hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]); } /* Walk the workaround list, if a match is found, then handle it */ for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) { if (ocs_hw_workaround_match(hw, w)) { switch(w->workaround) { case HW_WORKAROUND_TEST: { ocs_log_debug(hw->os, "Override: test: %d\n", w->value); break; } case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: { ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n"); hw->workaround.retain_tsend_io_length = 1; break; } case HW_WORKAROUND_MAX_QUEUE: { sli4_qtype_e q; ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value); for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { if (hw->num_qentries[q] > w->value) { hw->num_qentries[q] = w->value; } } break; } case HW_WORKAROUND_MAX_RQ: { ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value); if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) { hw->num_qentries[SLI_QTYPE_RQ] = w->value; } break; } case HW_WORKAROUND_WQE_COUNT_METHOD: { ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value); sli4->config.count_method[SLI_QTYPE_WQ] = w->value; sli_calc_max_qentries(sli4); break; } case HW_WORKAROUND_RQE_COUNT_METHOD: { ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value); sli4->config.count_method[SLI_QTYPE_RQ] = w->value; sli_calc_max_qentries(sli4); break; } case HW_WORKAROUND_USE_UNREGISTERD_RPI: ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n"); hw->workaround.use_unregistered_rpi = TRUE; /* * Allocate an RPI that is never registered, to be used in the case where * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF */ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid, &hw->workaround.unregistered_index)) { 
ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n"); hw->workaround.use_unregistered_rpi = FALSE; } break; case HW_WORKAROUND_DISABLE_AR_TGT_DIF: ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n"); hw->workaround.disable_ar_tgt_dif = TRUE; break; case HW_WORKAROUND_DISABLE_SET_DUMP_LOC: ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n"); hw->workaround.disable_dump_loc = TRUE; break; case HW_WORKAROUND_USE_DIF_QUARANTINE: ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n"); hw->workaround.use_dif_quarantine = TRUE; break; case HW_WORKAROUND_USE_DIF_SEC_XRI: ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n"); hw->workaround.use_dif_sec_xri = TRUE; break; case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB: ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n"); hw->workaround.override_fcfi = TRUE; break; case HW_WORKAROUND_FW_VERSION_TOO_LOW: ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n"); hw->workaround.fw_version_too_low = TRUE; break; case HW_WORKAROUND_SGLC_MISREPORTED: ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n"); hw->workaround.sglc_misreported = TRUE; break; case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE: ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n"); hw->workaround.ignore_send_frame = TRUE; break; } /* switch(w->workaround) */ } } } Index: head/sys/dev/ocs_fc/ocs_hw_queues.c =================================================================== --- head/sys/dev/ocs_fc/ocs_hw_queues.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_hw_queues.c (revision 343349) @@ -1,2602 +1,2613 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * */ #include "ocs_os.h" #include "ocs_hw.h" #include "ocs_hw_queues.h" #define HW_QTOP_DEBUG 0 /** * @brief Initialize queues * * Given the parsed queue topology spec, the SLI queues are created and * initialized * * @param hw pointer to HW object * @param qtop pointer to queue topology * * @return returns 0 for success, an error code value for failure. 
*/ ocs_hw_rtn_e ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop) { uint32_t i, j; uint32_t default_lengths[QTOP_LAST], len; uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0; uint8_t rqset_filter_mask = 0; hw_eq_t *eqs[hw->config.n_rq]; hw_cq_t *cqs[hw->config.n_rq]; hw_rq_t *rqs[hw->config.n_rq]; ocs_hw_qtop_entry_t *qt, *next_qt; ocs_hw_mrq_t mrq; bool use_mrq = FALSE; hw_eq_t *eq = NULL; hw_cq_t *cq = NULL; hw_wq_t *wq = NULL; hw_rq_t *rq = NULL; hw_mq_t *mq = NULL; mrq.num_pairs = 0; default_lengths[QTOP_EQ] = 1024; default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ]; default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ]; default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ]; default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH; ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG); hw->eq_count = 0; hw->cq_count = 0; hw->mq_count = 0; hw->wq_count = 0; hw->rq_count = 0; hw->hw_rq_count = 0; ocs_list_init(&hw->eq_list, hw_eq_t, link); /* If MRQ is requested, Check if it is supported by SLI. 
*/ if ((hw->config.n_rq > 1 ) && !hw->sli.config.features.flag.mrqp) { ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n"); return OCS_HW_RTN_ERROR; } if (hw->config.n_rq > 1) use_mrq = TRUE; /* Allocate class WQ pools */ for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) { hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (hw->wq_class_array[i] == NULL) { ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n"); return OCS_HW_RTN_NO_MEMORY; } } /* Allocate per CPU WQ pools */ for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) { hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (hw->wq_cpu_array[i] == NULL) { ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n"); return OCS_HW_RTN_NO_MEMORY; } } ocs_hw_assert(qtop != NULL); for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) { if (i == qtop->inuse_count - 1) next_qt = NULL; else next_qt = qt + 1; switch(qt->entry) { case QTOP_EQ: len = (qt->len) ? qt->len : default_lengths[QTOP_EQ]; if (qt->set_default) { default_lengths[QTOP_EQ] = len; break; } eq = hw_new_eq(hw, len); if (eq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } break; case QTOP_CQ: len = (qt->len) ? qt->len : default_lengths[QTOP_CQ]; if (qt->set_default) { default_lengths[QTOP_CQ] = len; break; } + + if (!eq || !next_qt) { + goto fail; + } /* If this CQ is for MRQ, then delay the creation */ if (!use_mrq || next_qt->entry != QTOP_RQ) { cq = hw_new_cq(eq, len); if (cq == NULL) { - hw_queue_teardown(hw); - return OCS_HW_RTN_NO_MEMORY; + goto fail; } } break; case QTOP_WQ: { len = (qt->len) ? 
qt->len : default_lengths[QTOP_WQ]; if (qt->set_default) { default_lengths[QTOP_WQ] = len; break; } if ((hw->ulp_start + qt->ulp) > hw->ulp_max) { ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp); hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } + + if (cq == NULL) + goto fail; wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp); if (wq == NULL) { - hw_queue_teardown(hw); - return OCS_HW_RTN_NO_MEMORY; + goto fail; } /* Place this WQ on the EQ WQ array */ if (ocs_varray_add(eq->wq_array, wq)) { ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Place this WQ on the HW class array */ if (qt->class < ARRAY_SIZE(hw->wq_class_array)) { if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) { ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } } else { ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* * Place this WQ on the per CPU list, asumming that EQs are mapped to cpu given * by the EQ instance modulo number of CPUs */ if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) { ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } break; } case QTOP_RQ: { len = (qt->len) ? 
qt->len : default_lengths[QTOP_RQ]; if (qt->set_default) { default_lengths[QTOP_RQ] = len; break; } if ((hw->ulp_start + qt->ulp) > hw->ulp_max) { ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp); hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } if (use_mrq) { mrq.rq_cfg[mrq.num_pairs].len = len; mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp; mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask; mrq.rq_cfg[mrq.num_pairs].eq = eq; mrq.num_pairs ++; } else { rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp); if (rq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq->filter_mask = qt->filter_mask; } break; } case QTOP_MQ: len = (qt->len) ? qt->len : default_lengths[QTOP_MQ]; if (qt->set_default) { default_lengths[QTOP_MQ] = len; break; } + if (cq == NULL) + goto fail; + mq = hw_new_mq(cq, len); if (mq == NULL) { - hw_queue_teardown(hw); - return OCS_HW_RTN_NO_MEMORY; + goto fail; } break; default: ocs_hw_assert(0); break; } } if (mrq.num_pairs) { /* First create normal RQs. 
*/ for (i = 0; i < mrq.num_pairs; i++) { for (j = 0; j < mrq.num_pairs; j++) { if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) { /* This should be created using set */ if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) { ocs_log_crit(hw->os, "Cant create morethan one RQ Set\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } else if (!rqset_filter_mask){ rqset_filter_mask = mrq.rq_cfg[i].filter_mask; rqset_len = mrq.rq_cfg[i].len; rqset_ulp = mrq.rq_cfg[i].ulp; } eqs[rqset_count] = mrq.rq_cfg[i].eq; rqset_count++; break; } } if (j == mrq.num_pairs) { /* Normal RQ */ cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]); if (cq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp); if (rq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq->filter_mask = mrq.rq_cfg[i].filter_mask; } } /* Now create RQ Set */ if (rqset_count) { if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) { ocs_log_crit(hw->os, "Max Supported MRQ pairs = %d\n", OCE_HW_MAX_NUM_MRQ_PAIRS); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Create CQ set */ if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) { hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Create RQ set */ if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) { hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } for (i = 0; i < rqset_count ; i++) { rqs[i]->filter_mask = rqset_filter_mask; rqs[i]->is_mrq = TRUE; rqs[i]->base_mrq_id = rqs[0]->hdr->id; } hw->hw_mrq_count = rqset_count; } } return OCS_HW_RTN_SUCCESS; +fail: + hw_queue_teardown(hw); + return OCS_HW_RTN_NO_MEMORY; } /** * @brief Allocate a new EQ object * * A new EQ object is instantiated * * @param hw pointer to HW object * @param entry_count number of entries in the EQ * * @return pointer to allocated EQ object */ hw_eq_t* hw_new_eq(ocs_hw_t *hw, uint32_t entry_count) { hw_eq_t *eq = ocs_malloc(hw->os, 
sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT); if (eq != NULL) { eq->type = SLI_QTYPE_EQ; eq->hw = hw; eq->entry_count = entry_count; eq->instance = hw->eq_count++; eq->queue = &hw->eq[eq->instance]; ocs_list_init(&eq->cq_list, hw_cq_t, link); eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (eq->wq_array == NULL) { ocs_free(hw->os, eq, sizeof(*eq)); eq = NULL; } else { if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) { ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance); ocs_free(hw->os, eq, sizeof(*eq)); eq = NULL; } else { sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8); hw->hw_eq[eq->instance] = eq; ocs_list_add_tail(&hw->eq_list, eq); ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id, eq->entry_count); } } } return eq; } /** * @brief Allocate a new CQ object * * A new CQ object is instantiated * * @param eq pointer to parent EQ object * @param entry_count number of entries in the CQ * * @return pointer to allocated CQ object */ hw_cq_t* hw_new_cq(hw_eq_t *eq, uint32_t entry_count) { ocs_hw_t *hw = eq->hw; hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT); if (cq != NULL) { cq->eq = eq; cq->type = SLI_QTYPE_CQ; cq->instance = eq->hw->cq_count++; cq->entry_count = entry_count; cq->queue = &hw->cq[cq->instance]; ocs_list_init(&cq->q_list, hw_q_t, link); if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) { ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n", eq->instance, eq->entry_count); ocs_free(hw->os, cq, sizeof(*cq)); cq = NULL; } else { hw->hw_cq[cq->instance] = cq; ocs_list_add_tail(&eq->cq_list, cq); ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id, cq->entry_count); } } return cq; } /** * @brief Allocate a new CQ Set of objects. * * @param eqs pointer to a set of EQ objects. * @param cqs pointer to a set of CQ objects to be returned. 
* @param num_cqs number of CQ queues in the set. * @param entry_count number of entries in the CQ. * * @return 0 on success and -1 on failure. */ uint32_t hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count) { uint32_t i; ocs_hw_t *hw = eqs[0]->hw; sli4_t *sli4 = &hw->sli; hw_cq_t *cq = NULL; sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT]; /* Initialise CQS pointers to NULL */ for (i = 0; i < num_cqs; i++) { cqs[i] = NULL; } for (i = 0; i < num_cqs; i++) { cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT); if (cq == NULL) goto error; cqs[i] = cq; cq->eq = eqs[i]; cq->type = SLI_QTYPE_CQ; cq->instance = hw->cq_count++; cq->entry_count = entry_count; cq->queue = &hw->cq[cq->instance]; qs[i] = cq->queue; assocs[i] = eqs[i]->queue; ocs_list_init(&cq->q_list, hw_q_t, link); } if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) { ocs_log_err(NULL, "Failed to create CQ Set. \n"); goto error; } for (i = 0; i < num_cqs; i++) { hw->hw_cq[cqs[i]->instance] = cqs[i]; ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]); } return 0; error: for (i = 0; i < num_cqs; i++) { if (cqs[i]) { ocs_free(hw->os, cqs[i], sizeof(*cqs[i])); cqs[i] = NULL; } } return -1; } /** * @brief Allocate a new MQ object * * A new MQ object is instantiated * * @param cq pointer to parent CQ object * @param entry_count number of entries in the MQ * * @return pointer to allocated MQ object */ hw_mq_t* hw_new_mq(hw_cq_t *cq, uint32_t entry_count) { ocs_hw_t *hw = cq->eq->hw; hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT); if (mq != NULL) { mq->cq = cq; mq->type = SLI_QTYPE_MQ; mq->instance = cq->eq->hw->mq_count++; mq->entry_count = entry_count; mq->entry_size = OCS_HW_MQ_DEPTH; mq->queue = &hw->mq[mq->instance]; if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ, mq->queue, mq->entry_size, cq->queue, 0)) { ocs_log_err(hw->os, "MQ allocation failure\n"); ocs_free(hw->os, mq, sizeof(*mq)); mq = NULL; } else { 
hw->hw_mq[mq->instance] = mq; ocs_list_add_tail(&cq->q_list, mq); ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id, mq->entry_count); } } return mq; } /** * @brief Allocate a new WQ object * * A new WQ object is instantiated * * @param cq pointer to parent CQ object * @param entry_count number of entries in the WQ * @param class WQ class * @param ulp index of chute * * @return pointer to allocated WQ object */ hw_wq_t* hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp) { ocs_hw_t *hw = cq->eq->hw; hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT); if (wq != NULL) { wq->hw = cq->eq->hw; wq->cq = cq; wq->type = SLI_QTYPE_WQ; wq->instance = cq->eq->hw->wq_count++; wq->entry_count = entry_count; wq->queue = &hw->wq[wq->instance]; wq->ulp = ulp; wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT; wq->wqec_count = wq->wqec_set_count; wq->free_count = wq->entry_count - 1; wq->class = class; ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link); if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) { ocs_log_err(hw->os, "WQ allocation failure\n"); ocs_free(hw->os, wq, sizeof(*wq)); wq = NULL; } else { hw->hw_wq[wq->instance] = wq; ocs_list_add_tail(&cq->q_list, wq); ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id, wq->entry_count, wq->class, wq->ulp); } } return wq; } /** * @brief Allocate a hw_rq_t object * * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair) * * @param cq pointer to parent CQ object * @param entry_count number of entries in the RQs * @param ulp ULP index for this RQ * * @return pointer to newly allocated hw_rq_t */ hw_rq_t* hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp) { ocs_hw_t *hw = cq->eq->hw; hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT); uint32_t max_hw_rq; ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq); if (rq != NULL) { 
rq->instance = hw->hw_rq_count++; rq->cq = cq; rq->type = SLI_QTYPE_RQ; rq->ulp = ulp; rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR)); /* Create the header RQ */ ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq)); rq->hdr = &hw->rq[hw->rq_count]; rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE; if (sli_fc_rq_alloc(&hw->sli, rq->hdr, rq->entry_count, rq->hdr_entry_size, cq->queue, ulp, TRUE)) { ocs_log_err(hw->os, "RQ allocation failure - header\n"); ocs_free(hw->os, rq, sizeof(*rq)); return NULL; } hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */ hw->rq_count++; ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr size %4d ulp %d\n", rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp); /* Create the default data RQ */ ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq)); rq->data = &hw->rq[hw->rq_count]; rq->data_entry_size = hw->config.rq_default_buffer_size; if (sli_fc_rq_alloc(&hw->sli, rq->data, rq->entry_count, rq->data_entry_size, cq->queue, ulp, FALSE)) { ocs_log_err(hw->os, "RQ allocation failure - first burst\n"); ocs_free(hw->os, rq, sizeof(*rq)); return NULL; } hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */ hw->rq_count++; ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance, rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp); hw->hw_rq[rq->instance] = rq; ocs_list_add_tail(&cq->q_list, rq); rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) * rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT); if (rq->rq_tracker == NULL) { ocs_log_err(hw->os, "RQ tracker buf allocation failure\n"); return NULL; } } return rq; } /** * @brief Allocate a hw_rq_t object SET * * Allocate an RQ object SET, where each element in set * encapsulates 2 SLI queues (for rq pair) * * @param cqs pointers to be associated with RQs. * @param rqs RQ pointers to be returned on success. * @param num_rq_pairs number of rq pairs in the Set. 
* @param entry_count number of entries in the RQs * @param ulp ULP index for this RQ * * @return 0 in success and -1 on failure. */ uint32_t hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp) { ocs_hw_t *hw = cqs[0]->eq->hw; hw_rq_t *rq = NULL; sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL }; uint32_t max_hw_rq, i, q_count; ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq); /* Initialise RQS pointers */ for (i = 0; i < num_rq_pairs; i++) { rqs[i] = NULL; } for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) { rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT); if (rq == NULL) goto error; rqs[i] = rq; rq->instance = hw->hw_rq_count++; rq->cq = cqs[i]; rq->type = SLI_QTYPE_RQ; rq->ulp = ulp; rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR)); /* Header RQ */ rq->hdr = &hw->rq[hw->rq_count]; rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE; hw->hw_rq_lookup[hw->rq_count] = rq->instance; hw->rq_count++; qs[q_count] = rq->hdr; /* Data RQ */ rq->data = &hw->rq[hw->rq_count]; rq->data_entry_size = hw->config.rq_default_buffer_size; hw->hw_rq_lookup[hw->rq_count] = rq->instance; hw->rq_count++; qs[q_count + 1] = rq->data; rq->rq_tracker = NULL; } if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs, cqs[0]->queue->id, rqs[0]->entry_count, rqs[0]->hdr_entry_size, rqs[0]->data_entry_size, ulp)) { ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id); goto error; } for (i = 0; i < num_rq_pairs; i++) { hw->hw_rq[rqs[i]->instance] = rqs[i]; ocs_list_add_tail(&cqs[i]->q_list, rqs[i]); rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) * rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT); if (rqs[i]->rq_tracker == NULL) { ocs_log_err(hw->os, "RQ tracker buf allocation failure\n"); goto error; } } return 0; error: for (i = 0; i < num_rq_pairs; i++) { if (rqs[i] != NULL) { if (rqs[i]->rq_tracker != NULL) { - ocs_free(hw->os, 
rq->rq_tracker, - sizeof(ocs_hw_sequence_t*) * rq->entry_count); + ocs_free(hw->os, rqs[i]->rq_tracker, + sizeof(ocs_hw_sequence_t*) * + rqs[i]->entry_count); } ocs_free(hw->os, rqs[i], sizeof(*rqs[i])); } } return -1; } /** * @brief Free an EQ object * * The EQ object and any child queue objects are freed * * @param eq pointer to EQ object * * @return none */ void hw_del_eq(hw_eq_t *eq) { if (eq != NULL) { hw_cq_t *cq; hw_cq_t *cq_next; ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) { hw_del_cq(cq); } ocs_varray_free(eq->wq_array); ocs_list_remove(&eq->hw->eq_list, eq); eq->hw->hw_eq[eq->instance] = NULL; ocs_free(eq->hw->os, eq, sizeof(*eq)); } } /** * @brief Free a CQ object * * The CQ object and any child queue objects are freed * * @param cq pointer to CQ object * * @return none */ void hw_del_cq(hw_cq_t *cq) { if (cq != NULL) { hw_q_t *q; hw_q_t *q_next; ocs_list_foreach_safe(&cq->q_list, q, q_next) { switch(q->type) { case SLI_QTYPE_MQ: hw_del_mq((hw_mq_t*) q); break; case SLI_QTYPE_WQ: hw_del_wq((hw_wq_t*) q); break; case SLI_QTYPE_RQ: hw_del_rq((hw_rq_t*) q); break; default: break; } } ocs_list_remove(&cq->eq->cq_list, cq); cq->eq->hw->hw_cq[cq->instance] = NULL; ocs_free(cq->eq->hw->os, cq, sizeof(*cq)); } } /** * @brief Free a MQ object * * The MQ object is freed * * @param mq pointer to MQ object * * @return none */ void hw_del_mq(hw_mq_t *mq) { if (mq != NULL) { ocs_list_remove(&mq->cq->q_list, mq); mq->cq->eq->hw->hw_mq[mq->instance] = NULL; ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq)); } } /** * @brief Free a WQ object * * The WQ object is freed * * @param wq pointer to WQ object * * @return none */ void hw_del_wq(hw_wq_t *wq) { if (wq != NULL) { ocs_list_remove(&wq->cq->q_list, wq); wq->cq->eq->hw->hw_wq[wq->instance] = NULL; ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq)); } } /** * @brief Free an RQ object * * The RQ object is freed * * @param rq pointer to RQ object * * @return none */ void hw_del_rq(hw_rq_t *rq) { - ocs_hw_t *hw = 
rq->cq->eq->hw; if (rq != NULL) { + ocs_hw_t *hw = rq->cq->eq->hw; /* Free RQ tracker */ if (rq->rq_tracker != NULL) { ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count); rq->rq_tracker = NULL; } ocs_list_remove(&rq->cq->q_list, rq); hw->hw_rq[rq->instance] = NULL; ocs_free(hw->os, rq, sizeof(*rq)); } } /** * @brief Display HW queue objects * * The HW queue objects are displayed using ocs_log * * @param hw pointer to HW object * * @return none */ void hw_queue_dump(ocs_hw_t *hw) { hw_eq_t *eq; hw_cq_t *cq; hw_q_t *q; hw_mq_t *mq; hw_wq_t *wq; hw_rq_t *rq; ocs_list_foreach(&hw->eq_list, eq) { ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id); ocs_list_foreach(&eq->cq_list, cq) { ocs_printf(" cq[%d] id %2d current\n", cq->instance, cq->queue->id); ocs_list_foreach(&cq->q_list, q) { switch(q->type) { case SLI_QTYPE_MQ: mq = (hw_mq_t *) q; ocs_printf(" mq[%d] id %2d\n", mq->instance, mq->queue->id); break; case SLI_QTYPE_WQ: wq = (hw_wq_t *) q; ocs_printf(" wq[%d] id %2d\n", wq->instance, wq->queue->id); break; case SLI_QTYPE_RQ: rq = (hw_rq_t *) q; ocs_printf(" rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id); break; default: break; } } } } } /** * @brief Teardown HW queue objects * * The HW queue objects are freed * * @param hw pointer to HW object * * @return none */ void hw_queue_teardown(ocs_hw_t *hw) { uint32_t i; hw_eq_t *eq; hw_eq_t *eq_next; if (ocs_list_valid(&hw->eq_list)) { ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) { hw_del_eq(eq); } } for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) { ocs_varray_free(hw->wq_cpu_array[i]); hw->wq_cpu_array[i] = NULL; } for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) { ocs_varray_free(hw->wq_class_array[i]); hw->wq_class_array[i] = NULL; } } /** * @brief Allocate a WQ to an IO object * * The next work queue index is used to assign a WQ to an IO. * * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is * selected. 
 *
 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
 * the IO request came in on is selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
 * CPU the request is made on is selected.
 *
 * @param hw pointer to HW object
 * @param io pointer to IO object
 *
 * @return Return pointer to next WQ
 */
hw_wq_t *
ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	hw_eq_t *eq;
	hw_wq_t *wq = NULL;

	switch(io->wq_steering) {
	case OCS_HW_WQ_STEERING_CLASS:
		if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
			wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
		}
		break;
	case OCS_HW_WQ_STEERING_REQUEST:
		eq = io->eq;
		if (likely(eq != NULL)) {
			wq = ocs_varray_iter_next(eq->wq_array);
		}
		break;
	case OCS_HW_WQ_STEERING_CPU: {
		uint32_t cpuidx = ocs_thread_getcpu();

		if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
			wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
		}
		break;
	}
	}

	/* Fall back to the first WQ if steering produced nothing */
	if (unlikely(wq == NULL)) {
		wq = hw->hw_wq[0];
	}

	return wq;
}

/**
 * @brief Return count of EQs for a queue topology object
 *
 * The EQ count for in the HWs queue topology (hw->qtop) object is returned
 *
 * @param hw pointer to HW object
 *
 * @return count of EQs
 */
uint32_t
ocs_hw_qtop_eq_count(ocs_hw_t *hw)
{
	return hw->qtop->entry_counts[QTOP_EQ];
}

/* Maximum length (bytes) of a single queue-topology token string */
#define TOKEN_LEN		32

/**
 * @brief return string given a QTOP entry
 *
 * @param entry QTOP entry
 *
 * @return returns string or "unknown"
 */
#if HW_QTOP_DEBUG
static char *
qtopentry2s(ocs_hw_qtop_entry_e entry)
{
	switch(entry) {
	#define P(x)	case x: return #x;
	P(QTOP_EQ)
	P(QTOP_CQ)
	P(QTOP_WQ)
	P(QTOP_RQ)
	P(QTOP_MQ)
	P(QTOP_THREAD_START)
	P(QTOP_THREAD_END)
	P(QTOP_LAST)
	#undef P
	}
	return "unknown";
}
#endif

/**
 * @brief Declare token types
 */
typedef enum {
	TOK_LPAREN = 1,
	TOK_RPAREN,
	TOK_COLON,
	TOK_EQUALS,
	TOK_QUEUE,
	TOK_ATTR_NAME,
	TOK_NUMBER,
	TOK_NUMBER_VALUE,
	TOK_NUMBER_LIST,
} tok_type_e;

/**
 * @brief Declare token sub-types
 */
typedef enum {
	TOK_SUB_EQ = 100,
	TOK_SUB_CQ,
	TOK_SUB_RQ,
	TOK_SUB_MQ,
	TOK_SUB_WQ,
	TOK_SUB_LEN,
	TOK_SUB_CLASS,
	TOK_SUB_ULP,
	TOK_SUB_FILTER,
} tok_subtype_e;

/**
 * @brief convert queue subtype to QTOP entry
 *
 * @param q queue subtype
 *
 * @return QTOP entry or 0
 */
static ocs_hw_qtop_entry_e
subtype2qtop(tok_subtype_e q)
{
	switch(q) {
	case TOK_SUB_EQ:	return QTOP_EQ;
	case TOK_SUB_CQ:	return QTOP_CQ;
	case TOK_SUB_RQ:	return QTOP_RQ;
	case TOK_SUB_MQ:	return QTOP_MQ;
	case TOK_SUB_WQ:	return QTOP_WQ;
	default:
		break;
	}
	return 0;
}

/**
 * @brief Declare token object
 */
typedef struct {
	tok_type_e type;
	tok_subtype_e subtype;
	char string[TOKEN_LEN];
} tok_t;

/**
 * @brief Declare token array object
 */
typedef struct {
	tok_t *tokens;			/* Pointer to array of tokens */
	uint32_t alloc_count;		/* Number of tokens in the array */
	uint32_t inuse_count;		/* Number of tokens posted to array */
	uint32_t iter_idx;		/* Iterator index */
} tokarray_t;

/**
 * @brief Declare token match structure
 */
typedef struct {
	char *s;
	tok_type_e type;
	tok_subtype_e subtype;
} tokmatch_t;

/**
 * @brief test if character is ID start character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID start character
 */
static int32_t
idstart(int c)
{
	return	isalpha(c) || (c == '_') || (c == '$');
}

/**
 * @brief test if character is an ID character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID character
 */
static int32_t
idchar(int c)
{
	return idstart(c) || ocs_isdigit(c);
}

/**
 * @brief Declare single character matches
 */
static tokmatch_t cmatches[] = {
	{"(", TOK_LPAREN},
	{")", TOK_RPAREN},
	{":", TOK_COLON},
	{"=", TOK_EQUALS},
};

/**
 * @brief Declare identifier match strings
 */
static tokmatch_t smatches[] = {
	{"eq", TOK_QUEUE, TOK_SUB_EQ},
	{"cq", TOK_QUEUE, TOK_SUB_CQ},
	{"rq", TOK_QUEUE, TOK_SUB_RQ},
	{"mq", TOK_QUEUE, TOK_SUB_MQ},
	{"wq", TOK_QUEUE, TOK_SUB_WQ},
	{"len", TOK_ATTR_NAME, TOK_SUB_LEN},
	{"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
	{"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
	{"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
};

/**
 * @brief Scan
 string and return next token
 *
 * The string is scanned and the next token is returned
 *
 * @param s input string to scan
 * @param tok pointer to place scanned token
 *
 * @return pointer to input string following scanned token, or NULL
 */
static const char *
tokenize(const char *s, tok_t *tok)
{
	uint32_t i;

	memset(tok, 0, sizeof(*tok));

	/* Skip over whitespace */
	while (*s && ocs_isspace(*s)) {
		s++;
	}

	/* Return if nothing left in this string */
	if (*s == 0) {
		return NULL;
	}

	/* Look for single character matches */
	for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
		if (cmatches[i].s[0] == *s) {
			tok->type = cmatches[i].type;
			tok->subtype = cmatches[i].subtype;
			tok->string[0] = *s++;
			return s;
		}
	}

	/* Scan for a hex number or decimal */
	if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
		char *p = tok->string;

		tok->type = TOK_NUMBER;
		/* copy the "0x" prefix */
		*p++ = *s++;
		*p++ = *s++;
		while ((*s == '.') || ocs_isxdigit(*s)) {
			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
				*p++ = *s;
			}
			if (*s == ',') {
				tok->type = TOK_NUMBER_LIST;
			}
			s++;
		}
		*p = 0;
		return s;
	} else if (ocs_isdigit(*s)) {
		char *p = tok->string;

		tok->type = TOK_NUMBER;
		/* a comma-separated run of digits becomes a number list */
		while ((*s == ',') || ocs_isdigit(*s)) {
			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
				*p++ = *s;
			}
			if (*s == ',') {
				tok->type = TOK_NUMBER_LIST;
			}
			s++;
		}
		*p = 0;
		return s;
	}

	/* Scan for an ID */
	if (idstart(*s)) {
		char *p = tok->string;

		for (*p++ = *s++; idchar(*s); s++) {
			if ((p - tok->string) < TOKEN_LEN) {
				*p++ = *s;
			}
		}

		/* See if this is a $ number value */
		if (tok->string[0] == '$') {
			tok->type = TOK_NUMBER_VALUE;
		} else {
			/* Look for a string match */
			for (i = 0; i < ARRAY_SIZE(smatches); i++) {
				if (strcmp(smatches[i].s, tok->string) == 0) {
					tok->type = smatches[i].type;
					tok->subtype = smatches[i].subtype;
					return s;
				}
			}
		}
	}
	return s;
}

/**
 * @brief convert token type to string
 *
 * @param type token type
 *
 * @return string, or "unknown"
 */
static const char *
token_type2s(tok_type_e type)
{
	switch(type) {
	#define P(x)	case x: return #x;
	P(TOK_LPAREN)
	P(TOK_RPAREN)
	P(TOK_COLON)
	P(TOK_EQUALS)
	P(TOK_QUEUE)
	P(TOK_ATTR_NAME)
	P(TOK_NUMBER)
	P(TOK_NUMBER_VALUE)
	P(TOK_NUMBER_LIST)
	#undef P
	}
	return "unknown";
}

/**
 * @brief convert token sub-type to string
 *
 * @param subtype token sub-type
 *
 * @return string, or "unknown"
 */
static const char *
token_subtype2s(tok_subtype_e subtype)
{
	switch(subtype) {
	#define P(x)	case x: return #x;
	P(TOK_SUB_EQ)
	P(TOK_SUB_CQ)
	P(TOK_SUB_RQ)
	P(TOK_SUB_MQ)
	P(TOK_SUB_WQ)
	P(TOK_SUB_LEN)
	P(TOK_SUB_CLASS)
	P(TOK_SUB_ULP)
	P(TOK_SUB_FILTER)
	#undef P
	}
	return "";
}

/**
 * @brief Generate syntax error message
 *
 * A syntax error message is found, the input tokens are dumped up to and including
 * the token that failed as indicated by the current iterator index.
 *
 * @param hw pointer to HW object
 * @param tokarray pointer to token array object
 *
 * @return none
 */
static void
tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
{
	uint32_t i;
	tok_t *tok;

	ocs_log_test(hw->os, "Syntax error:\n");

	for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
		ocs_log_test(hw->os, "%s [%2d] %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : " ",
			i, token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
	}
}

/**
 * @brief parse a number
 *
 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
 *
 * @param hw pointer to HW object
 * @param qtop pointer to QTOP object
 * @param tok pointer to token to parse
 *
 * @return numeric value
 */
static uint32_t
tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
{
	uint32_t rval = 0;
	uint32_t num_cpus = ocs_get_num_cpus();

	switch(tok->type) {
	case TOK_NUMBER_VALUE:
		if (ocs_strcmp(tok->string, "$ncpu") == 0) {
			rval = num_cpus;
		} else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
			rval = num_cpus - 1;
		} else if (ocs_strcmp(tok->string, "$nwq") == 0) {
			if (hw != NULL) {
				rval = hw->config.n_wq;
			}
		} else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
			rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
		} else if (ocs_strcmp(tok->string, "$nulp") == 0) {
			rval = hw->ulp_max - hw->ulp_start + 1;
		} else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
			/* $rptN refers to the N-th enclosing repeat counter */
			rval = qtop->rptcount[qtop->rptcount_idx-1];
		} else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-2];
		} else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-3];
		} else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-4];
		} else {
			rval = ocs_strtoul(tok->string, 0, 0);
		}
		break;
	case TOK_NUMBER:
		rval = ocs_strtoul(tok->string, 0, 0);
		break;
	default:
		break;
	}
	return rval;
}

/**
 * @brief parse an array of tokens
 *
 * The tokens are semantically parsed, to generate QTOP entries.
 *
 * @param hw pointer to HW object
 * @param tokarray array of tokens
 * @param qtop output QTOP object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
static int32_t
parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
{
	ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
	tok_t *tok;

	for (; (tokarray->iter_idx < tokarray->inuse_count) &&
	     ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
		if (qtop->inuse_count >= qtop->alloc_count) {
			return -1;
		}

		qt = qtop->entries + qtop->inuse_count;

		switch (tok[0].type)
		{
		case TOK_QUEUE:
			qt->entry = subtype2qtop(tok[0].subtype);
			qt->set_default = FALSE;
			qt->len = 0;
			qt->class = 0;
			qtop->inuse_count++;

			tokarray->iter_idx++;		/* Advance current token index */

			/* Parse for queue attributes, possibly multiple instances
			 * (each attribute is a ":name=value" 4-token group) */
			while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
				tok = &tokarray->tokens[tokarray->iter_idx];
				if(	(tok[0].type == TOK_COLON) &&
					(tok[1].type == TOK_ATTR_NAME) &&
					(tok[2].type == TOK_EQUALS) &&
					((tok[3].type == TOK_NUMBER) ||
					 (tok[3].type == TOK_NUMBER_VALUE) ||
					 (tok[3].type == TOK_NUMBER_LIST))) {
					switch (tok[1].subtype) {
					case TOK_SUB_LEN:
						qt->len = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_CLASS:
						qt->class = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_ULP:
						qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_FILTER:
						if (tok[3].type == TOK_NUMBER_LIST) {
							/* comma-separated filter ids -> bitmask */
							uint32_t mask = 0;
							char *p = tok[3].string;

							while ((p != NULL) && *p) {
								uint32_t v;

								v = ocs_strtoul(p, 0, 0);
								if (v < 32) {
									mask |= (1U << v);
								}

								p = ocs_strchr(p, ',');
								if (p != NULL) {
									p++;
								}
							}
							qt->filter_mask = mask;
						} else {
							qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
						}
						break;
					default:
						break;
					}
					/* Advance current token index */
					tokarray->iter_idx += 4;
				} else {
					break;
				}
			}
			qtop->entry_counts[qt->entry]++;
			break;

		case TOK_ATTR_NAME:
			/* "name:queue=value" default-setting form (5-token group) */
			if (	((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
				(tok[1].type == TOK_COLON) &&
				(tok[2].type == TOK_QUEUE) &&
				(tok[3].type == TOK_EQUALS) &&
				((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
				qt->entry = subtype2qtop(tok[2].subtype);
				qt->set_default = TRUE;
				switch(tok[0].subtype) {
				case TOK_SUB_LEN:
					qt->len = tok_getnumber(hw, qtop, &tok[4]);
					break;
				case TOK_SUB_CLASS:
					qt->class = tok_getnumber(hw, qtop, &tok[4]);
					break;
				case TOK_SUB_ULP:
					qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
					break;
				default:
					break;
				}
				qtop->inuse_count++;
				tokarray->iter_idx += 5;
			} else {
				tok_syntax(hw, tokarray);
				return -1;
			}
			break;

		case TOK_NUMBER:
		case TOK_NUMBER_VALUE: {
			/* "N ( ... )" repeat group: recurse into the group N times */
			uint32_t rpt_count = 1;
			uint32_t i;

			rpt_count = tok_getnumber(hw, qtop, tok);

			if (tok[1].type == TOK_LPAREN) {
				uint32_t iter_idx_save;

				tokarray->iter_idx += 2;

				/* save token array iteration index */
				iter_idx_save = tokarray->iter_idx;

				for (i = 0; i < rpt_count; i++) {
					uint32_t rptcount_idx = qtop->rptcount_idx;

					if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
						qtop->rptcount[qtop->rptcount_idx++] = i;
					}

					/* restore token array iteration index */
					tokarray->iter_idx = iter_idx_save;

					/* parse, append to qtop */
					parse_topology(hw, tokarray, qtop);

					qtop->rptcount_idx = rptcount_idx;
				}
			}
			break;
		}

		case TOK_RPAREN:
			/* End of a repeat group: return to the recursive caller */
			tokarray->iter_idx++;
			return 0;

		default:
			tok_syntax(hw, tokarray);
			return -1;
		}
	}
	return 0;
}

/**
 * @brief Parse queue topology string
 *
 * The queue topology object is allocated, and filled with the results of parsing the
 * passed in queue topology string
 *
 * @param hw pointer to HW object
 * @param qtop_string input queue topology string
 *
 * @return pointer to allocated QTOP object, or NULL if there was an error
 */
ocs_hw_qtop_t *
ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
{
	ocs_hw_qtop_t *qtop;
	tokarray_t tokarray;
	const char *s;
#if HW_QTOP_DEBUG
	uint32_t i;
	ocs_hw_qtop_entry_t *qt;
#endif

	ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);

	/* Allocate a token array */
	tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
	if (tokarray.tokens == NULL) {
		return NULL;
	}
	tokarray.alloc_count = MAX_TOKENS;
	tokarray.inuse_count = 0;
	tokarray.iter_idx = 0;

	/* Parse the tokens */
	for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
	     ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
		tokarray.inuse_count++;;
	}

	/* Allocate a queue topology structure */
	qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
	if (qtop == NULL) {
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
		ocs_log_err(hw->os, "malloc qtop failed\n");
		return NULL;
	}
	qtop->os = hw->os;

	/* Allocate queue topology entries */
	qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
	if (qtop->entries == NULL) {
		ocs_log_err(hw->os, "malloc qtop entries failed\n");
		ocs_free(hw->os, qtop, sizeof(*qtop));
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
		return NULL;
	}
	qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
	qtop->inuse_count = 0;

	/* Parse the tokens */
	parse_topology(hw, &tokarray, qtop);
#if HW_QTOP_DEBUG
	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
		ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
		       qt->class, qt->ulp);
	}
#endif

	/* Free the tokens array */
	ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));

	return qtop;
}

/**
 * @brief free queue topology object
 *
 * @param qtop pointer to QTOP object
 *
 * @return none
 */
void
ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
{
	if (qtop != NULL) {
		if (qtop->entries != NULL) {
			ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
		}
		ocs_free(qtop->os, qtop, sizeof(*qtop));
	}
}

/* Uncomment this to turn on RQ debug */
// #define ENABLE_DEBUG_RQBUF

static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);

/**
 * @brief Process receive queue completions for RQ Pair mode.
 *
 * @par Description
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 * callbacks.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	uint16_t rq_id;
	uint32_t index;
	int32_t rqindex;
	int32_t rq_status;
	uint32_t h_len;
	uint32_t p_len;
	ocs_hw_sequence_t *seq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
					     rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
					     rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			/* fall through */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x, \n", rq_status);
		default:
			break;
		}
		return -1;
	}

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
		return -1;
	}

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		 rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	seq->hw = hw;
	seq->auto_xrdy = 0;
	seq->out_of_xris = 0;
	seq->xri = 0;
	seq->hio = NULL;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

	return 0;
}

/**
 * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
 *
 * @par Description
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 * callbacks.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
	uint16_t rq_id;
	uint32_t index;
	int32_t rqindex;
	int32_t rq_status;
	uint32_t h_len;
	uint32_t p_len;
	ocs_hw_sequence_t *seq;
	uint8_t axr_lock_taken = 0;
#if defined(OCS_DISC_SPIN_DELAY)
	uint32_t delay = 0;
	char prop_buf[32];
#endif

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
					    rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
					    rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x, \n", rq_status);
			/* fall through */
		default:
			break;
		}
		return -1;
	}

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
		return -1;
	}

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		 rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	seq->hw = hw;
	seq->auto_xrdy = opt_wr->agxr;
	seq->out_of_xris = opt_wr->oox;
	seq->xri = opt_wr->xri;
	seq->hio = NULL;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	if (seq->auto_xrdy) {
		fc_header_t *fc_hdr = seq->header->dma.virt;

		seq->hio = ocs_hw_io_lookup(hw, seq->xri);
		ocs_lock(&seq->hio->axr_lock);
		axr_lock_taken = 1;

		/* save the FCFI, src_id, dest_id and ox_id because we need it for the sequence object when the data comes. */
		seq->hio->axr_buf->fcfi = seq->fcfi;
		seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
		seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
		seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
		seq->hio->axr_buf->cmd_cqe = 1;

		/*
		 * Since auto xfer rdy is used for this IO, then clear the sequence
		 * initiative bit in the header so that the upper layers wait for the
		 * data. This should flow exactly like the first burst case.
		 */
		fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);

		/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
		if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
			seq->hio->axr_buf->call_axr_cmd = 1;
			seq->hio->axr_buf->cmd_seq = seq;
			goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
		}
	}

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

	if (seq->auto_xrdy) {
		/* If data cqe came before cmd cqe in out of order in case of AXR */
		if(seq->hio->axr_buf->data_cqe == 1) {
#if defined(OCS_DISC_SPIN_DELAY)
			if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
				delay = ocs_strtoul(prop_buf, 0, 0);
				ocs_udelay(delay);
			}
#endif
			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
			if (hw->config.bounce) {
				fc_header_t *hdr = seq->header->dma.virt;
				uint32_t s_id = fc_be24toh(hdr->s_id);
				uint32_t d_id = fc_be24toh(hdr->d_id);
				uint32_t ox_id = ocs_be16toh(hdr->ox_id);
				if (hw->callback.bounce != NULL) {
					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
				}
			} else {
				hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
			}
		}
	}

exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
	if(axr_lock_taken) {
		ocs_unlock(&seq->hio->axr_lock);
	}
	return 0;
}

/**
 * @brief Process CQ completions for Auto xfer rdy data phases.
 *
 * @par Description
 * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
 * being assigned to an IO. When the completion is received, All of the data
 * is in the single buffer.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
	ocs_hw_sequence_t *seq;
	ocs_hw_io_t *io;
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
#if defined(OCS_DISC_SPIN_DELAY)
	uint32_t delay = 0;
	char prop_buf[32];
#endif
	/* Look up the IO */
	io = ocs_hw_io_lookup(hw, opt_wr->xri);
	ocs_lock(&io->axr_lock);
	buf = io->axr_buf;
	buf->data_cqe = 1;
	seq = &buf->seq;
	seq->hw = hw;
	seq->auto_xrdy = 1;
	seq->out_of_xris = 0;
	seq->xri = opt_wr->xri;
	seq->hio = io;
	seq->header = &buf->header;
	seq->payload = &buf->payload;

	seq->header->dma.len = sizeof(fc_header_t);
	seq->payload->dma.len = opt_wr->total_data_placed;
	seq->fcfi = buf->fcfi;
	seq->hw_priv = cq->eq;

	if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
		seq->status = OCS_HW_UNSOL_SUCCESS;
	} else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
		seq->status = OCS_HW_UNSOL_ABTS_RCVD;
	} else {
		seq->status = OCS_HW_UNSOL_ERROR;
	}

	/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
	if(io->type == OCS_HW_IO_TARGET_RSP) {
		io->axr_buf->call_axr_data = 1;
		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
	}

	if(!buf->cmd_cqe) {
		/* if data cqe came before cmd cqe, return here, cmd cqe will handle */
		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
	}
#if defined(OCS_DISC_SPIN_DELAY)
	if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
		delay = ocs_strtoul(prop_buf, 0, 0);
		ocs_udelay(delay);
	}
#endif

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
	ocs_unlock(&io->axr_lock);
	return 0;
}

/**
 * @brief Return pointer to RQ buffer entry.
 *
 * @par Description
 * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
 *
 * @param hw Hardware context.
 * @param rqindex Index of the RQ that is being processed.
 * @param bufindex Index into the RQ that is being processed.
 *
 * @return Pointer to the sequence structure, or NULL otherwise.
 */
static ocs_hw_sequence_t *
ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
{
	/* header and payload queues are adjacent in hw->rq[] in RQ pair mode */
	sli4_queue_t *rq_hdr = &hw->rq[rqindex];
	sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
	ocs_hw_sequence_t *seq = NULL;
	hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];

#if defined(ENABLE_DEBUG_RQBUF)
	uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
#endif

	if (bufindex >= rq_hdr->length) {
		ocs_log_err(hw->os, "RQ index %d bufindex %d exceed ring length %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
		return NULL;
	}

	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

#if defined(ENABLE_DEBUG_RQBUF)
	/* Put a debug value into the rq, to track which entries are still valid */
	_sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
	_sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
#endif

	/* Claim the tracked buffer; slot is cleared until the buffer is reposted */
	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

	if (seq == NULL ) {
		ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
			    rqindex, bufindex, rq_hdr->index);
	}

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);
	return seq;
}

/**
 * @brief Posts an RQ buffer to a queue and update the verification structures
 *
 * @param hw hardware context
 * @param seq Pointer to sequence object.
 *
 * @return Returns 0 on success, or a non-zero value otherwise.
 */
static int32_t
ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
{
	sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
	sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
	uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	hw_rq_t *rq = hw->hw_rq[hw_rq_index];
	uint32_t phys_hdr[2];
	uint32_t phys_payload[2];
	int32_t qindex_hdr;
	int32_t qindex_payload;

	/* Update the RQ verification lookup tables */
	phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
	phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
	phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
	phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);

	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 *       posting on the header queue posts the payload queue as well.
	 *       We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
	qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 ||
	    qindex_payload < 0) {
		ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		sli_queue_unlock(rq_payload);
		sli_queue_unlock(rq_hdr);
		return OCS_HW_RTN_ERROR;
	}

	/* ensure the indexes are the same */
	ocs_hw_assert(qindex_hdr == qindex_payload);

	/* Update the lookup table */
	if (rq->rq_tracker[qindex_hdr] == NULL) {
		rq->rq_tracker[qindex_hdr] = seq;
	} else {
		ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
			     hw_rq_index, qindex_hdr);
	}

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Return RQ buffers (while in RQ pair mode).
 *
 * @par Description
 * The header and payload buffers are returned to the Receive Queue.
 *
 * @param hw Hardware context.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
*/ ocs_hw_rtn_e ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Check for auto xfer rdy dummy buffers and call the proper release function. */ if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) { return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq); } /* * Post the data buffer first. Because in RQ pair mode, ringing the * doorbell of the header ring will post the data buffer as well. */ if (ocs_hw_rqpair_put(hw, seq)) { ocs_log_err(hw->os, "error writing buffers\n"); return OCS_HW_RTN_ERROR; } return rc; } /** * @brief Find the RQ index of RQ_ID. * * @param hw Hardware context. * @param rq_id RQ ID to find. * * @return Returns the RQ index, or -1 if not found */ static inline int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id) { return ocs_hw_queue_hash_find(hw->rq_hash, rq_id); } /** * @ingroup devInitShutdown * @brief Allocate auto xfer rdy buffers. * * @par Description * Allocates the auto xfer rdy buffers and places them on the free list. * * @param hw Hardware context allocated by the caller. * @param num_buffers Number of buffers to allocate. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers) { ocs_hw_auto_xfer_rdy_buffer_t *buf; uint32_t i; hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE); if (hw->auto_xfer_rdy_buf_pool == NULL) { ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n"); return OCS_HW_RTN_NO_MEMORY; } for (i = 0; i < num_buffers; i++) { /* allocate the wrapper object */ buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i); ocs_hw_assert(buf != NULL); /* allocate the auto xfer ready buffer */ if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "DMA allocation failed\n"); ocs_free(hw->os, buf, sizeof(*buf)); return OCS_HW_RTN_NO_MEMORY; } /* build a fake data header in big endian */ buf->hdr.info = FC_RCTL_INFO_SOL_DATA; buf->hdr.r_ctl = FC_RCTL_FC4_DATA; buf->hdr.type = FC_TYPE_FCP; buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_FIRST_SEQUENCE | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE | FC_FCTL_SEQUENCE_INITIATIVE); /* build the fake header DMA object */ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR; buf->header.dma.virt = &buf->hdr; buf->header.dma.alloc = buf; buf->header.dma.size = sizeof(buf->hdr); buf->header.dma.len = sizeof(buf->hdr); buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA; } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX. * * @par Description * When new buffers are freed, check existing XRIs waiting for buffers. * * @param hw Hardware context allocated by the caller. 
 */
static void
ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
{
	ocs_hw_io_t *io;
	int32_t rc;

	ocs_lock(&hw->io_lock);

	/* Re-queue XRIs that were parked waiting for a buffer; stop on first failure. */
	while (!ocs_list_empty(&hw->io_port_dnrx)) {
		io = ocs_list_remove_head(&hw->io_port_dnrx);
		rc = ocs_hw_reque_xri(hw, io);
		if(rc) {
			break;
		}
	}

	ocs_unlock(&hw->io_lock);
}

/**
 * @brief Called when the POST_SGL_PAGE command completes.
 *
 * @par Description
 * Free the mailbox command buffer.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	if (status != 0) {
		ocs_log_debug(hw->os, "Status 0x%x\n", status);
	}

	/* The mailbox buffer was allocated in move_to_port(); release it here. */
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @brief Prepares an XRI to move to the chip.
 *
 * @par Description
 * Puts the data SGL into the SGL list for the IO object and possibly registers
 * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
 * mailbox commands, we don't need to wait for completion before preceding.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
 */
ocs_hw_rtn_e
ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	/* We only need to preregister the SGL if it has not yet been done. */
	if (!sli_get_sgl_preregister(&hw->sli)) {
		uint8_t *post_sgl;
		ocs_dma_t *psgls = &io->def_sgl;
		ocs_dma_t **sgls = &psgls;

		/* non-local buffer required for mailbox queue */
		post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (post_sgl == NULL) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
		/*
		 * NOTE(review): if sli_cmd_fcoe_post_sgl_pages() fails to format
		 * the command (returns 0), post_sgl is neither freed nor is an
		 * error reported — looks like a leak; confirm against the SLI
		 * command helpers' return convention.
		 */
		if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
						io->indicator, 1, sgls, NULL, NULL)) {
			if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
					   ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
				/* Submission failed: the callback will never run, free here. */
				ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
				ocs_log_err(hw->os, "SGL post failed\n");
				return OCS_HW_RTN_ERROR;
			}
		}
	}

	/* Attach an auto xfer rdy buffer to the IO; io_lock protects the pool. */
	ocs_lock(&hw->io_lock);
	if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
		ocs_unlock(&hw->io_lock);
		return OCS_HW_RTN_ERROR;
	}
	ocs_unlock(&hw->io_lock);
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Prepares an XRI to move back to the host.
 *
 * @par Description
 * Releases any attached buffer back to the pool.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 */
void
ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (io->axr_buf != NULL) {
		ocs_lock(&hw->io_lock);
		/* check list and remove if there */
		if (ocs_list_on_list(&io->dnrx_link)) {
			ocs_list_remove(&hw->io_port_dnrx, io);
			io->auto_xfer_rdy_dnrx = 0;

			/* release the count for waiting for a buffer */
			ocs_hw_io_free(hw, io);
		}

		/* Return the buffer, then retry any XRIs that were waiting for one. */
		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
		io->axr_buf = NULL;
		ocs_unlock(&hw->io_lock);

		/* Must run after dropping io_lock: dnrx_check takes io_lock itself. */
		ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
	}
	return;
}

/**
 * @brief Posts an auto xfer rdy buffer to an IO.
 *
 * @par Description
 * Puts the data SGL into the SGL list for the IO object
 * @n @name
 * @b Note: io_lock must be held.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
 */
uint8_t
ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
{
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
	sli4_sge_t *data;

	if(!reuse_buf) {
		/*
		 * NOTE(review): ocs_pool_get() can return NULL when the pool is
		 * exhausted, and io->axr_buf is dereferenced unconditionally for
		 * the data SGE below — confirm callers guarantee availability
		 * (or that the return/DNRX path covers it) before relying on this.
		 */
		buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
		io->axr_buf = buf;
	}

	data = io->def_sgl.virt;
	/* SGE[0] is always a skip entry in this layout. */
	data[0].sge_type = SLI4_SGE_TYPE_SKIP;
	data[0].last = 0;

	/*
	 * Note: if we are doing DIF assists, then the SGE[1] must contain the
	 * DI_SEED SGE. The host is responsible for programming:
	 *  SGE Type (Word 2, bits 30:27)
	 *  Replacement App Tag (Word 2 bits 15:0)
	 *  App Tag (Word 3 bits 15:0)
	 *  New Ref Tag (Word 3 bit 23)
	 *  Metadata Enable (Word 3 bit 20)
	 *  Auto-Increment RefTag (Word 3 bit 19)
	 *  Block Size (Word 3 bits 18:16)
	 * The following fields are managed by the SLI Port:
	 *  Ref Tag Compare (Word 0)
	 *  Replacement Ref Tag (Word 1) - In not the LBA
	 *  NA (Word 2 bit 25)
	 *  Opcode RX (Word 3 bits 27:24)
	 *  Checksum Enable (Word 3 bit 22)
	 *  RefTag Enable (Word 3 bit 21)
	 *
	 * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume eveything is cleared.
	 */
	if (hw->config.auto_xfer_rdy_p_type) {
		sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];

		diseed->sge_type = SLI4_SGE_TYPE_DISEED;
		diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
		diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
		diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
		diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
		diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
	} else {
		/* No DIF assist: SGE[1] is another skip entry. */
		data[1].sge_type = SLI4_SGE_TYPE_SKIP;
		data[1].last = 0;
	}

	/* SGE[2] is the single data SGE pointing at the attached payload buffer. */
	data[2].sge_type = SLI4_SGE_TYPE_DATA;
	data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
	data[2].buffer_address_low = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
	data[2].buffer_length = io->axr_buf->payload.dma.size;
	data[2].last = TRUE;
	data[3].sge_type = SLI4_SGE_TYPE_SKIP;

	return 0;
}

/**
 * @brief Return auto xfer ready buffers (while in RQ pair mode).
* * @par Description * The header and payload buffers are returned to the auto xfer rdy pool. * * @param hw Hardware context. * @param seq Header/payload sequence buffers. * * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure. */ static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq) { ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc; buf->data_cqe = 0; buf->cmd_cqe = 0; buf->fcfi = 0; buf->call_axr_cmd = 0; buf->call_axr_data = 0; /* build a fake data header in big endian */ buf->hdr.info = FC_RCTL_INFO_SOL_DATA; buf->hdr.r_ctl = FC_RCTL_FC4_DATA; buf->hdr.type = FC_TYPE_FCP; buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_FIRST_SEQUENCE | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE | FC_FCTL_SEQUENCE_INITIATIVE); /* build the fake header DMA object */ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR; buf->header.dma.virt = &buf->hdr; buf->header.dma.alloc = buf; buf->header.dma.size = sizeof(buf->hdr); buf->header.dma.len = sizeof(buf->hdr); buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA; ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw); return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Free auto xfer rdy buffers. * * @par Description * Frees the auto xfer rdy buffers. * * @param hw Hardware context allocated by the caller. * * @return Returns 0 on success, or a non-zero value on failure. 
 */
static void
ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
{
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
	uint32_t i;

	if (hw->auto_xfer_rdy_buf_pool != NULL) {
		ocs_lock(&hw->io_lock);
		/* Free each wrapper's DMA payload; the wrappers themselves belong to the pool. */
		for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
			buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
			if (buf != NULL) {
				ocs_dma_free(hw->os, &buf->payload.dma);
			}
		}
		ocs_unlock(&hw->io_lock);

		ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
		hw->auto_xfer_rdy_buf_pool = NULL;
	}
}

/**
 * @ingroup devInitShutdown
 * @brief Configure the rq_pair function from ocs_hw_init().
 *
 * @par Description
 * Allocates the buffers to auto xfer rdy and posts initial XRIs for this feature.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_rqpair_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc;
	uint32_t xris_posted;

	ocs_log_debug(hw->os, "RQ Pair mode\n");

	/*
	 * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
	 * return with an error. So allocate the buffers based on the initial XRI pool required to support this
	 * feature.
	 */
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->auto_xfer_rdy_buf_pool == NULL) {
			/*
			 * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
			 * one to post back for the case where the response phase is started in the context of
			 * the data completion.
			 */
			rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
			if (rc != OCS_HW_RTN_SUCCESS) {
				return rc;
			}
		} else {
			/* Pool already exists (re-init): just recycle all buffers. */
			ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
		}

		/* Post the auto XFR_RDY XRIs */
		xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
		if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
			ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
			return OCS_HW_RTN_ERROR;
		}
	}

	/* Note: literal 0 == OCS_HW_RTN_SUCCESS by convention in this driver. */
	return 0;
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the rq_pair function from ocs_hw_teardown().
 *
 * @par Description
 * Frees the buffers to auto xfer rdy.
 *
 * @param hw Hardware context allocated by the caller.
 */
void
ocs_hw_rqpair_teardown(ocs_hw_t *hw)
{
	/* We need to free any auto xfer ready buffers */
	ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
}
Index: head/sys/dev/ocs_fc/ocs_ioctl.c
===================================================================
--- head/sys/dev/ocs_fc/ocs_ioctl.c	(revision 343348)
+++ head/sys/dev/ocs_fc/ocs_ioctl.c	(revision 343349)
@@ -1,1253 +1,1257 @@
/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "ocs.h"
#include "ocs_utils.h"

/*
 * NOTE(review): the system header names of the following #include directives
 * were lost in extraction (angle-bracketed <sys/...> names stripped as markup);
 * restore them from the pristine ocs_ioctl.c before compiling.
 */
#include
#include
#include
#include
#include
#include
#include

static d_open_t		ocs_open;
static d_close_t	ocs_close;
static d_ioctl_t	ocs_ioctl;

/* Character-device entry points for /dev/ocs_fc management nodes. */
static struct cdevsw ocs_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	ocs_open,
	.d_close =	ocs_close,
	.d_ioctl =	ocs_ioctl,
	.d_name =	"ocs_fc"
};

int
ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status);

static int
ocs_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
#if 0
	struct ocs_softc *ocs = cdev->si_drv1;
	device_printf(ocs->dev, "%s\n", __func__);
#endif
	return 0;
}

static int
ocs_close(struct cdev *cdev, int flag, int fmt, struct thread *td)
{
#if 0
	struct ocs_softc *ocs = cdev->si_drv1;
	device_printf(ocs->dev, "%s\n", __func__);
#endif
	return 0;
}

/* Mailbox completion callback: see the dbg_lock protocol described in
 * ocs_process_mbx_ioctl() — the lock/unlock pair guarantees the ioctl thread
 * has reached msleep() before wakeup() is called. */
static int32_t
__ocs_ioctl_mbox_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	struct ocs_softc *ocs = arg;

	/* wait for the ioctl to sleep before calling wakeup */
	mtx_lock(&ocs->dbg_lock);
	mtx_unlock(&ocs->dbg_lock);

	wakeup(arg);
	return 0;
}

/* Prepare a SLI_CONFIG mailbox command from userland: for embedded requests
 * set up host buffer descriptors (allocating DMA as needed); for non-embedded
 * requests stage the payload in a single DMA buffer. */
static int
ocs_process_sli_config (ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd, ocs_dma_t *dma)
{
	sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)mcmd->payload;

	if (sli_config->emb) {
		sli4_req_hdr_t *req = (sli4_req_hdr_t *)sli_config->payload.embed;

		switch (req->opcode) {
		case SLI4_OPC_COMMON_READ_OBJECT:
			if (mcmd->out_bytes) {
				sli4_req_common_read_object_t *rdobj =
					(sli4_req_common_read_object_t *)sli_config->payload.embed;

				if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
					device_printf(ocs->dev, "%s: COMMON_READ_OBJECT - %lld allocation failed\n",
						__func__, (unsigned long long)mcmd->out_bytes);
					return ENXIO;
				}

				memset(dma->virt, 0, mcmd->out_bytes);

				rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
				rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

			}
			break;
		case SLI4_OPC_COMMON_WRITE_OBJECT:
		{
			sli4_req_common_write_object_t *wrobj =
				(sli4_req_common_write_object_t *)sli_config->payload.embed;

			if (ocs_dma_alloc(ocs, dma, wrobj->desired_write_length, 4096)) {
				device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - %d allocation failed\n",
					__func__, wrobj->desired_write_length);
				return ENXIO;
			}
			/* setup the descriptor */
			wrobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
			wrobj->host_buffer_descriptor[0].buffer_length = wrobj->desired_write_length;
			wrobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
			wrobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

			/* copy the data into the DMA buffer */
			copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes);
		}
			break;
		case SLI4_OPC_COMMON_DELETE_OBJECT:
			break;
		case SLI4_OPC_COMMON_READ_OBJECT_LIST:
			if (mcmd->out_bytes) {
				sli4_req_common_read_object_list_t *rdobj =
					(sli4_req_common_read_object_list_t *)sli_config->payload.embed;

				if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
					device_printf(ocs->dev, "%s: COMMON_READ_OBJECT_LIST - %lld allocation failed\n",
						__func__,(unsigned long long) mcmd->out_bytes);
					return ENXIO;
				}

				memset(dma->virt, 0, mcmd->out_bytes);

				rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
				rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

			}
			break;
		case SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA:
			break;
		default:
			device_printf(ocs->dev, "%s: in=%p (%lld) out=%p (%lld)\n", __func__,
				(void *)(uintptr_t)mcmd->in_addr, (unsigned long long)mcmd->in_bytes,
				(void *)(uintptr_t)mcmd->out_addr, (unsigned long long)mcmd->out_bytes);
			device_printf(ocs->dev, "%s: unknown (opc=%#x)\n", __func__,
				req->opcode);
			hexdump(mcmd, mcmd->size, NULL, 0);
			break;
		}
	} else {
		uint32_t max_bytes = max(mcmd->in_bytes, mcmd->out_bytes);
		if (ocs_dma_alloc(ocs, dma, max_bytes, 4096)) {
			device_printf(ocs->dev, "%s: non-embedded - %u allocation failed\n",
				__func__, max_bytes);
			return ENXIO;
		}

		copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes);

		sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
		sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
		sli_config->payload.mem.length = max_bytes;
	}

	return 0;
}

/* Validate, issue, and complete a userland mailbox command (ELXU tooling). */
static int
ocs_process_mbx_ioctl(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd)
{
	ocs_dma_t dma = { 0 };

	if ((ELXU_BSD_MAGIC != mcmd->magic) ||
	    (sizeof(ocs_ioctl_elxu_mbox_t) != mcmd->size)) {
		device_printf(ocs->dev, "%s: malformed command m=%08x s=%08x\n",
			__func__, mcmd->magic, mcmd->size);
		return EINVAL;
	}

	switch(((sli4_mbox_command_header_t *)mcmd->payload)->command) {
	case SLI4_MBOX_COMMAND_SLI_CONFIG:
		if (ENXIO == ocs_process_sli_config(ocs, mcmd, &dma))
			return ENXIO;
		break;

	case SLI4_MBOX_COMMAND_READ_REV:
	case SLI4_MBOX_COMMAND_READ_STATUS:
	case SLI4_MBOX_COMMAND_READ_LNK_STAT:
		break;

	default:
		device_printf(ocs->dev, "command %d\n",((sli4_mbox_command_header_t *)mcmd->payload)->command);
		device_printf(ocs->dev, "%s, command not support\n", __func__);
		goto no_support;
		break;
	}

	/*
	 * The dbg_lock usage here insures the command completion code
	 * (__ocs_ioctl_mbox_cb), which calls wakeup(), does not run until
	 * after first calling msleep()
	 *
	 * 1. ioctl grabs dbg_lock
	 * 2. ioctl issues command
	 *    if the command completes before msleep(), the
	 *    command completion code (__ocs_ioctl_mbox_cb) will spin
	 *    on dbg_lock before calling wakeup()
	 * 3. ioctl calls msleep which releases dbg_lock before sleeping
	 *    and reacquires it before waking
	 * 4. command completion handler acquires the dbg_lock, immediately
	 *    releases it, and calls wakeup
	 * 5. msleep returns, re-acquiring the lock
	 * 6. ioctl code releases the lock
	 *
	 * NOTE(review): in the '+' side of the hunk below, when ocs_hw_command()
	 * fails the callback never runs, yet msleep() is still entered with an
	 * infinite timeout — verify there is another wakeup source, otherwise
	 * the msleep() should be skipped on failure.
	 */
	mtx_lock(&ocs->dbg_lock);
-	ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
-	    __ocs_ioctl_mbox_cb, ocs);
-	msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
+	if (ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
+	    __ocs_ioctl_mbox_cb, ocs)) {
+
+		device_printf(ocs->dev, "%s: command- %x failed\n", __func__,
+		    ((sli4_mbox_command_header_t *)mcmd->payload)->command);
+	}
+	msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
	mtx_unlock(&ocs->dbg_lock);

	if( SLI4_MBOX_COMMAND_SLI_CONFIG == ((sli4_mbox_command_header_t *)mcmd->payload)->command
			&& mcmd->out_bytes && dma.virt) {
		copyout(dma.virt, (void *)(uintptr_t)mcmd->out_addr, mcmd->out_bytes);
	}

no_support:
	ocs_dma_free(ocs, &dma);

	return 0;
}

/**
 * @brief perform requested Elx CoreDump helper function
 *
 * The Elx CoreDump facility used for BE3 diagnostics uses the OCS_IOCTL_CMD_ECD_HELPER
 * ioctl function to execute requested "help" functions
 *
 * @param ocs pointer to ocs structure
 * @param req pointer to helper function request
 *
 * @return returns 0 for success, a negative error code value for failure.
*/ static int ocs_process_ecd_helper (ocs_t *ocs, ocs_ioctl_ecd_helper_t *req) { int32_t rc = 0; uint8_t v8; uint16_t v16; uint32_t v32; /* Check the BAR read/write commands for valid bar */ switch(req->cmd) { case OCS_ECD_HELPER_BAR_READ8: case OCS_ECD_HELPER_BAR_READ16: case OCS_ECD_HELPER_BAR_READ32: case OCS_ECD_HELPER_BAR_WRITE8: case OCS_ECD_HELPER_BAR_WRITE16: case OCS_ECD_HELPER_BAR_WRITE32: if (req->bar >= PCI_MAX_BAR) { device_printf(ocs->dev, "Error: bar %d out of range\n", req->bar); return -EFAULT; } if (ocs->reg[req->bar].res == NULL) { device_printf(ocs->dev, "Error: bar %d not defined\n", req->bar); return -EFAULT; } break; default: break; } switch(req->cmd) { case OCS_ECD_HELPER_CFG_READ8: v8 = ocs_config_read8(ocs, req->offset); req->data = v8; break; case OCS_ECD_HELPER_CFG_READ16: v16 = ocs_config_read16(ocs, req->offset); req->data = v16; break; case OCS_ECD_HELPER_CFG_READ32: v32 = ocs_config_read32(ocs, req->offset); req->data = v32; break; case OCS_ECD_HELPER_CFG_WRITE8: ocs_config_write8(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_CFG_WRITE16: ocs_config_write16(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_CFG_WRITE32: ocs_config_write32(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_READ8: req->data = ocs_reg_read8(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_READ16: req->data = ocs_reg_read16(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_READ32: req->data = ocs_reg_read32(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_WRITE8: ocs_reg_write8(ocs, req->bar, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_WRITE16: ocs_reg_write16(ocs, req->bar, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_WRITE32: ocs_reg_write32(ocs, req->bar, req->offset, req->data); break; default: device_printf(ocs->dev, "Invalid helper command=%d\n", req->cmd); break; } return rc; } static int ocs_ioctl(struct cdev *cdev, u_long cmd, caddr_t addr, int flag, struct thread 
*td) { int status = 0; struct ocs_softc *ocs = cdev->si_drv1; device_t dev = ocs->dev; switch (cmd) { case OCS_IOCTL_CMD_ELXU_MBOX: { /* "copyin" done by kernel; thus, just dereference addr */ ocs_ioctl_elxu_mbox_t *mcmd = (void *)addr; status = ocs_process_mbx_ioctl(ocs, mcmd); break; } case OCS_IOCTL_CMD_ECD_HELPER: { /* "copyin" done by kernel; thus, just dereference addr */ ocs_ioctl_ecd_helper_t *req = (void *)addr; status = ocs_process_ecd_helper(ocs, req); break; } case OCS_IOCTL_CMD_VPORT: { int32_t rc = 0; ocs_ioctl_vport_t *req = (ocs_ioctl_vport_t*) addr; ocs_domain_t *domain; domain = ocs_domain_get_instance(ocs, req->domain_index); if (domain == NULL) { device_printf(ocs->dev, "domain [%d] nod found\n", req->domain_index); return -EFAULT; } if (req->req_create) { rc = ocs_sport_vport_new(domain, req->wwpn, req->wwnn, UINT32_MAX, req->enable_ini, req->enable_tgt, NULL, NULL, TRUE); } else { rc = ocs_sport_vport_del(ocs, domain, req->wwpn, req->wwnn); } return rc; } case OCS_IOCTL_CMD_GET_DDUMP: { ocs_ioctl_ddump_t *req = (ocs_ioctl_ddump_t*) addr; ocs_textbuf_t textbuf; int x; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { device_printf(ocs->dev, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } switch (req->args.action) { case OCS_IOCTL_DDUMP_GET: case OCS_IOCTL_DDUMP_GET_SAVED: { uint32_t remaining; uint32_t written; uint32_t idx; int32_t n; ocs_textbuf_t *ptbuf = NULL; uint32_t flags = 0; if (req->args.action == OCS_IOCTL_DDUMP_GET_SAVED) { if (ocs_textbuf_initialized(&ocs->ddump_saved)) { ptbuf = &ocs->ddump_saved; } } else { if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } /* translate IOCTL ddump flags to ddump flags */ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_WQES) { flags |= OCS_DDUMP_FLAGS_WQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_CQES) { flags |= OCS_DDUMP_FLAGS_CQES; } if (req->args.flags & 
OCS_IOCTL_DDUMP_FLAGS_MQES) { flags |= OCS_DDUMP_FLAGS_MQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_RQES) { flags |= OCS_DDUMP_FLAGS_RQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_EQES) { flags |= OCS_DDUMP_FLAGS_EQES; } /* Try 3 times to get the dump */ for(x=0; x<3; x++) { if (ocs_ddump(ocs, &textbuf, flags, req->args.q_entries) != 0) { ocs_textbuf_reset(&textbuf); } else { /* Success */ x = 0; break; } } if (x != 0 ) { /* Retries failed */ ocs_log_test(ocs, "ocs_ddump failed\n"); } else { ptbuf = &textbuf; } } written = 0; if (ptbuf != NULL) { /* Process each textbuf segment */ remaining = req->user_buffer_len; for (idx = 0; remaining; idx++) { n = ocs_textbuf_ext_get_written(ptbuf, idx); if (n < 0) { break; } if ((uint32_t)n >= remaining) { n = (int32_t)remaining; } if (ocs_copy_to_user(req->user_buffer + written, ocs_textbuf_ext_get_buffer(ptbuf, idx), n)) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } written += n; remaining -= (uint32_t)n; } } req->bytes_written = written; if (ptbuf == &textbuf) { ocs_textbuf_free(ocs, &textbuf); } break; } case OCS_IOCTL_DDUMP_CLR_SAVED: ocs_clear_saved_ddump(ocs); break; default: ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); break; } break; } case OCS_IOCTL_CMD_DRIVER_INFO: { ocs_ioctl_driver_info_t *req = (ocs_ioctl_driver_info_t*)addr; ocs_memset(req, 0, sizeof(*req)); req->pci_vendor = ocs->pci_vendor; req->pci_device = ocs->pci_device; ocs_strncpy(req->businfo, ocs->businfo, sizeof(req->businfo)); req->sli_intf = ocs_config_read32(ocs, SLI4_INTF_REG); ocs_strncpy(req->desc, device_get_desc(dev), sizeof(req->desc)); ocs_strncpy(req->fw_rev, ocs->fwrev, sizeof(req->fw_rev)); if (ocs->domain && ocs->domain->sport) { *((uint64_t*)req->hw_addr.fc.wwnn) = ocs_htobe64(ocs->domain->sport->wwnn); *((uint64_t*)req->hw_addr.fc.wwpn) = ocs_htobe64(ocs->domain->sport->wwpn); } ocs_strncpy(req->serialnum, ocs->serialnum, sizeof(req->serialnum)); break; } case OCS_IOCTL_CMD_MGMT_LIST: { 
ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr; ocs_textbuf_t textbuf; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get_list(ocs, &textbuf); if (ocs_textbuf_get_written(&textbuf)) { if (ocs_copy_to_user(req->user_buffer, ocs_textbuf_get_buffer(&textbuf), ocs_textbuf_get_written(&textbuf))) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } } req->bytes_written = ocs_textbuf_get_written(&textbuf); ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_GET_ALL: { ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr; ocs_textbuf_t textbuf; int32_t n; uint32_t idx; uint32_t copied = 0; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get_all(ocs, &textbuf); for (idx = 0; (n = ocs_textbuf_ext_get_written(&textbuf, idx)) > 0; idx++) { if(ocs_copy_to_user(req->user_buffer + copied, ocs_textbuf_ext_get_buffer(&textbuf, idx), ocs_textbuf_ext_get_written(&textbuf, idx))) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); } copied += n; } req->bytes_written = copied; ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_GET: { ocs_ioctl_cmd_get_t* req = (ocs_ioctl_cmd_get_t*)addr; ocs_textbuf_t textbuf; char name[OCS_MGMT_MAX_NAME]; /* Copy the name value in from user space */ if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) { ocs_log_test(ocs, "ocs_copy_from_user failed\n"); ocs_ioctl_free(ocs, req, sizeof(ocs_ioctl_cmd_get_t)); return -EFAULT; } /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->value_length)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get(ocs, name, &textbuf); if (ocs_textbuf_get_written(&textbuf)) { if (ocs_copy_to_user(req->value, ocs_textbuf_get_buffer(&textbuf), 
ocs_textbuf_get_written(&textbuf))) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } } req->value_length = ocs_textbuf_get_written(&textbuf); ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_SET: { char name[OCS_MGMT_MAX_NAME]; char value[OCS_MGMT_MAX_VALUE]; ocs_ioctl_cmd_set_t* req = (ocs_ioctl_cmd_set_t*)addr; // Copy the name in from user space if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) { ocs_log_test(ocs, "Error: copy from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } // Copy the value in from user space if (ocs_copy_from_user(value, req->value, OCS_MGMT_MAX_VALUE)) { ocs_log_test(ocs, "Error: copy from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } req->result = ocs_mgmt_set(ocs, req->name, req->value); break; } case OCS_IOCTL_CMD_MGMT_EXEC: { ocs_ioctl_action_t* req = (ocs_ioctl_action_t*) addr; char action_name[OCS_MGMT_MAX_NAME]; if (ocs_copy_from_user(action_name, req->name, sizeof(action_name))) { ocs_log_test(ocs, "Error: copy req.name from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } req->result = ocs_mgmt_exec(ocs, action_name, req->arg_in, req->arg_in_length, req->arg_out, req->arg_out_length); break; } default: ocs_log_test(ocs, "Error: unknown cmd %#lx\n", cmd); status = -ENOTTY; break; } return status; } static void ocs_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg) { ocs_mgmt_fw_write_result_t *result = arg; result->status = status; result->actual_xfer = actual_write_length; result->change_status = change_status; ocs_sem_v(&(result->semaphore)); } int ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status) { int rc = 0; uint32_t bytes_left; uint32_t xfer_size; uint32_t offset; ocs_dma_t dma; int last = 0; ocs_mgmt_fw_write_result_t result; ocs_sem_init(&(result.semaphore), 0, "fw_write"); bytes_left = buf_len; offset = 0; 
if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) { ocs_log_err(ocs, "ocs_firmware_write: malloc failed\n"); return -ENOMEM; } while (bytes_left > 0) { if (bytes_left > FW_WRITE_BUFSIZE) { xfer_size = FW_WRITE_BUFSIZE; } else { xfer_size = bytes_left; } ocs_memcpy(dma.virt, buf + offset, xfer_size); if (bytes_left == xfer_size) { last = 1; } ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_fw_write_cb, &result); if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { rc = -ENXIO; break; } if (result.actual_xfer == 0 || result.status != 0) { rc = -EFAULT; break; } if (last) { *change_status = result.change_status; } bytes_left -= result.actual_xfer; offset += result.actual_xfer; } ocs_dma_free(ocs, &dma); return rc; } static int ocs_sys_fwupgrade(SYSCTL_HANDLER_ARGS) { char file_name[256] = {0}; char fw_change_status; uint32_t rc = 1; ocs_t *ocs = (ocs_t *)arg1; const struct firmware *fw; const struct ocs_hw_grp_hdr *fw_image; rc = sysctl_handle_string(oidp, file_name, sizeof(file_name), req); if (rc || !req->newptr) return rc; fw = firmware_get(file_name); if (fw == NULL) { device_printf(ocs->dev, "Unable to get Firmware. " "Make sure %s is copied to /boot/modules\n", file_name); return ENOENT; } fw_image = (const struct ocs_hw_grp_hdr *)fw->data; /* Check if firmware provided is compatible with this particular * Adapter of not*/ if ((ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G5) && (ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G6)) { device_printf(ocs->dev, "Invalid FW image found Magic: 0x%x Size: %zu \n", ocs_be32toh(fw_image->magic_number), fw->datasize); rc = -1; goto exit; } if (!strncmp(ocs->fw_version, fw_image->revision, strnlen(fw_image->revision, 16))) { device_printf(ocs->dev, "No update req. " "Firmware is already up to date. 
\n"); rc = 0; goto exit; } device_printf(ocs->dev, "Upgrading Firmware from %s to %s \n", ocs->fw_version, fw_image->revision); rc = ocs_firmware_write(ocs, fw->data, fw->datasize, &fw_change_status); if (rc) { ocs_log_err(ocs, "Firmware update failed with status = %d\n", rc); } else { ocs_log_info(ocs, "Firmware updated successfully\n"); switch (fw_change_status) { case 0x00: device_printf(ocs->dev, "No reset needed, new firmware is active.\n"); break; case 0x01: device_printf(ocs->dev, "A physical device reset (host reboot) is " "needed to activate the new firmware\n"); break; case 0x02: case 0x03: device_printf(ocs->dev, "firmware is resetting to activate the new " "firmware, Host reboot is needed \n"); break; default: ocs_log_warn(ocs, "Unexected value change_status: %d\n", fw_change_status); break; } } exit: /* Release Firmware*/ firmware_put(fw, FIRMWARE_UNLOAD); return rc; } static int ocs_sysctl_wwnn(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; char old[64]; char new[64]; uint64_t *wwnn = NULL; ocs_xport_t *xport = ocs->xport; if (xport->req_wwnn) { wwnn = &xport->req_wwnn; memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx" , (unsigned long long) *wwnn); } else { wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx" , (unsigned long long) ocs_htobe64(*wwnn)); } /*Read wwnn*/ if (!req->newptr) { return (sysctl_handle_string(oidp, old, sizeof(old), req)); } /*Configure port wwn*/ rc = sysctl_handle_string(oidp, new, sizeof(new), req); if (rc) return (rc); if (strncmp(old, new, strlen(old)) == 0) { return 0; } return (set_req_wwnn(ocs, NULL, new)); } static int ocs_sysctl_wwpn(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; char old[64]; char new[64]; uint64_t *wwpn = NULL; ocs_xport_t *xport = ocs->xport; if (xport->req_wwpn) { wwpn = &xport->req_wwpn; memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx",(unsigned long long) 
*wwpn);
	} else {
		wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
		memset(old, 0, sizeof(old));
		snprintf(old, sizeof(old), "0x%llx",(unsigned long long)
			ocs_htobe64(*wwpn));
	}

	/*Read wwpn*/
	if (!req->newptr) {
		return (sysctl_handle_string(oidp, old, sizeof(old), req));
	}

	/*Configure port wwn*/
	rc = sysctl_handle_string(oidp, new, sizeof(new), req);
	if (rc)
		return (rc);

	/* No change requested: nothing to do. */
	if (strncmp(old, new, strlen(old)) == 0) {
		return 0;
	}

	return (set_req_wwpn(ocs, NULL, new));
}

/* sysctl handler: report the currently negotiated topology (read-only). */
static int
ocs_sysctl_current_topology(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t value;

	ocs_hw_get(&ocs->hw, OCS_HW_TOPOLOGY, &value);

	return (sysctl_handle_int(oidp, &value, 0, req));
}

/* sysctl handler: report the current link speed (read-only). */
static int
ocs_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t value;

	ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &value);

	return (sysctl_handle_int(oidp, &value, 0, req));
}

/*
 * sysctl handler: read or change the configured topology.  A new value is
 * forwarded to set_configured_topology() as a decimal string.
 */
static int
ocs_sysctl_config_topology(SYSCTL_HANDLER_ARGS)
{
	uint32_t rc = 1;
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t old_value;
	uint32_t new_value;
	char buf[64];

	ocs_hw_get(&ocs->hw, OCS_HW_CONFIG_TOPOLOGY, &old_value);

	/*Read topo*/
	if (!req->newptr) {
		return (sysctl_handle_int(oidp, &old_value, 0, req));
	}

	/*Configure topology*/
	rc = sysctl_handle_int(oidp, &new_value, 0, req);
	if (rc)
		return (rc);

	/* No change requested: nothing to do. */
	if (new_value == old_value) {
		return 0;
	}

	snprintf(buf, sizeof(buf), "%d",new_value);
	rc = set_configured_topology(ocs, NULL, buf);
	return rc;
}

/*
 * sysctl handler: read or change the configured link speed.  A new value
 * is forwarded to set_configured_speed() as a decimal string.
 */
static int
ocs_sysctl_config_speed(SYSCTL_HANDLER_ARGS)
{
	uint32_t rc = 1;
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t old_value;
	uint32_t new_value;
	char buf[64];

	ocs_hw_get(&ocs->hw, OCS_HW_LINK_CONFIG_SPEED, &old_value);

	/*Read speed*/
	if (!req->newptr) {
		return (sysctl_handle_int(oidp, &old_value, 0, req));
	}

	/*Configure speed*/
	rc = sysctl_handle_int(oidp, &new_value, 0, req);
	if (rc)
		return (rc);

	/* No change requested: nothing to do. */
	if (new_value == old_value) {
		return 0;
	}

	snprintf(buf, sizeof(buf), "%d",new_value);
	rc = set_configured_speed(ocs, NULL,buf);
	return rc;
}

/* sysctl handler: report the port's FC ID (read-only). */
static int
ocs_sysctl_fcid(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	char buf[64];

	memset(buf, 0, sizeof(buf));

	/* The FC ID is only valid once the fabric domain is attached. */
	if (ocs->domain && ocs->domain->attached) {
		snprintf(buf, sizeof(buf), "0x%06x",
			ocs->domain->sport->fc_id);
	}

	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

/*
 * sysctl handler: report the port state as "online"/"offline", or change
 * it by writing one of those strings.
 */
static int
ocs_sysctl_port_state(SYSCTL_HANDLER_ARGS)
{
	char new[256] = {0};
	uint32_t rc = 1;
	ocs_xport_stats_t old;
	ocs_t *ocs = (ocs_t *)arg1;

	ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &old);

	/*Read port state */
	if (!req->newptr) {
		snprintf(new, sizeof(new), "%s",
			(old.value == OCS_XPORT_PORT_OFFLINE) ?
				"offline" : "online");
		return (sysctl_handle_string(oidp, new, sizeof(new), req));
	}

	/*Configure port state*/
	rc = sysctl_handle_string(oidp, new, sizeof(new), req);
	if (rc)
		return (rc);

	if (ocs_strcasecmp(new, "offline") == 0) {
		/* Already offline: nothing to do. */
		if (old.value == OCS_XPORT_PORT_OFFLINE) {
			return (0);
		}
		ocs_log_debug(ocs, "Setting port to %s\n", new);
		rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
		if (rc != 0) {
			ocs_log_err(ocs, "Setting port to offline failed\n");
		}
	} else if (ocs_strcasecmp(new, "online") == 0) {
		/* Already online: nothing to do. */
		if (old.value == OCS_XPORT_PORT_ONLINE) {
			return (0);
		}
		ocs_log_debug(ocs, "Setting port to %s\n", new);
		rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
		if (rc != 0) {
			ocs_log_err(ocs, "Setting port to online failed\n");
		}
	} else {
		ocs_log_err(ocs, "Unsupported link state %s\n", new);
		rc = 1;
	}

	return (rc);
}

/* sysctl handler: report a virtual port's WWPN (read-only). */
static int
ocs_sysctl_vport_wwpn(SYSCTL_HANDLER_ARGS)
{
	ocs_fcport *fcp = oidp->oid_arg1;
	char str_wwpn[64];

	memset(str_wwpn, 0, sizeof(str_wwpn));
	snprintf(str_wwpn, sizeof(str_wwpn), "0x%llx",
		(unsigned long long)fcp->vport->wwpn);

	return (sysctl_handle_string(oidp, str_wwpn, sizeof(str_wwpn), req));
}

/* sysctl handler: report a virtual port's WWNN (read-only). */
static int
ocs_sysctl_vport_wwnn(SYSCTL_HANDLER_ARGS)
{
	ocs_fcport *fcp = oidp->oid_arg1;
	char str_wwnn[64];

	memset(str_wwnn, 0, sizeof(str_wwnn));
	snprintf(str_wwnn, sizeof(str_wwnn), "0x%llx",
		(unsigned long long)fcp->vport->wwnn);

	return
(sysctl_handle_string(oidp, str_wwnn, sizeof(str_wwnn), req)); } /** * @brief Initialize sysctl * * Initialize sysctl so elxsdkutil can query device information. * * @param ocs pointer to ocs * @return void */ static void ocs_sysctl_init(ocs_t *ocs) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(ocs->dev); struct sysctl_oid *tree = device_get_sysctl_tree(ocs->dev); struct sysctl_oid *vtree; const char *str = NULL; char name[16]; uint32_t rev, if_type, family, i; ocs_fcport *fcp = NULL; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "devid", CTLFLAG_RD, NULL, pci_get_devid(ocs->dev), "Device ID"); memset(ocs->modeldesc, 0, sizeof(ocs->modeldesc)); if (0 == pci_get_vpd_ident(ocs->dev, &str)) { snprintf(ocs->modeldesc, sizeof(ocs->modeldesc), "%s", str); } SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "modeldesc", CTLFLAG_RD, ocs->modeldesc, 0, "Model Description"); memset(ocs->serialnum, 0, sizeof(ocs->serialnum)); if (0 == pci_get_vpd_readonly(ocs->dev, "SN", &str)) { snprintf(ocs->serialnum, sizeof(ocs->serialnum), "%s", str); } SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sn", CTLFLAG_RD, ocs->serialnum, 0, "Serial Number"); ocs_hw_get(&ocs->hw, OCS_HW_SLI_REV, &rev); ocs_hw_get(&ocs->hw, OCS_HW_IF_TYPE, &if_type); ocs_hw_get(&ocs->hw, OCS_HW_SLI_FAMILY, &family); memset(ocs->fwrev, 0, sizeof(ocs->fwrev)); snprintf(ocs->fwrev, sizeof(ocs->fwrev), "%s, sli-%d:%d:%x", (char *)ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV), rev, if_type, family); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fwrev", CTLFLAG_RD, ocs->fwrev, 0, "Firmware Revision"); memset(ocs->sli_intf, 0, sizeof(ocs->sli_intf)); snprintf(ocs->sli_intf, sizeof(ocs->sli_intf), "%08x", ocs_config_read32(ocs, SLI4_INTF_REG)); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sli_intf", CTLFLAG_RD, ocs->sli_intf, 0, "SLI Interface"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fw_upgrade", CTLTYPE_STRING | CTLFLAG_RW, (void *)ocs, 0, 
ocs_sys_fwupgrade, "A", "Firmware grp file"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLTYPE_STRING | CTLFLAG_RW, ocs, 0, ocs_sysctl_wwnn, "A", "World Wide Node Name, wwnn should be in the format 0x"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLTYPE_STRING | CTLFLAG_RW, ocs, 0, ocs_sysctl_wwpn, "A", "World Wide Port Name, wwpn should be in the format 0x"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "current_topology", CTLTYPE_UINT | CTLFLAG_RD, ocs, 0, ocs_sysctl_current_topology, "IU", "Current Topology, 1-NPort; 2-Loop; 3-None"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "current_speed", CTLTYPE_UINT | CTLFLAG_RD, ocs, 0, ocs_sysctl_current_speed, "IU", "Current Speed"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "configured_topology", CTLTYPE_UINT | CTLFLAG_RW, ocs, 0, ocs_sysctl_config_topology, "IU", "Configured Topology, 0-Auto; 1-NPort; 2-Loop"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "configured_speed", CTLTYPE_UINT | CTLFLAG_RW, ocs, 0, ocs_sysctl_config_speed, "IU", "Configured Speed, 0-Auto, 2000, 4000, 8000, 16000, 32000"); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "businfo", CTLFLAG_RD, ocs->businfo, 0, "Bus Info"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fcid", CTLTYPE_STRING | CTLFLAG_RD, ocs, 0, ocs_sysctl_fcid, "A", "Port FC ID"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_state", CTLTYPE_STRING | CTLFLAG_RW, ocs, 0, ocs_sysctl_port_state, "A", "configured port state"); for (i = 0; i < ocs->num_vports; i++) { fcp = FCPORT(ocs, i+1); memset(name, 0, sizeof(name)); snprintf(name, sizeof(name), "vport%d", i); vtree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLFLAG_RW, 0, "Virtual port"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO, "wwnn", CTLTYPE_STRING | CTLFLAG_RW, fcp, 0, ocs_sysctl_vport_wwnn, "A", "World Wide Node Name"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO, 
"wwpn", CTLTYPE_STRING | CTLFLAG_RW, fcp, 0, ocs_sysctl_vport_wwpn, "A", "World Wide Port Name"); } } /** * @brief Initialize the debug module * * Parse device hints (similar to Linux module parameters) here. To use, * run the command * kenv hint.ocs.U.P=V * from the command line replacing U with the unit # (0,1,...), * P with the parameter name (debug_mask), and V with the value */ void ocs_debug_attach(void *os) { struct ocs_softc *ocs = os; int error = 0; char *resname = NULL; int32_t unit = INT32_MAX; uint32_t ocs_debug_mask = 0; resname = "debug_mask"; if (0 == (error = resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), resname, &ocs_debug_mask))) { device_printf(ocs->dev, "setting %s to %010x\n", resname, ocs_debug_mask); ocs_debug_enable(ocs_debug_mask); } unit = device_get_unit(ocs->dev); ocs->cdev = make_dev(&ocs_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "ocs%d", unit); if (ocs->cdev) { ocs->cdev->si_drv1 = ocs; } /* initialize sysctl interface */ ocs_sysctl_init(ocs); mtx_init(&ocs->dbg_lock, "ocs_dbg_lock", NULL, MTX_DEF); } /** * @brief Free the debug module */ void ocs_debug_detach(void *os) { struct ocs_softc *ocs = os; mtx_destroy(&ocs->dbg_lock); if (ocs->cdev) { ocs->cdev->si_drv1 = NULL; destroy_dev(ocs->cdev); } } Index: head/sys/dev/ocs_fc/ocs_mgmt.c =================================================================== --- head/sys/dev/ocs_fc/ocs_mgmt.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_mgmt.c (revision 343349) @@ -1,2924 +1,2929 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * The ocs_mgmt top level functions for Fibre Channel. 
*/ /** * @defgroup mgmt Management Functions */ #include "ocs.h" #include "ocs_mgmt.h" #include "ocs_vpd.h" #define SFP_PAGE_SIZE 128 /* Executables*/ static int ocs_mgmt_firmware_write(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_firmware_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_function_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg); static int ocs_mgmt_force_assert(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); #if defined(OCS_INCLUDE_RAMD) static int32_t ocs_mgmt_read_phys(ocs_t *ocs, char *, void *, uint32_t , void *, uint32_t); #endif /* Getters */ static void get_nodes_count(ocs_t *, char *, ocs_textbuf_t*); static void get_desc(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_ipl(ocs_t *, char *, ocs_textbuf_t*); static void get_wwnn(ocs_t *, char *, ocs_textbuf_t*); static void get_wwpn(ocs_t *, char *, ocs_textbuf_t*); static void get_fcid(ocs_t *, char *, ocs_textbuf_t *); static void get_sn(ocs_t *, char *, ocs_textbuf_t*); static void get_pn(ocs_t *, char *, ocs_textbuf_t*); static void get_sli4_intf_reg(ocs_t *, char *, ocs_textbuf_t*); static void get_phy_port_num(ocs_t *, char *, ocs_textbuf_t*); static void get_asic_id(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_device(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_device(ocs_t *, char *, ocs_textbuf_t*); static void get_businfo(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a0(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a2(ocs_t *, char *, ocs_textbuf_t*); 
static void get_hw_rev1(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev3(ocs_t *, char *, ocs_textbuf_t*); static void get_debug_mq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_cq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_wq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_eq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_logmask(ocs_t*, char*, ocs_textbuf_t*); static void get_current_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_current_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_current_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_linkcfg(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwnn(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwpn(ocs_t*, char*, ocs_textbuf_t*); static void get_nodedb_mask(ocs_t*, char*, ocs_textbuf_t*); static void get_profile_list(ocs_t*, char*, ocs_textbuf_t*); static void get_active_profile(ocs_t*, char*, ocs_textbuf_t*); static void get_port_protocol(ocs_t*, char*, ocs_textbuf_t*); static void get_driver_version(ocs_t*, char*, ocs_textbuf_t*); static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t 
*textbuf); static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); /* Setters */ static int set_debug_mq_dump(ocs_t*, char*, char*); static int set_debug_cq_dump(ocs_t*, char*, char*); static int set_debug_wq_dump(ocs_t*, char*, char*); static int set_debug_eq_dump(ocs_t*, char*, char*); static int set_logmask(ocs_t*, char*, char*); static int set_configured_link_state(ocs_t*, char*, char*); static int set_linkcfg(ocs_t*, char*, char*); static int set_nodedb_mask(ocs_t*, char*, char*); static int set_port_protocol(ocs_t*, char*, char*); static int set_active_profile(ocs_t*, char*, char*); static int set_tgt_rscn_delay(ocs_t*, char*, char*); static int set_tgt_rscn_period(ocs_t*, char*, char*); static int set_inject_drop_cmd(ocs_t*, char*, char*); static int set_inject_free_drop_cmd(ocs_t*, char*, char*); static int set_inject_drop_data(ocs_t*, char*, char*); static int set_inject_drop_resp(ocs_t*, char*, char*); static int set_cmd_err_inject(ocs_t*, char*, char*); static int set_cmd_delay_value(ocs_t*, char*, char*); static int set_nv_wwn(ocs_t*, char*, char*); static int set_loglevel(ocs_t*, char*, char*); static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg); #if defined(OCS_INCLUDE_RAMD) static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr); #endif ocs_mgmt_table_entry_t mgmt_table[] = { {"nodes_count", get_nodes_count, NULL, NULL}, {"desc", get_desc, NULL, NULL}, {"fw_rev", get_fw_rev, NULL, NULL}, {"fw_rev2", get_fw_rev2, NULL, NULL}, {"ipl", get_ipl, NULL, NULL}, {"hw_rev1", get_hw_rev1, NULL, NULL}, {"hw_rev2", get_hw_rev2, NULL, NULL}, {"hw_rev3", 
get_hw_rev3, NULL, NULL}, {"wwnn", get_wwnn, NULL, NULL}, {"wwpn", get_wwpn, NULL, NULL}, {"fc_id", get_fcid, NULL, NULL}, {"sn", get_sn, NULL, NULL}, {"pn", get_pn, NULL, NULL}, {"sli4_intf_reg", get_sli4_intf_reg, NULL, NULL}, {"phy_port_num", get_phy_port_num, NULL, NULL}, {"asic_id_reg", get_asic_id, NULL, NULL}, {"pci_vendor", get_pci_vendor, NULL, NULL}, {"pci_device", get_pci_device, NULL, NULL}, {"pci_subsystem_vendor", get_pci_subsystem_vendor, NULL, NULL}, {"pci_subsystem_device", get_pci_subsystem_device, NULL, NULL}, {"businfo", get_businfo, NULL, NULL}, {"sfp_a0", get_sfp_a0, NULL, NULL}, {"sfp_a2", get_sfp_a2, NULL, NULL}, {"profile_list", get_profile_list, NULL, NULL}, {"driver_version", get_driver_version, NULL, NULL}, {"current_speed", get_current_speed, NULL, NULL}, {"current_topology", get_current_topology, NULL, NULL}, {"current_link_state", get_current_link_state, NULL, NULL}, {"chip_type", get_chip_type, NULL, NULL}, {"configured_speed", get_configured_speed, set_configured_speed, NULL}, {"configured_topology", get_configured_topology, set_configured_topology, NULL}, {"configured_link_state", get_configured_link_state, set_configured_link_state, NULL}, {"debug_mq_dump", get_debug_mq_dump, set_debug_mq_dump, NULL}, {"debug_cq_dump", get_debug_cq_dump, set_debug_cq_dump, NULL}, {"debug_wq_dump", get_debug_wq_dump, set_debug_wq_dump, NULL}, {"debug_eq_dump", get_debug_eq_dump, set_debug_eq_dump, NULL}, {"logmask", get_logmask, set_logmask, NULL}, {"loglevel", get_loglevel, set_loglevel, NULL}, {"linkcfg", get_linkcfg, set_linkcfg, NULL}, {"requested_wwnn", get_req_wwnn, set_req_wwnn, NULL}, {"requested_wwpn", get_req_wwpn, set_req_wwpn, NULL}, {"nodedb_mask", get_nodedb_mask, set_nodedb_mask, NULL}, {"port_protocol", get_port_protocol, set_port_protocol, NULL}, {"active_profile", get_active_profile, set_active_profile, NULL}, {"firmware_write", NULL, NULL, ocs_mgmt_firmware_write}, {"firmware_reset", NULL, NULL, ocs_mgmt_firmware_reset}, 
{"function_reset", NULL, NULL, ocs_mgmt_function_reset}, #if defined(OCS_INCLUDE_RAMD) {"read_phys", NULL, NULL, ocs_mgmt_read_phys}, #endif {"force_assert", NULL, NULL, ocs_mgmt_force_assert}, {"tgt_rscn_delay", get_tgt_rscn_delay, set_tgt_rscn_delay, NULL}, {"tgt_rscn_period", get_tgt_rscn_period, set_tgt_rscn_period, NULL}, {"inject_drop_cmd", get_inject_drop_cmd, set_inject_drop_cmd, NULL}, {"inject_free_drop_cmd", get_inject_free_drop_cmd, set_inject_free_drop_cmd, NULL}, {"inject_drop_data", get_inject_drop_data, set_inject_drop_data, NULL}, {"inject_drop_resp", get_inject_drop_resp, set_inject_drop_resp, NULL}, {"cmd_err_inject", get_cmd_err_inject, set_cmd_err_inject, NULL}, {"cmd_delay_value", get_cmd_delay_value, set_cmd_delay_value, NULL}, {"nv_wwpn", get_nv_wwpn, NULL, NULL}, {"nv_wwnn", get_nv_wwnn, NULL, NULL}, {"nv_wwn", NULL, set_nv_wwn, NULL}, {"node_abort_cnt", get_node_abort_cnt, NULL, NULL}, }; /** * @ingroup mgmt * @brief Get a list of options supported by the driver. * * @par Description * This is the top level "get list" handler for the driver. It * performs the following: * - Adds entries to the textbuf for any actions supported by this level in the driver. * - Calls a back-end function to add any actions supported by the back-end. * - Calls a function on each child (domain) to recursively add supported actions. * * @param ocs Pointer to the ocs structure. * @param textbuf Pointer to an ocs_textbuf, which is used to accumulate the results. * * @return Returns 0 on success, or a negative value on failure. 
*/ void ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; uint32_t i; int access; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); for (i=0;imgmt_functions) && (ocs->mgmt_functions->get_list_handler)) { ocs->mgmt_functions->get_list_handler(textbuf, ocs); } if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_list_handler)) { ocs->tgt_mgmt_functions->get_list_handler(textbuf, &(ocs->tgt_ocs)); } /* Have each of my children add their actions */ if (ocs_device_lock_try(ocs) == TRUE) { /* If we get here then we are holding the device lock */ ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_list_handler)) { domain->mgmt_functions->get_list_handler(textbuf, domain); } } ocs_device_unlock(ocs); } ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); } /** * @ingroup mgmt * @brief Return the value of a management item. * * @par Description * This is the top level "get" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches a parameter that is known at this level, * writes the value into textbuf. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). * - If the request has not been fulfilled by the back-end, * passes the request to each of the children (domains) to * have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be answered * when a response has been written into textbuf, indicated by textbuf->buffer_written * being non-zero. * * @param ocs Pointer to the ocs structure. * @param name Name of the status item to be retrieved. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return Returns 0 if the value was found and returned, or -1 if an error occurred. 
*/ int ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; char qualifier[6]; int retval = -1; uint32_t i; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); snprintf(qualifier, sizeof(qualifier), "/ocs"); /* See if the name starts with my qualifier. If not then this request isn't for me */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) + 1; for (i=0;imgmt_functions) && (ocs->mgmt_functions->get_handler)) { retval = ocs->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, ocs); } if (retval != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_handler)) { retval = ocs->tgt_mgmt_functions->get_handler(textbuf, qualifier, (char*)name, &(ocs->tgt_ocs)); } } if (retval != 0) { /* The driver didn't handle it, pass it to each domain */ ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_handler)) { retval = domain->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, domain); } if (retval == 0) { break; } } ocs_device_unlock(ocs); } } ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); return retval; } /** * @ingroup mgmt * @brief Set the value of a mgmt item. * * @par Description * This is the top level "set" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches a parameter that is known at this level, * calls the correct function to change the configuration. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). * - If the request has not been fulfilled by the back-end, passes the request to each of the * children (domains) to have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be handled * if the function returns 0. 
* * @param ocs Pointer to the ocs structure. * @param name Name of the property to be changed. * @param value Requested new value of the property. * * @return Returns 0 if the configuration value was updated, or -1 otherwise. */ int ocs_mgmt_set(ocs_t *ocs, char *name, char *value) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) +1; /* See if it's a value I can set */ for (i=0;imgmt_functions) && (ocs->mgmt_functions->set_handler)) { result = ocs->mgmt_functions->set_handler(qualifier, name, (char *)value, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->set_handler)) { result = ocs->tgt_mgmt_functions->set_handler(qualifier, name, (char *)value, &(ocs->tgt_ocs)); } } /* If I didn't know how to set this config value pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->set_handler)) { result = domain->mgmt_functions->set_handler(qualifier, name, (char*)value, domain); } if (result == 0) { break; } } ocs_device_unlock(ocs); } } return result; } /** * @ingroup mgmt * @brief Perform a management action. * * @par Description * This is the top level "exec" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches an action that is known at this level, * calls the correct function to perform the action. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). 
* - If the request has not been fulfilled by the back-end, passes the request to each of the * children (domains) to have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be handled * if the function returns 0. * * @param ocs Pointer to the ocs structure. * @param action Name of the action to be performed. * @param arg_in Pointer to an argument being passed to the action. * @param arg_in_length Length of the argument pointed to by @c arg_in. * @param arg_out Pointer to an argument being passed to the action. * @param arg_out_length Length of the argument pointed to by @c arg_out. * * @return Returns 0 if the action was completed, or -1 otherwise. * * */ int ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = action + strlen(qualifier) +1; /* See if it's an action I can perform */ for (i=0;imgmt_functions) && (ocs->mgmt_functions->exec_handler)) { result = ocs->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->exec_handler)) { result = ocs->tgt_mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, &(ocs->tgt_ocs)); } } /* If I didn't know how to do this action pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->exec_handler)) { result = domain->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, domain); } if (result 
== 0) { break; } } ocs_device_unlock(ocs); } } return result; } void ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; uint32_t i; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); for (i=0;imgmt_functions) && (ocs->mgmt_functions->get_all_handler)) { ocs->mgmt_functions->get_all_handler(textbuf, ocs); } if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_all_handler)) { ocs->tgt_mgmt_functions->get_all_handler(textbuf, &(ocs->tgt_ocs)); } ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_all_handler)) { domain->mgmt_functions->get_all_handler(textbuf, domain); } } ocs_device_unlock(ocs); ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); } #if defined(OCS_INCLUDE_RAMD) static int32_t ocs_mgmt_read_phys(ocs_t *ocs, char *name, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length) { uint32_t length; char addr_str[80]; uintptr_t target_addr; void* vaddr = NULL; ocs_ramdisc_t **ramdisc_array; uint32_t ramdisc_count; if ((arg_in == NULL) || (arg_in_length == 0) || (arg_out == NULL) || (arg_out_length == 0)) { return -1; } if (arg_in_length > 80) { arg_in_length = 80; } if (ocs_copy_from_user(addr_str, arg_in, arg_in_length)) { ocs_log_test(ocs, "Failed to copy addr from user\n"); return -EFAULT; } target_addr = (uintptr_t)ocs_strtoul(addr_str, NULL, 0); /* addr_str must be the physical address of a buffer that was reported * in an SGL. Search ramdiscs looking for a segment that contains that * physical address */ if (ocs->tgt_ocs.use_global_ramd) { /* Only one target */ ramdisc_count = ocs->tgt_ocs.rdisc_count; ramdisc_array = ocs->tgt_ocs.rdisc; vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr); } else { /* Multiple targets. 
Each target is on a sport */ uint32_t domain_idx; for (domain_idx=0; domain_idxdomain_instance_count; domain_idx++) { ocs_domain_t *domain; uint32_t sport_idx; domain = ocs_domain_get_instance(ocs, domain_idx); for (sport_idx=0; sport_idx < domain->sport_instance_count; sport_idx++) { ocs_sport_t *sport; sport = ocs_sport_get_instance(domain, sport_idx); ramdisc_count = sport->tgt_sport.rdisc_count; ramdisc_array = sport->tgt_sport.rdisc; vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr); if (vaddr != NULL) { break; } } } } length = arg_out_length; if (vaddr != NULL) { if (ocs_copy_to_user(arg_out, vaddr, length)) { ocs_log_test(ocs, "Failed to copy buffer to user\n"); return -EFAULT; } return 0; } else { return -EFAULT; } } /* * This function searches a target for a given physical address. * The target is made up of a number of LUNs, each represented by * a ocs_ramdisc_t. */ static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr) { void *vaddr = NULL; uint32_t ramdisc_idx; /* Check each ramdisc */ for (ramdisc_idx=0; ramdisc_idxsegment_count; segment_idx++) { ramdisc_segment_t *segment = rdisc->segments[segment_idx]; uintptr_t segment_start; uintptr_t segment_end; uint32_t offset; segment_start = segment->data_segment.phys; segment_end = segment->data_segment.phys + segment->data_segment.size - 1; if ((target_addr >= segment_start) && (target_addr <= segment_end)) { /* Found the target address */ offset = target_addr - segment_start; vaddr = (uint32_t*)segment->data_segment.virt + offset; } if (rdisc->dif_separate) { segment_start = segment->dif_segment.phys; segment_end = segment->data_segment.phys + segment->dif_segment.size - 1; if ((target_addr >= segment_start) && (target_addr <= segment_end)) { /* Found the target address */ offset = target_addr - segment_start; vaddr = (uint32_t*)segment->dif_segment.virt + offset; } } if (vaddr != NULL) { break; } } if (vaddr != NULL) { 
break; } } return vaddr; } #endif static int32_t ocs_mgmt_firmware_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; int index = 0; uint8_t bus, dev, func; ocs_t *other_ocs; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_log_debug(ocs, "Resetting port\n"); if (ocs_hw_reset(&ocs->hw, OCS_HW_RESET_FIRMWARE)) { ocs_log_test(ocs, "failed to reset port\n"); rc = -1; } else { ocs_log_debug(ocs, "successfully reset port\n"); /* now reset all functions on the same device */ while ((other_ocs = ocs_get_instance(index++)) != NULL) { uint8_t other_bus, other_dev, other_func; ocs_get_bus_dev_func(other_ocs, &other_bus, &other_dev, &other_func); if ((bus == other_bus) && (dev == other_dev)) { if (other_ocs->hw.state != OCS_HW_STATE_UNINITIALIZED) { other_ocs->hw.state = OCS_HW_STATE_QUEUES_ALLOCATED; } ocs_device_detach(other_ocs); if (ocs_device_attach(other_ocs)) { ocs_log_err(other_ocs, "device %d attach failed \n", index); rc = -1; } } } } return rc; } static int32_t ocs_mgmt_function_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int32_t rc; ocs_device_detach(ocs); rc = ocs_device_attach(ocs); return rc; } static int32_t ocs_mgmt_firmware_write(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; uint32_t bytes_left; uint32_t xfer_size; uint32_t offset; uint8_t *userp; ocs_dma_t dma; int last = 0; ocs_mgmt_fw_write_result_t result; uint32_t change_status = 0; char status_str[80]; ocs_sem_init(&(result.semaphore), 0, "fw_write"); bytes_left = buf_len; offset = 0; userp = (uint8_t *)buf; if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) { ocs_log_err(ocs, "ocs_mgmt_firmware_write: malloc failed"); return -ENOMEM; } while (bytes_left > 0) { if (bytes_left > FW_WRITE_BUFSIZE) { xfer_size = FW_WRITE_BUFSIZE; } else { xfer_size = bytes_left; } /* Copy xfer_size bytes from user space to kernel buffer */ 
if (ocs_copy_from_user(dma.virt, userp, xfer_size)) { rc = -EFAULT; break; } /* See if this is the last block */ if (bytes_left == xfer_size) { last = 1; } /* Send the HW command */ ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_mgmt_fw_write_cb, &result); /* Wait for semaphore to be signaled when the command completes * TODO: Should there be a timeout on this? If so, how long? */ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { ocs_log_err(ocs, "ocs_sem_p failed\n"); rc = -ENXIO; break; } if (result.actual_xfer == 0) { ocs_log_test(ocs, "actual_write_length is %d\n", result.actual_xfer); rc = -EFAULT; break; } /* Check status */ if (result.status != 0) { ocs_log_test(ocs, "write returned status %d\n", result.status); rc = -EFAULT; break; } if (last) { change_status = result.change_status; } bytes_left -= result.actual_xfer; offset += result.actual_xfer; userp += result.actual_xfer; } /* Create string with status and copy to userland */ if ((arg_out_length > 0) && (arg_out != NULL)) { if (arg_out_length > sizeof(status_str)) { arg_out_length = sizeof(status_str); } ocs_memset(status_str, 0, sizeof(status_str)); ocs_snprintf(status_str, arg_out_length, "%d", change_status); if (ocs_copy_to_user(arg_out, status_str, arg_out_length)) { ocs_log_test(ocs, "copy to user failed for change_status\n"); } } ocs_dma_free(ocs, &dma); return rc; } static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg) { ocs_mgmt_fw_write_result_t *result = arg; result->status = status; result->actual_xfer = actual_write_length; result->change_status = change_status; ocs_sem_v(&(result->semaphore)); } typedef struct ocs_mgmt_sfp_result { ocs_sem_t semaphore; ocs_lock_t cb_lock; int32_t running; int32_t status; uint32_t bytes_read; uint32_t page_data[32]; } ocs_mgmt_sfp_result_t; static void ocs_mgmt_sfp_cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg) { ocs_mgmt_sfp_result_t 
*result = arg; ocs_t *ocs = os; ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 2) { /* get_sfp() has timed out */ ocs_unlock(&(result->cb_lock)); ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return; } result->status = status; result->bytes_read = bytes_read; ocs_memcpy(&result->page_data, data, SFP_PAGE_SIZE); ocs_sem_v(&(result->semaphore)); ocs_unlock(&(result->cb_lock)); } static int32_t ocs_mgmt_get_sfp(ocs_t *ocs, uint16_t page, void *buf, uint32_t buf_len) { int rc = 0; ocs_mgmt_sfp_result_t *result = ocs_malloc(ocs, sizeof(ocs_mgmt_sfp_result_t), OCS_M_ZERO | OCS_M_NOWAIT);; ocs_sem_init(&(result->semaphore), 0, "get_sfp"); ocs_lock_init(ocs, &(result->cb_lock), "get_sfp"); /* Send the HW command */ ocs_hw_get_sfp(&ocs->hw, page, ocs_mgmt_sfp_cb, result); /* Wait for semaphore to be signaled when the command completes */ if (ocs_sem_p(&(result->semaphore), 5 * 1000 * 1000) != 0) { /* Timed out, callback will free memory */ ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 1) { ocs_log_err(ocs, "ocs_sem_p failed\n"); ocs_unlock(&(result->cb_lock)); return (-ENXIO); } /* sfp_cb() has already executed, proceed as normal */ ocs_unlock(&(result->cb_lock)); } /* Check status */ if (result->status != 0) { ocs_log_test(ocs, "read_transceiver_data returned status %d\n", result->status); rc = -EFAULT; } if (rc == 0) { rc = (result->bytes_read > buf_len ? 
buf_len : result->bytes_read); /* Copy the results back to the supplied buffer */ ocs_memcpy(buf, result->page_data, rc); } ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return rc; } static int32_t ocs_mgmt_force_assert(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { ocs_assert(FALSE, 0); } static void get_nodes_count(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "nodes_count", "%d", xport->nodes_count); } static void get_driver_version(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "driver_version", ocs->driver_version); } static void get_desc(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "desc", ocs->desc); } static void get_fw_rev(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV)); } static void get_fw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev2", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV2)); } static void get_ipl(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "ipl", ocs_hw_get_ptr(&ocs->hw, OCS_HW_IPL)); } static void get_hw_rev1(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV1, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev1", "%u", value); } static void get_hw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV2, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev2", "%u", value); } static void get_hw_rev3(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV3, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev3", "%u", value); } static void get_wwnn(ocs_t *ocs, 
char *name, ocs_textbuf_t *textbuf) { uint64_t *wwnn; wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%llx", (unsigned long long)ocs_htobe64(*wwnn)); } static void get_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint64_t *wwpn; wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%llx", (unsigned long long)ocs_htobe64(*wwpn)); } static void get_fcid(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { if (ocs->domain && ocs->domain->attached) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", ocs->domain->sport->fc_id); } else { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "UNKNOWN"); } } static void get_sn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_SERIALNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sn", sn_buf); } } static void get_pn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PARTNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", sn_buf); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", ocs->model); } } static void get_sli4_intf_reg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "sli4_intf_reg", "0x%04x", ocs_config_read32(ocs, SLI4_INTF_REG)); } static void get_phy_port_num(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char *phy_port = NULL; phy_port = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PORTNUM); if (phy_port) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", phy_port); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", "unknown"); } } 
/*
 * Management property "get" handlers.  Each handler emits a single named
 * value into the supplied textbuf; the "name" argument is unused.
 */

/* Emit the raw SLI4 ASIC ID register, read from PCI config space. */
static void
get_asic_id(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "asic_id_reg", "0x%04x",
	    ocs_config_read32(ocs, SLI4_ASIC_ID_REG));
}

/*
 * Decode the adapter family / ASIC generation / revision registers into a
 * human-readable chip-type string (e.g. "BE3-R A0", "Lancer G6").
 *
 * NOTE(review): the ocs_snprintf(tmp_buf, 2, ...) calls leave room for one
 * character plus the NUL, so numeric revisions greater than 9 are truncated
 * to a single digit.
 */
static void
get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint32_t family;
	uint32_t asic_id;
	uint32_t asic_gen_num;
	uint32_t asic_rev_num;
	uint32_t rev_id;
	char result_buf[80];
	char tmp_buf[80];

	/* Family is bits 11:8 of the SLI4 interface register. */
	family = (ocs_config_read32(ocs, SLI4_INTF_REG) & 0x00000f00) >> 8;
	asic_id = ocs_config_read32(ocs, SLI4_ASIC_ID_REG);
	asic_rev_num = asic_id & 0xff;
	asic_gen_num = (asic_id & 0xff00) >> 8;
	rev_id = ocs_config_read32(ocs, SLI4_PCI_CLASS_REVISION) & 0xff;

	switch(family) {
	case 0x00:
		/* BE2 */
		ocs_strncpy(result_buf, "BE2 A", sizeof(result_buf));
		ocs_snprintf(tmp_buf, 2, "%d", rev_id);
		strcat(result_buf, tmp_buf);
		break;
	case 0x01:
		/* BE3 */
		ocs_strncpy(result_buf, "BE3", sizeof(result_buf));
		if (rev_id >= 0x10) {
			strcat(result_buf, "-R");
		}
		ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A');
		strcat(result_buf, tmp_buf);
		ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f);
		strcat(result_buf, tmp_buf);
		break;
	case 0x02:
		/* Skyhawk A0 */
		ocs_strncpy(result_buf, "Skyhawk A0", sizeof(result_buf));
		break;
	case 0x0a:
		/* Lancer A0 */
		ocs_strncpy(result_buf, "Lancer A", sizeof(result_buf));
		ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f);
		strcat(result_buf, tmp_buf);
		break;
	case 0x0b:
		/* Lancer B0 or D0 */
		ocs_strncpy(result_buf, "Lancer", sizeof(result_buf));
		ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A');
		strcat(result_buf, tmp_buf);
		ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f);
		strcat(result_buf, tmp_buf);
		break;
	case 0x0c:
		ocs_strncpy(result_buf, "Lancer G6", sizeof(result_buf));
		break;
	case 0x0f:
		/* Refer to ASIC_ID */
		switch(asic_gen_num) {
		case 0x00:
			ocs_strncpy(result_buf, "BE2", sizeof(result_buf));
			break;
		case 0x03:
			ocs_strncpy(result_buf, "BE3-R", sizeof(result_buf));
			break;
		case 0x04:
			ocs_strncpy(result_buf, "Skyhawk-R", sizeof(result_buf));
			break;
		case 0x05:
			ocs_strncpy(result_buf, "Corsair", sizeof(result_buf));
			break;
		case 0x0b:
			ocs_strncpy(result_buf, "Lancer", sizeof(result_buf));
			break;
		case 0x0c:
			ocs_strncpy(result_buf, "LancerG6", sizeof(result_buf));
			break;
		default:
			ocs_strncpy(result_buf, "Unknown", sizeof(result_buf));
		}
		/* Append " <rev letter><rev digit>" unless family is unknown. */
		if (ocs_strcmp(result_buf, "Unknown") != 0) {
			ocs_snprintf(tmp_buf, 3, " %c", ((asic_rev_num & 0xf0) >> 4) + 'A');
			strcat(result_buf, tmp_buf);
			ocs_snprintf(tmp_buf, 2, "%d", asic_rev_num & 0x0f);
			strcat(result_buf, tmp_buf);
		}
		break;
	default:
		ocs_strncpy(result_buf, "Unknown", sizeof(result_buf));
	}

	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "chip_type", result_buf);
}

/* PCI identity properties (read-only). */
static void
get_pci_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_vendor", "0x%04x",
	    ocs->pci_vendor);
}

static void
get_pci_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_device", "0x%04x",
	    ocs->pci_device);
}

static void
get_pci_subsystem_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_vendor", "0x%04x",
	    ocs->pci_subsystem_vendor);
}

static void
get_pci_subsystem_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_device", "0x%04x",
	    ocs->pci_subsystem_device);
}

/*
 * RSCN delay/period are stored in milliseconds but reported in seconds.
 * NOTE(review): "%ld" is paired with an unsigned long argument here;
 * "%lu" would be the matching conversion specifier.
 */
static void
get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_delay", "%ld",
	    (unsigned long)ocs->tgt_rscn_delay_msec / 1000);
}

static void
get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_period", "%ld",
	    (unsigned long)ocs->tgt_rscn_period_msec / 1000);
}

/* Error-injection flags: each reports 1 only when its mode is active. */
static void
get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_cmd", "%d",
	    (ocs->err_injection == INJECT_DROP_CMD ? 1:0));
}

static void
get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_free_drop_cmd", "%d",
	    (ocs->err_injection == INJECT_FREE_DROPPED ? 1:0));
}

static void
get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_data", "%d",
	    (ocs->err_injection == INJECT_DROP_DATA ? 1:0));
}

static void
get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_resp", "%d",
	    (ocs->err_injection == INJECT_DROP_RESP ? 1:0));
}

static void
get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_err_inject", "0x%02x",
	    ocs->cmd_err_inject);
}

static void
get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_delay_value", "%ld",
	    (unsigned long)ocs->delay_value_msec);
}

/* Emit the cached bus-info string. */
static void
get_businfo(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "businfo", ocs->businfo);
}

/*
 * Read SFP page 0xa0 and emit it as a space-separated hex dump
 * ("xx xx ... "), or "(unknown)" if the read fails.  Both temporary
 * buffers are freed on every path.
 */
static void
get_sfp_a0(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint8_t *page_data;
	char *buf;
	int i;
	int32_t bytes_read;

	page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (page_data == NULL) {
		return;
	}

	/* 3 chars per byte ("xx ") plus terminating NUL */
	buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT);
	if (buf == NULL) {
		ocs_free(ocs, page_data, SFP_PAGE_SIZE);
		return;
	}

	bytes_read = ocs_mgmt_get_sfp(ocs, 0xa0, page_data, SFP_PAGE_SIZE);

	if (bytes_read <= 0) {
		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", "(unknown)");
	} else {
		char *d = buf;
		uint8_t *s = page_data;
		int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
		int bytes_added;

		for (i = 0; i < bytes_read; i++) {
			bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
			++s;
			d += bytes_added;
			buffer_remaining -= bytes_added;
		}
		*d = '\0';

		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", buf);
	}

	ocs_free(ocs, page_data, SFP_PAGE_SIZE);
	ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1);
}

/* Same as get_sfp_a0() but for SFP diagnostics page 0xa2. */
static void
get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint8_t *page_data;
	char *buf;
	int i;
	int32_t bytes_read;

	page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (page_data == NULL) {
		return;
	}

	buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT);
	if (buf == NULL) {
		ocs_free(ocs, page_data, SFP_PAGE_SIZE);
		return;
	}

	bytes_read = ocs_mgmt_get_sfp(ocs, 0xa2, page_data, SFP_PAGE_SIZE);

	if (bytes_read <= 0) {
		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", "(unknown)");
	} else {
		char *d = buf;
		uint8_t *s = page_data;
		int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
		int bytes_added;

		for (i=0; i < bytes_read; i++) {
			bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
			++s;
			d += bytes_added;
			buffer_remaining -= bytes_added;
		}
		*d = '\0';

		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", buf);
	}

	ocs_free(ocs, page_data, SFP_PAGE_SIZE);
	ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1);
}

/* Queue-dump debug toggles: report whether each global debug flag is set. */
static void
get_debug_mq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_mq_dump",
	    ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP));
}

static void
get_debug_cq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_cq_dump",
	    ocs_debug_is_enabled(OCS_DEBUG_ENABLE_CQ_DUMP));
}

static void
get_debug_wq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_wq_dump",
	    ocs_debug_is_enabled(OCS_DEBUG_ENABLE_WQ_DUMP));
}

static void
get_debug_eq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_eq_dump",
	    ocs_debug_is_enabled(OCS_DEBUG_ENABLE_EQ_DUMP));
}

/* Emit the per-device log mask (read-write). */
static void
get_logmask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "logmask", "0x%02x",
	    ocs->logmask);
}
/* Emit the global driver log level (read-write module variable). */
static void
get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "loglevel", "%d", loglevel);
}

/* Emit the current negotiated link speed as reported by the HW layer. */
static void
get_current_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint32_t value;

	ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_speed", "%d", value);
}

/* Emit the configured link speed; 0 means auto-negotiate. */
static void
get_configured_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint32_t value;

	ocs_hw_get(&(ocs->hw), OCS_HW_LINK_CONFIG_SPEED, &value);
	if (value == 0) {
		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_speed", "auto");
	} else {
		ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_speed", "%d", value);
	}
}

/* Emit the current link topology code from the HW layer. */
static void
get_current_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint32_t value;

	ocs_hw_get(&(ocs->hw), OCS_HW_TOPOLOGY, &value);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_topology", "%d", value);
}

/* Emit the configured (requested) link topology code. */
static void
get_configured_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	uint32_t value;

	ocs_hw_get(&(ocs->hw), OCS_HW_CONFIG_TOPOLOGY, &value);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_topology", "%d", value);
}

/*
 * Emit "online"/"offline" for the current transport port status.
 * Emits nothing if ocs_xport_status() itself fails.
 */
static void
get_current_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_xport_stats_t value;

	if (ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value) == 0) {
		if (value.value == OCS_XPORT_PORT_ONLINE) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "online");
		} else {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "offline");
		}
	}
}

/* Emit "online"/"offline" for the configured (requested) port state. */
static void
get_configured_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_xport_stats_t value;

	if (ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &value) == 0) {
		if (value.value == OCS_XPORT_PORT_ONLINE) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "online");
		} else {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "offline");
		}
	}
}

/**
 * @brief HW link config enum to mgmt string value mapping.
 *
 * This structure provides a mapping from the ocs_hw_linkcfg_e
 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
 * control) to the mgmt string that is passed in by the mgmt application
 * (elxsdkutil).
 */
typedef struct ocs_mgmt_linkcfg_map_s {
	ocs_hw_linkcfg_e linkcfg;
	const char *mgmt_str;
} ocs_mgmt_linkcfg_map_t;

static ocs_mgmt_linkcfg_map_t mgmt_linkcfg_map[] = {
	{OCS_HW_LINKCFG_4X10G, OCS_CONFIG_LINKCFG_4X10G},
	{OCS_HW_LINKCFG_1X40G, OCS_CONFIG_LINKCFG_1X40G},
	{OCS_HW_LINKCFG_2X16G, OCS_CONFIG_LINKCFG_2X16G},
	{OCS_HW_LINKCFG_4X8G, OCS_CONFIG_LINKCFG_4X8G},
	{OCS_HW_LINKCFG_4X1G, OCS_CONFIG_LINKCFG_4X1G},
	{OCS_HW_LINKCFG_2X10G, OCS_CONFIG_LINKCFG_2X10G},
	{OCS_HW_LINKCFG_2X10G_2X8G, OCS_CONFIG_LINKCFG_2X10G_2X8G}};

/**
 * @brief Get the HW linkcfg enum from the mgmt config string.
 *
 * NOTE(review): the comparison uses ocs_strlen(mgmt_str), so a prefix
 * match on the table entry is accepted.
 *
 * @param mgmt_str mgmt string value.
 *
 * @return Returns the HW linkcfg enum corresponding to clp_str.
 */
static ocs_hw_linkcfg_e
ocs_hw_linkcfg_from_mgmt(const char *mgmt_str)
{
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) {
		if (ocs_strncmp(mgmt_linkcfg_map[i].mgmt_str, mgmt_str,
		    ocs_strlen(mgmt_str)) == 0) {
			return mgmt_linkcfg_map[i].linkcfg;
		}
	}
	return OCS_HW_LINKCFG_NA;
}

/**
 * @brief Get the mgmt string value from the HW linkcfg enum.
 *
 * @param linkcfg HW linkcfg enum.
 *
 * @return Returns the mgmt string value corresponding to the given HW linkcfg.
 */
static const char *
ocs_mgmt_from_hw_linkcfg(ocs_hw_linkcfg_e linkcfg)
{
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) {
		if (mgmt_linkcfg_map[i].linkcfg == linkcfg) {
			return mgmt_linkcfg_map[i].mgmt_str;
		}
	}
	return OCS_CONFIG_LINKCFG_UNKNOWN;
}

/**
 * @brief Link configuration callback argument
 */
typedef struct ocs_mgmt_linkcfg_arg_s {
	ocs_sem_t semaphore;
	int32_t status;
	ocs_hw_linkcfg_e linkcfg;
} ocs_mgmt_linkcfg_arg_t;

/**
 * @brief Get linkcfg config value
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param textbuf The textbuf to which the result is written.
 *
 * @return None.
 */
static void
get_linkcfg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	const char *linkcfg_str = NULL;
	uint32_t value;
	ocs_hw_linkcfg_e linkcfg;

	ocs_hw_get(&ocs->hw, OCS_HW_LINKCFG, &value);
	linkcfg = (ocs_hw_linkcfg_e)value;
	linkcfg_str = ocs_mgmt_from_hw_linkcfg(linkcfg);
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "linkcfg", linkcfg_str);
}

/**
 * @brief Get requested WWNN config value
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param textbuf The textbuf to which the result is written.
 *
 * @return None.
 */
static void
get_req_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_xport_t *xport = ocs->xport;

	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwnn", "0x%llx",
	    (unsigned long long)xport->req_wwnn);
}

/**
 * @brief Get requested WWPN config value
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param textbuf The textbuf to which the result is written.
 *
 * @return None.
 */
static void
get_req_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_xport_t *xport = ocs->xport;

	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwpn", "0x%llx",
	    (unsigned long long)xport->req_wwpn);
}

/**
 * @brief Get requested nodedb_mask config value
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param textbuf The textbuf to which the result is written.
 *
 * @return None.
 */
static void
get_nodedb_mask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "nodedb_mask", "0x%08x",
	    ocs->nodedb_mask);
}

/**
 * @brief Set requested WWNN value.
 *
 * Accepts "default" (clears to 0) or a parseable WWN string, then bounces
 * the port (offline/online) so the new WWNN takes effect.
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param value Value to which the linkcfg is set.
 *
 * @return Returns 0 on success.
 */
int
set_req_wwnn(ocs_t *ocs, char *name, char *value)
{
	int rc;
	uint64_t wwnn;

	if (ocs_strcasecmp(value, "default") == 0) {
		wwnn = 0;
	} else if (parse_wwn(value, &wwnn) != 0) {
		ocs_log_test(ocs, "Invalid WWNN: %s\n", value);
		return 1;
	}

	rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWNN_SET, wwnn);
	if(rc) {
		ocs_log_test(ocs, "OCS_XPORT_WWNN_SET failed: %d\n", rc);
		return rc;
	}

	/* Bounce the port so the new WWNN is used. */
	rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
	if (rc) {
		ocs_log_test(ocs, "port offline failed : %d\n", rc);
	}

	rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
	if (rc) {
		ocs_log_test(ocs, "port online failed : %d\n", rc);
	}

	return rc;
}

/**
 * @brief Set requested WWNP value.
 *
 * Accepts "default" (clears to 0) or a parseable WWN string, then bounces
 * the port (offline/online) so the new WWPN takes effect.
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param value Value to which the linkcfg is set.
 *
 * @return Returns 0 on success.
 */
int
set_req_wwpn(ocs_t *ocs, char *name, char *value)
{
	int rc;
	uint64_t wwpn;

	if (ocs_strcasecmp(value, "default") == 0) {
		wwpn = 0;
	} else if (parse_wwn(value, &wwpn) != 0) {
		ocs_log_test(ocs, "Invalid WWPN: %s\n", value);
		return 1;
	}

	rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWPN_SET, wwpn);
	if(rc) {
		ocs_log_test(ocs, "OCS_XPORT_WWPN_SET failed: %d\n", rc);
		return rc;
	}

	/* Bounce the port so the new WWPN is used. */
	rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
	if (rc) {
		ocs_log_test(ocs, "port offline failed : %d\n", rc);
	}

	rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
	if (rc) {
		ocs_log_test(ocs, "port online failed : %d\n", rc);
	}

	return rc;
}

/**
 * @brief Set node debug mask value
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param value Value to which the nodedb_mask is set.
 *
 * @return Returns 0 on success.
 */
static int
set_nodedb_mask(ocs_t *ocs, char *name, char *value)
{
	ocs->nodedb_mask = ocs_strtoul(value, 0, 0);
	return 0;
}

/**
 * @brief Set linkcfg config value.
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Not used.
 * @param value Value to which the linkcfg is set.
 *
 * @return Returns 0 on success.
 */
static int
set_linkcfg(ocs_t *ocs, char *name, char *value)
{
	ocs_hw_linkcfg_e linkcfg;
	ocs_mgmt_linkcfg_arg_t cb_arg;
	ocs_hw_rtn_e status;

	ocs_sem_init(&cb_arg.semaphore, 0, "mgmt_linkcfg");

	/* translate mgmt linkcfg string to HW linkcfg enum */
	linkcfg = ocs_hw_linkcfg_from_mgmt(value);

	/* set HW linkcfg */
	status = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SET_LINK_CONFIG,
	    (uintptr_t)linkcfg, ocs_mgmt_linkcfg_cb, &cb_arg);
	if (status) {
		ocs_log_test(ocs, "ocs_hw_set_linkcfg failed\n");
		return -1;
	}

	/* Block until ocs_mgmt_linkcfg_cb() signals completion. */
	if (ocs_sem_p(&cb_arg.semaphore, OCS_SEM_FOREVER)) {
		ocs_log_err(ocs, "ocs_sem_p failed\n");
		return -1;
	}

	if (cb_arg.status) {
		ocs_log_test(ocs, "failed to set linkcfg from HW status=%d\n",
		    cb_arg.status);
		return -1;
	}

	return 0;
}

/**
 * @brief Linkcfg callback
 *
 * Stores the HW status/linkcfg into the caller's argument block and wakes
 * the waiter in set_linkcfg().
 *
 * @param status Result of the linkcfg get/set operation.
 * @param value Resulting linkcfg value.
 * @param arg Callback argument.
 *
 * @return None.
 */
static void
ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_mgmt_linkcfg_arg_t *cb_arg = (ocs_mgmt_linkcfg_arg_t *)arg;

	cb_arg->status = status;
	cb_arg->linkcfg = (ocs_hw_linkcfg_e)value;
	ocs_sem_v(&cb_arg->semaphore);
}

/* Toggle the MQ dump debug flag; accepts "true"/"false" only. */
static int
set_debug_mq_dump(ocs_t *ocs, char *name, char *value)
{
	int result;

	if (ocs_strcasecmp(value, "false") == 0) {
		ocs_debug_disable(OCS_DEBUG_ENABLE_MQ_DUMP);
		result = 0;
	} else if (ocs_strcasecmp(value, "true") == 0) {
		ocs_debug_enable(OCS_DEBUG_ENABLE_MQ_DUMP);
		result = 0;
	} else {
		result = -1;
	}

	return result;
}

/* Toggle the CQ dump debug flag; accepts "true"/"false" only. */
static int
set_debug_cq_dump(ocs_t *ocs, char *name, char *value)
{
	int result;

	if (ocs_strcasecmp(value, "false") == 0) {
		ocs_debug_disable(OCS_DEBUG_ENABLE_CQ_DUMP);
		result = 0;
	} else if (ocs_strcasecmp(value, "true") == 0) {
		ocs_debug_enable(OCS_DEBUG_ENABLE_CQ_DUMP);
		result = 0;
	} else {
		result = -1;
	}

	return result;
}

/* Toggle the WQ dump debug flag; accepts "true"/"false" only. */
static int
set_debug_wq_dump(ocs_t *ocs, char *name, char *value)
{
	int result;

	if (ocs_strcasecmp(value, "false") == 0) {
		ocs_debug_disable(OCS_DEBUG_ENABLE_WQ_DUMP);
		result = 0;
	} else if (ocs_strcasecmp(value, "true") == 0) {
		ocs_debug_enable(OCS_DEBUG_ENABLE_WQ_DUMP);
		result = 0;
	} else {
		result = -1;
	}

	return result;
}

/* Toggle the EQ dump debug flag; accepts "true"/"false" only. */
static int
set_debug_eq_dump(ocs_t *ocs, char *name, char *value)
{
	int result;

	if (ocs_strcasecmp(value, "false") == 0) {
		ocs_debug_disable(OCS_DEBUG_ENABLE_EQ_DUMP);
		result = 0;
	} else if (ocs_strcasecmp(value, "true") == 0) {
		ocs_debug_enable(OCS_DEBUG_ENABLE_EQ_DUMP);
		result = 0;
	} else {
		result = -1;
	}

	return result;
}

/* Set the per-device log mask from a numeric string. */
static int
set_logmask(ocs_t *ocs, char *name, char *value)
{
	ocs->logmask = ocs_strtoul(value, NULL, 0);
	return 0;
}

/* Set the global driver log level from a numeric string. */
static int
set_loglevel(ocs_t *ocs, char *name, char *value)
{
	loglevel = ocs_strtoul(value, NULL, 0);
	return 0;
}

/*
 * Set the configured link speed.  Only 0 (auto) and the listed fixed
 * speeds are accepted; the port is bounced offline/online to apply it.
 */
int
set_configured_speed(ocs_t *ocs, char *name, char *value)
{
	int result = 0;
	ocs_hw_rtn_e hw_rc;
	int xport_rc;
	uint32_t spd;

	spd = ocs_strtoul(value, NULL, 0);

	if ((spd != 0) && (spd != 2000) && (spd != 4000) && (spd != 8000) &&
	    (spd != 16000) && (spd != 32000)) {
		ocs_log_test(ocs, "unsupported speed %d\n", spd);
		return 1;
	}

	ocs_log_debug(ocs, "Taking port offline\n");
	xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
	if (xport_rc != 0) {
		ocs_log_test(ocs, "Port offline failed\n");
		result = 1;
	} else {
		ocs_log_debug(ocs, "Setting port to speed %d\n", spd);
		hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, spd);
		if (hw_rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_test(ocs, "Speed set failed\n");
			result = 1;
		}

		/* If we failed to set the speed we still want to try to bring
		 * the port back online */
		ocs_log_debug(ocs, "Bringing port online\n");
		xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
		if (xport_rc != 0) {
			result = 1;
		}
	}

	return result;
}

/*
 * Set the configured topology code (must be below OCS_HW_TOPOLOGY_NONE);
 * the port is bounced offline/online to apply it.
 */
int
set_configured_topology(ocs_t *ocs, char *name, char *value)
{
	int result = 0;
	ocs_hw_rtn_e hw_rc;
	int xport_rc;
	uint32_t topo;

	topo = ocs_strtoul(value, NULL, 0);
	if (topo >= OCS_HW_TOPOLOGY_NONE) {
		return 1;
	}

	ocs_log_debug(ocs, "Taking port offline\n");
	xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
	if (xport_rc != 0) {
		ocs_log_test(ocs, "Port offline failed\n");
		result = 1;
	} else {
		ocs_log_debug(ocs, "Setting port to topology %d\n", topo);
		hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topo);
		if (hw_rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_test(ocs, "Topology set failed\n");
			result = 1;
		}

		/* If we failed to set the topology we still want to try to bring
		 * the port back online */
		ocs_log_debug(ocs, "Bringing port online\n");
		xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
		if (xport_rc != 0) {
			result = 1;
		}
	}

	return result;
}

/* Set the configured link state; accepts "online" or "offline" only. */
static int
set_configured_link_state(ocs_t *ocs, char *name, char *value)
{
	int result = 0;
	int xport_rc;

	if (ocs_strcasecmp(value, "offline") == 0) {
		ocs_log_debug(ocs, "Setting port to %s\n", value);
		xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
		if (xport_rc != 0) {
			ocs_log_test(ocs, "Setting port to offline failed\n");
			result = -1;
		}
	} else if (ocs_strcasecmp(value, "online") == 0) {
		ocs_log_debug(ocs, "Setting port to %s\n", value);
		xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
		if (xport_rc != 0) {
			ocs_log_test(ocs, "Setting port to online failed\n");
			result = -1;
		}
	} else {
		ocs_log_test(ocs, "Unsupported link state \"%s\"\n", value);
		result = -1;
	}

	return result;
}

/* Completion context for the get-port-protocol HW request. */
typedef struct ocs_mgmt_get_port_protocol_result {
	ocs_sem_t semaphore;
	int32_t status;
	ocs_hw_port_protocol_e port_protocol;
} ocs_mgmt_get_port_protocol_result_t;

/* HW completion: record status/protocol and wake the waiting thread. */
static void
ocs_mgmt_get_port_protocol_cb(int32_t status,
			      ocs_hw_port_protocol_e port_protocol,
			      void *arg)
{
	ocs_mgmt_get_port_protocol_result_t *result = arg;

	result->status = status;
	result->port_protocol = port_protocol;
	ocs_sem_v(&(result->semaphore));
}

/*
 * Query and emit the current port protocol (iSCSI/FCoE/FC/Other).
 * NOTE(review): if the semaphore wait fails, result.status is read
 * without having been set by the callback; the switch also has no
 * default case, so an unexpected protocol value emits nothing.
 */
static void
get_port_protocol(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_mgmt_get_port_protocol_result_t result;
	uint8_t bus;
	uint8_t dev;
	uint8_t func;

	ocs_sem_init(&(result.semaphore), 0, "get_port_protocol");

	ocs_get_bus_dev_func(ocs, &bus, &dev, &func);

	if(ocs_hw_get_port_protocol(&ocs->hw, func, ocs_mgmt_get_port_protocol_cb,
	    &result) == OCS_HW_RTN_SUCCESS) {
		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
			/* Undefined failure */
			ocs_log_err(ocs, "ocs_sem_p failed\n");
		}
		if (result.status == 0) {
			switch (result.port_protocol) {
			case OCS_HW_PORT_PROTOCOL_ISCSI:
				ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol",
				    "iSCSI");
				break;
			case OCS_HW_PORT_PROTOCOL_FCOE:
				ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol",
				    "FCoE");
				break;
			case OCS_HW_PORT_PROTOCOL_FC:
				ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol",
				    "FC");
				break;
			case OCS_HW_PORT_PROTOCOL_OTHER:
				ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol",
				    "Other");
				break;
			}
		} else {
			ocs_log_test(ocs, "getting port profile status 0x%x\n",
			    result.status);
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol",
			    "Unknown");
		}
	}
}

/* Completion context for the set-port-protocol HW request. */
typedef struct ocs_mgmt_set_port_protocol_result {
	ocs_sem_t semaphore;
	int32_t status;
} ocs_mgmt_set_port_protocol_result_t;

/*
 * HW completion: record status and wake the waiting thread.
 * NOTE(review): arg is cast to the *get* result type although callers
 * pass an ocs_mgmt_set_port_protocol_result_t; both structs begin with
 * (semaphore, status), so only those common members may be touched here.
 */
static void
ocs_mgmt_set_port_protocol_cb(int32_t status, void *arg)
{
	ocs_mgmt_get_port_protocol_result_t *result = arg;

	result->status = status;
	ocs_sem_v(&(result->semaphore));
}

/**
 * @brief Set port protocol
 * @par Description
 * This is a management action handler to set the current
 * port protocol. Input value should be one of iSCSI,
 * FC, or FCoE.
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Name of the action being performed.
 * @param value The value to be assigned
 *
 * @return Returns 0 on success, non-zero on failure.
*/ static int32_t set_port_protocol(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_port_protocol_result_t result; int32_t rc = 0; ocs_hw_port_protocol_e new_protocol; uint8_t bus; uint8_t dev; uint8_t func; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_sem_init(&(result.semaphore), 0, "set_port_protocol"); if (ocs_strcasecmp(value, "iscsi") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; } else if (ocs_strcasecmp(value, "fc") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FC; } else if (ocs_strcasecmp(value, "fcoe") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FCOE; } else { return -1; } rc = ocs_hw_set_port_protocol(&ocs->hw, new_protocol, func, ocs_mgmt_set_port_protocol_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); - rc = -ENXIO; + return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_profile_list_result_s { ocs_sem_t semaphore; int32_t status; ocs_hw_profile_list_t *list; } ocs_mgmt_get_profile_list_result_t; static void ocs_mgmt_get_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; result->list = list; ocs_sem_v(&(result->semaphore)); } /** * @brief Get list of profiles * @par Description * This is a management action handler to get the list of * profiles supported by the SLI port. Although the spec says * that all SLI platforms support this, only Skyhawk actually * has a useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. 
* * @return none */ static void get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_get_profile_list_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_profile_list"); if(ocs_hw_get_profile_list(&ocs->hw, ocs_mgmt_get_profile_list_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. */ #define MAX_LINE_SIZE 520 #define BUFFER_SIZE MAX_LINE_SIZE*40 char *result_buf; char result_line[MAX_LINE_SIZE]; uint32_t bytes_left; uint32_t i; result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO); bytes_left = BUFFER_SIZE; for (i=0; inum_descriptors; i++) { sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id, result.list->descriptors[i].profile_description); if (strlen(result_line) < bytes_left) { strcat(result_buf, result_line); bytes_left -= strlen(result_line); } } ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "profile_list", result_buf); ocs_free(ocs, result_buf, BUFFER_SIZE); ocs_free(ocs, result.list, sizeof(ocs_hw_profile_list_t)); } else { ocs_log_test(ocs, "getting profile list status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_get_active_profile_result { ocs_sem_t semaphore; int32_t status; uint32_t active_profile_id; } ocs_mgmt_get_active_profile_result_t; static void ocs_mgmt_get_active_profile_cb(int32_t status, uint32_t active_profile, void *ul_arg) { ocs_mgmt_get_active_profile_result_t *result = ul_arg; result->status = status; result->active_profile_id = active_profile; ocs_sem_v(&(result->semaphore)); } #define MAX_PROFILE_LENGTH 5 /** * @brief Get active profile * @par Description * This is a management action handler to get the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. 
* @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_active_profile(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[MAX_PROFILE_LENGTH]; ocs_mgmt_get_active_profile_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_active_profile"); if(ocs_hw_get_active_profile(&ocs->hw, ocs_mgmt_get_active_profile_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. */ sprintf(result_string, "0x%02x", result.active_profile_id); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "active_profile", result_string); } else { ocs_log_test(ocs, "getting active profile status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_set_active_profile_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_active_profile_result_t; static void ocs_mgmt_set_active_profile_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set active profile * @par Description * This is a management action handler to set the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param value Requested new value of the property. * * @return Returns 0 on success, non-zero on failure. 
*/ static int32_t set_active_profile(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_active_profile_result_t result; int32_t rc = 0; int32_t new_profile; new_profile = ocs_strtoul(value, NULL, 0); ocs_sem_init(&(result.semaphore), 0, "set_active_profile"); rc = ocs_hw_set_active_profile(&ocs->hw, ocs_mgmt_set_active_profile_cb, new_profile, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); - rc = -ENXIO; + return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_nvparms_result { ocs_sem_t semaphore; int32_t status; uint8_t wwpn[8]; uint8_t wwnn[8]; uint8_t hard_alpa; uint32_t preferred_d_id; } ocs_mgmt_get_nvparms_result_t; static void ocs_mgmt_get_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *ul_arg) { ocs_mgmt_get_nvparms_result_t *result = ul_arg; result->status = status; ocs_memcpy(result->wwpn, wwpn, sizeof(result->wwpn)); ocs_memcpy(result->wwnn, wwnn, sizeof(result->wwnn)); result->hard_alpa = hard_alpa; result->preferred_d_id = preferred_d_id; ocs_sem_v(&(result->semaphore)); } /** * @brief Get wwpn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. 
* * @return none */ static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwpn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. Copy wwpn from result struct to result string */ sprintf(result_string, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwpn[0], result.wwpn[1], result.wwpn[2], result.wwpn[3], result.wwpn[4], result.wwpn[5], result.wwpn[6], result.wwpn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwpn", result_string); } else { ocs_log_test(ocs, "getting wwpn status 0x%x\n", result.status); } } } /** * @brief Get wwnn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwnn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. 
Copy wwnn from result struct to result string */ ocs_snprintf(result_string, sizeof(result_string), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwnn[0], result.wwnn[1], result.wwnn[2], result.wwnn[3], result.wwnn[4], result.wwnn[5], result.wwnn[6], result.wwnn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwnn", result_string); } else { ocs_log_test(ocs, "getting wwnn status 0x%x\n", result.status); } } } /** * @brief Get accumulated node abort counts * @par Description Get the sum of all nodes abort count. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return None. */ static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t abort_counts = 0; ocs_domain_t *domain; ocs_sport_t *sport; ocs_node_t *node; if (ocs_device_lock_try(ocs) != TRUE) { /* Didn't get the lock */ return; } /* Here the Device lock is held */ ocs_list_foreach(&ocs->domain_list, domain) { if (ocs_domain_lock_try(domain) != TRUE) { /* Didn't get the lock */ ocs_device_unlock(ocs); return; } /* Here the Domain lock is held */ ocs_list_foreach(&domain->sport_list, sport) { if (ocs_sport_lock_try(sport) != TRUE) { /* Didn't get the lock */ ocs_domain_unlock(domain); ocs_device_unlock(ocs); return; } /* Here the sport lock is held */ ocs_list_foreach(&sport->node_list, node) { abort_counts += node->abort_cnt; } ocs_sport_unlock(sport); } ocs_domain_unlock(domain); } ocs_device_unlock(ocs); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "node_abort_cnt", "%d" , abort_counts); } typedef struct ocs_mgmt_set_nvparms_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_nvparms_result_t; static void ocs_mgmt_set_nvparms_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set wwn * @par Description Sets the Non-volatile 
worldwide names, * if provided. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param wwn_p Requested new WWN values. * * @return Returns 0 on success, non-zero on failure. */ static int32_t set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p) { ocs_mgmt_get_nvparms_result_t result; uint8_t new_wwpn[8]; uint8_t new_wwnn[8]; char *wwpn_p = NULL; char *wwnn_p = NULL; int32_t rc = -1; - int wwpn; - int wwnn; + int wwpn = 0; + int wwnn = 0; int i; /* This is a read-modify-write operation, so first we have to read * the current values */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn1"); rc = ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "getting nvparms status 0x%x\n", result.status); return -1; } } /* wwn_p contains wwpn_p@wwnn_p values */ if (wwn_p != NULL) { wwpn_p = ocs_strsep(&wwn_p, "@"); wwnn_p = wwn_p; } - wwpn = ocs_strcmp(wwpn_p, "NA"); - wwnn = ocs_strcmp(wwnn_p, "NA"); + if (wwpn_p != NULL) { + wwpn = ocs_strcmp(wwpn_p, "NA"); + } + + if (wwnn_p != NULL) { + wwnn = ocs_strcmp(wwnn_p, "NA"); + } /* Parse the new WWPN */ if ((wwpn_p != NULL) && (wwpn != 0)) { if (ocs_sscanf(wwpn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwpn[0]), &(new_wwpn[1]), &(new_wwpn[2]), &(new_wwpn[3]), &(new_wwpn[4]), &(new_wwpn[5]), &(new_wwpn[6]), &(new_wwpn[7])) != 8) { ocs_log_test(ocs, "can't parse WWPN %s\n", wwpn_p); return -1; } } /* Parse the new WWNN */ if ((wwnn_p != NULL) && (wwnn != 0 )) { if (ocs_sscanf(wwnn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwnn[0]), &(new_wwnn[1]), &(new_wwnn[2]), &(new_wwnn[3]), &(new_wwnn[4]), &(new_wwnn[5]), &(new_wwnn[6]), &(new_wwnn[7])) != 8) { ocs_log_test(ocs, "can't parse WWNN %s\n", wwnn_p); return -1; } } for (i = 0; i < 
8; i++) { /* Use active wwpn, if new one is not provided */ if (wwpn == 0) { new_wwpn[i] = result.wwpn[i]; } /* Use active wwnn, if new one is not provided */ if (wwnn == 0) { new_wwnn[i] = result.wwnn[i]; } } /* Modify the nv_wwnn and nv_wwpn, then write it back */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn2"); rc = ocs_hw_set_nvparms(&ocs->hw, ocs_mgmt_set_nvparms_cb, new_wwpn, new_wwnn, result.hard_alpa, result.preferred_d_id, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "setting wwn status 0x%x\n", result.status); return -1; } } return rc; } static int set_tgt_rscn_delay(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_delay_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_tgt_rscn_period(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_period_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_CMD); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_free_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_FREE_DROPPED); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_data(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_DATA); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_resp(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? 
NO_ERR_INJECT : INJECT_DROP_RESP); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_cmd_err_inject(ocs_t *ocs, char *name, char *value) { ocs->cmd_err_inject = ocs_strtoul(value, NULL, 0); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_cmd_delay_value(ocs_t *ocs, char *name, char *value) { ocs->delay_value_msec = ocs_strtoul(value, NULL, 0); ocs->err_injection = (ocs->delay_value_msec == 0 ? NO_ERR_INJECT : INJECT_DELAY_CMD); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } /** * @brief parse a WWN from a string into a 64-bit value * * Given a pointer to a string, parse the string into a 64-bit * WWN value. The format of the string must be xx:xx:xx:xx:xx:xx:xx:xx * * @param wwn_in pointer to the string to be parsed * @param wwn_out pointer to uint64_t in which to put the parsed result * * @return 0 if successful, non-zero if the WWN is malformed and couldn't be parsed */ int parse_wwn(char *wwn_in, uint64_t *wwn_out) { uint8_t byte0; uint8_t byte1; uint8_t byte2; uint8_t byte3; uint8_t byte4; uint8_t byte5; uint8_t byte6; uint8_t byte7; int rc; rc = ocs_sscanf(wwn_in, "0x%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx", &byte0, &byte1, &byte2, &byte3, &byte4, &byte5, &byte6, &byte7); if (rc == 8) { *wwn_out = ((uint64_t)byte0 << 56) | ((uint64_t)byte1 << 48) | ((uint64_t)byte2 << 40) | ((uint64_t)byte3 << 32) | ((uint64_t)byte4 << 24) | ((uint64_t)byte5 << 16) | ((uint64_t)byte6 << 8) | ((uint64_t)byte7); return 0; } else { return 1; } } static char *mode_string(int mode); /** * @ingroup mgmt * @brief Generate the beginning of a numbered section in a management XML document. * * @par Description * This function begins a section. The XML information is appended to * the textbuf. This form of the function is used for sections that might have * multiple instances, such as a node or a SLI Port (sport). The index number * is appended to the name. 
* * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ extern void ocs_mgmt_start_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "<%s instance=\"%d\">\n", name, index); } /** * @ingroup mgmt * @brief Generate the beginning of an unnumbered section in a management XML document. * * @par Description * This function begins a section. The XML information is appended to * the textbuf. This form of the function is used for sections that have * a single instance only. Therefore, no index number is needed. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None. */ extern void ocs_mgmt_start_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "<%s>\n", name); } /** * @ingroup mgmt * @brief Generate the end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None. */ void ocs_mgmt_end_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "\n", name); } /** * @ingroup mgmt * @brief Generate the indexed end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ void ocs_mgmt_end_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "\n", name); } /** * @ingroup mgmt * @brief Generate a property, with no value, in a management XML document. 
* * @par Description * This function generates a property name. The XML information is appended to * the textbuf. This form of the function is used by the list functions * when the property name only (and not the current value) is given. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * * @return None. */ void ocs_mgmt_emit_property_name(ocs_textbuf_t *textbuf, int mode, const char *name) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\"/>\n", name, mode_string(mode)); } /** * @ingroup mgmt * @brief Generate a property with a string value in a management XML document. * * @par Description * This function generates a property name and a string value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Value of the property. * * @return None. */ void ocs_mgmt_emit_string(ocs_textbuf_t *textbuf, int mode, const char *name, const char *value) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), value, name); } /** * @ingroup mgmt * @brief Generate a property with an integer value in a management XML document. * * @par Description * This function generates a property name and an integer value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param fmt A printf format for formatting the integer value. * * @return none */ void ocs_mgmt_emit_int(ocs_textbuf_t *textbuf, int mode, const char *name, const char *fmt, ...) 
{ va_list ap; char valuebuf[64]; va_start(ap, fmt); ocs_vsnprintf(valuebuf, sizeof(valuebuf), fmt, ap); va_end(ap); ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), valuebuf, name); } /** * @ingroup mgmt * @brief Generate a property with a boolean value in a management XML document. * * @par Description * This function generates a property name and a boolean value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Boolean value to be added to the textbuf. * * @return None. */ void ocs_mgmt_emit_boolean(ocs_textbuf_t *textbuf, int mode, const char *name, int value) { char *valuebuf = value ? "true" : "false"; ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), valuebuf, name); } static char *mode_string(int mode) { static char mode_str[4]; mode_str[0] = '\0'; if (mode & MGMT_MODE_RD) { strcat(mode_str, "r"); } if (mode & MGMT_MODE_WR) { strcat(mode_str, "w"); } if (mode & MGMT_MODE_EX) { strcat(mode_str, "x"); } return mode_str; } Index: head/sys/dev/ocs_fc/ocs_node.c =================================================================== --- head/sys/dev/ocs_fc/ocs_node.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_node.c (revision 343349) @@ -1,2376 +1,2376 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * OCS driver remote node handler. This file contains code that is shared * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes. */ /*! * @defgroup node_common Node common support * @defgroup node_alloc Node allocation */ #include "ocs.h" #include "ocs_els.h" #include "ocs_device.h" #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]" #define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8) #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag #define scsi_io_printf(io, fmt, ...) 
ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \ io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__) void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node); void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node); int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node); int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node); int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length, void *node); static ocs_mgmt_functions_t node_mgmt_functions = { .get_list_handler = ocs_mgmt_node_list, .get_handler = ocs_mgmt_node_get, .get_all_handler = ocs_mgmt_node_get_all, .set_handler = ocs_mgmt_node_set, .exec_handler = ocs_mgmt_node_exec, }; /** * @ingroup node_common * @brief Device node state machine wait for all ELS's to * complete * * Abort all ELS's for given node. * * @param node node for which ELS's will be aborted */ void ocs_node_abort_all_els(ocs_node_t *node) { ocs_io_t *els; ocs_io_t *els_next; ocs_node_cb_t cbdata = {0}; ocs_node_hold_frames(node); ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) { ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name); ocs_unlock(&node->active_ios_lock); cbdata.els = els; ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata); ocs_lock(&node->active_ios_lock); } ocs_unlock(&node->active_ios_lock); } /** * @ingroup node_common * @brief Handle remote node events from HW * * Handle remote node events from HW. Essentially the HW event is translated into * a node state machine event that is posted to the affected node. * * @param arg pointer to ocs * @param event HW event to proceoss * @param data application specific data (pointer to the affected node) * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data) { ocs_t *ocs = arg; ocs_sm_event_t sm_event = OCS_EVT_LAST; ocs_remote_node_t *rnode = data; ocs_node_t *node = rnode->node; switch (event) { case OCS_HW_NODE_ATTACH_OK: sm_event = OCS_EVT_NODE_ATTACH_OK; break; case OCS_HW_NODE_ATTACH_FAIL: sm_event = OCS_EVT_NODE_ATTACH_FAIL; break; case OCS_HW_NODE_FREE_OK: sm_event = OCS_EVT_NODE_FREE_OK; break; case OCS_HW_NODE_FREE_FAIL: sm_event = OCS_EVT_NODE_FREE_FAIL; break; default: ocs_log_test(ocs, "unhandled event %#x\n", event); return -1; } /* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */ if ((node->node_group != NULL) && ((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) { ocs_node_t *n = NULL; uint8_t attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK; ocs_sport_lock(node->sport); { ocs_list_foreach(&node->sport->node_list, n) { if (node == n) { continue; } ocs_node_lock(n); if ((!n->rnode.attached) && (node->node_group == n->node_group)) { n->rnode.attached = attach_ok; node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n", n->rnode.index, attach_ok ? 
"ok" : "fail"); ocs_node_post_event(n, sm_event, NULL); } ocs_node_unlock(n); } } ocs_sport_unlock(node->sport); } ocs_node_post_event(node, sm_event, NULL); return 0; } /** * @ingroup node_alloc * @brief Find an FC node structure given the FC port ID * * @param sport the SPORT to search * @param port_id FC port ID * * @return pointer to the object or NULL if not found */ ocs_node_t * ocs_node_find(ocs_sport_t *sport, uint32_t port_id) { ocs_node_t *node; ocs_assert(sport->lookup, NULL); ocs_sport_lock(sport); node = spv_get(sport->lookup, port_id); ocs_sport_unlock(sport); return node; } /** * @ingroup node_alloc * @brief Find an FC node structure given the WWPN * * @param sport the SPORT to search * @param wwpn the WWPN to search for (host endian) * * @return pointer to the object or NULL if not found */ ocs_node_t * ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn) { ocs_node_t *node = NULL;; ocs_assert(sport, NULL); ocs_sport_lock(sport); ocs_list_foreach(&sport->node_list, node) { if (ocs_node_get_wwpn(node) == wwpn) { ocs_sport_unlock(sport); return node; } } ocs_sport_unlock(sport); return NULL; } /** * @ingroup node_alloc * @brief allocate node object pool * * A pool of ocs_node_t objects is allocated. * * @param ocs pointer to driver instance context * @param node_count count of nodes to allocate * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_node_create_pool(ocs_t *ocs, uint32_t node_count) { ocs_xport_t *xport = ocs->xport; uint32_t i; ocs_node_t *node; uint32_t max_sge; uint32_t num_sgl; uint64_t max_xfer_size; int32_t rc; xport->nodes_count = node_count; xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT); if (xport->nodes == NULL) { ocs_log_err(ocs, "node ptrs allocation failed"); return -1; } if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) && 0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) { - max_xfer_size = max_sge * num_sgl; + max_xfer_size = (max_sge * (uint64_t)num_sgl); } else { max_xfer_size = 65536; } if (max_xfer_size > 65536) max_xfer_size = 65536; ocs_list_init(&xport->nodes_free_list, ocs_node_t, link); for (i = 0; i < node_count; i ++) { node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT); if (node == NULL) { ocs_log_err(ocs, "node allocation failed"); goto error; } /* Assign any persistent field values */ node->instance_index = i; node->max_wr_xfer_size = max_xfer_size; node->rnode.indicator = UINT32_MAX; rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16); if (rc) { ocs_free(ocs, node, sizeof(ocs_node_t)); ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc); goto error; } xport->nodes[i] = node; ocs_list_add_tail(&xport->nodes_free_list, node); } return 0; error: ocs_node_free_pool(ocs); return -1; } /** * @ingroup node_alloc * @brief free node object pool * * The pool of previously allocated node objects is freed * * @param ocs pointer to driver instance context * * @return none */ void ocs_node_free_pool(ocs_t *ocs) { ocs_xport_t *xport = ocs->xport; ocs_node_t *node; uint32_t i; if (!xport->nodes) return; ocs_device_lock(ocs); for (i = 0; i < xport->nodes_count; i ++) { node = xport->nodes[i]; if (node) { /* free sparam_dma_buf */ ocs_dma_free(ocs, &node->sparm_dma_buf); ocs_free(ocs, node, sizeof(ocs_node_t)); } xport->nodes[i] = NULL; } ocs_free(ocs, xport->nodes, (xport->nodes_count * 
sizeof(ocs_node_t *))); ocs_device_unlock(ocs); } /** * @ingroup node_alloc * @brief return pointer to node object given instance index * * A pointer to the node object given by an instance index is returned. * * @param ocs pointer to driver instance context * @param index instance index * * @return returns pointer to node object, or NULL */ ocs_node_t * ocs_node_get_instance(ocs_t *ocs, uint32_t index) { ocs_xport_t *xport = ocs->xport; ocs_node_t *node = NULL; if (index >= (xport->nodes_count)) { ocs_log_test(ocs, "invalid index: %d\n", index); return NULL; } node = xport->nodes[index]; return node->attached ? node : NULL; } /** * @ingroup node_alloc * @brief Allocate an fc node structure and add to node list * * @param sport pointer to the SPORT from which this node is allocated * @param port_id FC port ID of new node * @param init Port is an inititiator (sent a plogi) * @param targ Port is potentially a target * * @return pointer to the object or NULL if none available */ ocs_node_t * ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ) { int32_t rc; ocs_node_t *node = NULL; uint32_t instance_index; uint32_t max_wr_xfer_size; ocs_t *ocs = sport->ocs; ocs_xport_t *xport = ocs->xport; ocs_dma_t sparm_dma_buf; ocs_assert(sport, NULL); if (sport->shutting_down) { ocs_log_debug(ocs, "node allocation when shutting down %06x", port_id); return NULL; } ocs_device_lock(ocs); node = ocs_list_remove_head(&xport->nodes_free_list); ocs_device_unlock(ocs); if (node == NULL) { ocs_log_err(ocs, "node allocation failed %06x", port_id); return NULL; } /* Save persistent values across memset zero */ instance_index = node->instance_index; max_wr_xfer_size = node->max_wr_xfer_size; sparm_dma_buf = node->sparm_dma_buf; ocs_memset(node, 0, sizeof(*node)); node->instance_index = instance_index; node->max_wr_xfer_size = max_wr_xfer_size; node->sparm_dma_buf = sparm_dma_buf; node->rnode.indicator = UINT32_MAX; node->sport = sport; ocs_sport_lock(sport); 
node->ocs = ocs; node->init = init; node->targ = targ; rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport); if (rc) { ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc); ocs_sport_unlock(sport); /* Return back to pool. */ ocs_device_lock(ocs); ocs_list_add_tail(&xport->nodes_free_list, node); ocs_device_unlock(ocs); return NULL; } ocs_list_add_tail(&sport->node_list, node); ocs_node_lock_init(node); ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index); ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link); ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index); ocs_list_init(&node->active_ios, ocs_io_t, link); ocs_list_init(&node->els_io_pend_list, ocs_io_t, link); ocs_list_init(&node->els_io_active_list, ocs_io_t, link); ocs_scsi_io_alloc_enable(node); /* zero the service parameters */ ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size); node->rnode.node = node; node->sm.app = node; node->evtdepth = 0; ocs_node_update_display_name(node); spv_set(sport->lookup, port_id, node); ocs_sport_unlock(sport); node->mgmt_functions = &node_mgmt_functions; return node; } /** * @ingroup node_alloc * @brief free a node structure * * The node structure given by 'node' is free'd * * @param node the node to free * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_node_free(ocs_node_t *node) { ocs_sport_t *sport; ocs_t *ocs; ocs_xport_t *xport; ocs_hw_rtn_e rc = 0; ocs_node_t *ns = NULL; int post_all_free = FALSE; ocs_assert(node, -1); ocs_assert(node->sport, -1); ocs_assert(node->ocs, -1); sport = node->sport; ocs_assert(sport, -1); ocs = node->ocs; ocs_assert(ocs->xport, -1); xport = ocs->xport; node_printf(node, "Free'd\n"); if(node->refound) { /* * Save the name server node. 
We will send fake RSCN event at * the end to handle ignored RSCN event during node deletion */ ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER); } /* Remove from node list */ ocs_sport_lock(sport); ocs_list_remove(&sport->node_list, node); /* Free HW resources */ if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) { ocs_log_test(ocs, "ocs_hw_node_free failed: %d\n", rc); rc = -1; } /* if the gidpt_delay_timer is still running, then delete it */ if (ocs_timer_pending(&node->gidpt_delay_timer)) { ocs_del_timer(&node->gidpt_delay_timer); } if (node->fcp2device) { ocs_del_crn(node); } /* remove entry from sparse vector list */ if (sport->lookup == NULL) { ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n"); ocs_sport_unlock(sport); return -1; } spv_set(sport->lookup, node->rnode.fc_id, NULL); /* * If the node_list is empty, then post a ALL_CHILD_NODES_FREE event to the sport, * after the lock is released. The sport may be free'd as a result of the event. */ if (ocs_list_empty(&sport->node_list)) { post_all_free = TRUE; } ocs_sport_unlock(sport); if (post_all_free) { ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL); } node->sport = NULL; node->sm.current_state = NULL; ocs_node_lock_free(node); ocs_lock_free(&node->pend_frames_lock); ocs_lock_free(&node->active_ios_lock); /* return to free list */ ocs_device_lock(ocs); ocs_list_add_tail(&xport->nodes_free_list, node); ocs_device_unlock(ocs); if(ns != NULL) { /* sending fake RSCN event to name server node */ ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL); } return rc; } /** * @brief free memory resources of a node object * * The node object's child objects are freed after which the * node object is freed. 
* * @param node pointer to a node object * * @return none */ void ocs_node_force_free(ocs_node_t *node) { ocs_io_t *io; ocs_io_t *next; ocs_io_t *els; ocs_io_t *els_next; /* shutdown sm processing */ ocs_sm_disable(&node->sm); ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name)); ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name)); /* Let the backend cleanup if needed */ ocs_scsi_notify_node_force_free(node); ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->active_ios, io, next) { ocs_list_remove(&io->node->active_ios, io); ocs_io_free(node->ocs, io); } ocs_unlock(&node->active_ios_lock); /* free all pending ELS IOs */ ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) { /* can't call ocs_els_io_free() because lock is held; cleanup manually */ ocs_list_remove(&node->els_io_pend_list, els); ocs_io_free(node->ocs, els); } ocs_unlock(&node->active_ios_lock); /* free all active ELS IOs */ ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) { /* can't call ocs_els_io_free() because lock is held; cleanup manually */ ocs_list_remove(&node->els_io_active_list, els); ocs_io_free(node->ocs, els); } ocs_unlock(&node->active_ios_lock); /* manually purge pending frames (if any) */ ocs_node_purge_pending(node); ocs_node_free(node); } /** * @ingroup node_common * @brief Perform HW call to attach a remote node * * @param node pointer to node object * * @return 0 on success, non-zero otherwise */ int32_t ocs_node_attach(ocs_node_t *node) { int32_t rc = 0; ocs_sport_t *sport = node->sport; ocs_domain_t *domain = sport->domain; ocs_t *ocs = node->ocs; if (!domain->attached) { ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n"); return -1; } /* Update node->wwpn/wwnn */ ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node)); ocs_node_build_eui_name(node->wwnn, 
sizeof(node->wwnn), ocs_node_get_wwnn(node)); if (ocs->enable_hlm) { ocs_node_group_init(node); } ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4); /* take lock to protect node->rnode.attached */ ocs_node_lock(node); rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf); if (OCS_HW_RTN_IS_ERROR(rc)) { ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc); } ocs_node_unlock(node); return rc; } /** * @ingroup node_common * @brief Generate text for a node's fc_id * * The text for a nodes fc_id is generated, either as a well known name, or a 6 digit * hex value. * * @param fc_id fc_id * @param buffer text buffer * @param buffer_length text buffer length in bytes * * @return none */ void ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length) { switch (fc_id) { case FC_ADDR_FABRIC: ocs_snprintf(buffer, buffer_length, "fabric"); break; case FC_ADDR_CONTROLLER: ocs_snprintf(buffer, buffer_length, "fabctl"); break; case FC_ADDR_NAMESERVER: ocs_snprintf(buffer, buffer_length, "nserve"); break; default: if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) { ocs_snprintf(buffer, buffer_length, "dctl%02x", FC_ADDR_GET_DOMAIN_CTRL(fc_id)); } else { ocs_snprintf(buffer, buffer_length, "%06x", fc_id); } break; } } /** * @brief update the node's display name * * The node's display name is updated, sometimes needed because the sport part * is updated after the node is allocated. * * @param node pointer to the node object * * @return none */ void ocs_node_update_display_name(ocs_node_t *node) { uint32_t port_id = node->rnode.fc_id; ocs_sport_t *sport = node->sport; char portid_display[16]; ocs_assert(sport); ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display)); ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display); } /** * @brief cleans up an XRI for the pending link services accept by aborting the * XRI if required. * *

 * Description:

* This function is called when the LS accept is not sent. * * @param node Node for which should be cleaned up */ void ocs_node_send_ls_io_cleanup(ocs_node_t *node) { ocs_t *ocs = node->ocs; if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) { ocs_assert(node->ls_acc_io); ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n", node->display_name, node->ls_acc_oxid); node->ls_acc_io->hio = NULL; ocs_els_io_free(node->ls_acc_io); node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; } } /** * @ingroup node_common * @brief state: shutdown a node * * A node is shutdown, * * @param ctx remote node sm context * @param evt event to process * @param arg per event optional argument * * @return returns NULL * * @note */ void * __ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { int32_t rc; std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: { ocs_node_hold_frames(node); ocs_assert(ocs_node_active_ios_empty(node), NULL); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL); /* by default, we will be freeing node after we unwind */ node->req_free = 1; switch (node->shutdown_reason) { case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO: /* sm: if shutdown reason is implicit logout / ocs_node_attach * Node shutdown b/c of PLOGI received when node already * logged in. We have PLOGI service parameters, so submit * node attach; we won't be freeing this node */ /* currently, only case for implicit logo is PLOGI recvd. 
Thus, * node's ELS IO pending list won't be empty (PLOGI will be on it) */ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL); node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n"); ocs_scsi_io_alloc_enable(node); /* Re-attach node with the same HW node resources */ node->req_free = 0; rc = ocs_node_attach(node); ocs_node_transition(node, __ocs_d_wait_node_attach, NULL); if (rc == OCS_HW_RTN_SUCCESS_SYNC) { ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL); } break; case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: { int8_t pend_frames_empty; /* cleanup any pending LS_ACC ELSs */ ocs_node_send_ls_io_cleanup(node); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL); ocs_lock(&node->pend_frames_lock); pend_frames_empty = ocs_list_empty(&node->pend_frames); ocs_unlock(&node->pend_frames_lock); /* there are two scenarios where we want to keep this node alive: * 1. there are pending frames that need to be processed or * 2. we're an initiator and the remote node is a target and we * need to re-authenticate */ node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n", !pend_frames_empty, node->sport->enable_ini, node->targ); if((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) { uint8_t send_plogi = FALSE; if (node->sport->enable_ini && node->targ) { /* we're an initiator and node shutting down is a target; we'll * need to re-authenticate in initial state */ send_plogi = TRUE; } /* transition to __ocs_d_init (will retain HW node resources) */ ocs_scsi_io_alloc_enable(node); node->req_free = 0; /* either pending frames exist, or we're re-authenticating with PLOGI * (or both); in either case, return to initial state */ ocs_node_init_device(node, send_plogi); } /* else: let node shutdown occur */ break; } case OCS_NODE_SHUTDOWN_DEFAULT: default: /* shutdown due to link down, node going away (xport event) or * sport shutdown, purge pending and proceed to cleanup node */ /* cleanup any pending 
LS_ACC ELSs */ ocs_node_send_ls_io_cleanup(node); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL); node_printf(node, "Shutdown reason: default, purge pending\n"); ocs_node_purge_pending(node); break; } break; } case OCS_EVT_EXIT: ocs_node_accept_frames(node); break; default: __ocs_node_common(__func__, ctx, evt, arg); return NULL; } return NULL; } /** * @ingroup common_node * @brief Checks to see if ELS's have been quiesced * * Check if ELS's have been quiesced. If so, transition to the * next state in the shutdown process. * * @param node Node for which ELS's are checked * * @return Returns 1 if ELS's have been quiesced, 0 otherwise. */ static int ocs_node_check_els_quiesced(ocs_node_t *node) { ocs_assert(node, -1); /* check to see if ELS requests, completions are quiesced */ if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) && ocs_els_io_list_empty(node, &node->els_io_active_list)) { if (!node->attached) { /* hw node detach already completed, proceed */ node_printf(node, "HW node not attached\n"); ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL); } else { /* hw node detach hasn't completed, transition and wait */ node_printf(node, "HW node still attached\n"); ocs_node_transition(node, __ocs_node_wait_node_free, NULL); } return 1; } return 0; } /** * @ingroup common_node * @brief Initiate node IO cleanup. * * Note: this function must be called with a non-attached node * or a node for which the node detach (ocs_hw_node_detach()) * has already been initiated. * * @param node Node for which shutdown is initiated * * @return Returns None. 
*/ void ocs_node_initiate_cleanup(ocs_node_t *node) { ocs_io_t *els; ocs_io_t *els_next; ocs_t *ocs; ocs_assert(node); ocs = node->ocs; /* first cleanup ELS's that are pending (not yet active) */ ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) { /* skip the ELS IO for which a response will be sent after shutdown */ if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) && (els == node->ls_acc_io)) { continue; } /* can't call ocs_els_io_free() because lock is held; cleanup manually */ node_printf(node, "Freeing pending els %s\n", els->display_name); ocs_list_remove(&node->els_io_pend_list, els); ocs_io_free(node->ocs, els); } ocs_unlock(&node->active_ios_lock); if (node->ls_acc_io && node->ls_acc_io->hio != NULL) { /* * if there's an IO that will result in an LS_ACC after * shutdown and its HW IO is non-NULL, it better be an * implicit logout in vanilla sequence coalescing. In this * case, force the LS_ACC to go out on another XRI (hio) * since the previous will have been aborted by the UNREG_RPI */ ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO); ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI); node_printf(node, "invalidating ls_acc_io due to implicit logo\n"); /* No need to abort because the unreg_rpi takes care of it, just free */ ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio); /* NULL out hio to force the LS_ACC to grab a new XRI */ node->ls_acc_io->hio = NULL; } /* * if ELS's have already been quiesced, will move to next state * if ELS's have not been quiesced, abort them */ if (ocs_node_check_els_quiesced(node) == 0) { /* * Abort all ELS's since ELS's won't be aborted by HW * node free. */ ocs_node_abort_all_els(node); ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL); } } /** * @ingroup node_common * @brief Node state machine: Wait for all ELSs to complete. * *

 * Description:

* State waits for all ELSs to complete after aborting all * outstanding . * * @param ctx Remote node state machine context. * @param evt Event to process. * @param arg Per event optional argument. * * @return Returns NULL. */ void * __ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { uint8_t check_quiesce = FALSE; std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: { ocs_node_hold_frames(node); if (ocs_els_io_list_empty(node, &node->els_io_active_list)) { node_printf(node, "All ELS IOs complete\n"); check_quiesce = TRUE; } break; } case OCS_EVT_EXIT: ocs_node_accept_frames(node); break; case OCS_EVT_SRRS_ELS_REQ_OK: case OCS_EVT_SRRS_ELS_REQ_FAIL: case OCS_EVT_SRRS_ELS_REQ_RJT: case OCS_EVT_ELS_REQ_ABORTED: ocs_assert(node->els_req_cnt, NULL); node->els_req_cnt--; check_quiesce = TRUE; break; case OCS_EVT_SRRS_ELS_CMPL_OK: case OCS_EVT_SRRS_ELS_CMPL_FAIL: ocs_assert(node->els_cmpl_cnt, NULL); node->els_cmpl_cnt--; check_quiesce = TRUE; break; case OCS_EVT_ALL_CHILD_NODES_FREE: /* all ELS IO's complete */ node_printf(node, "All ELS IOs complete\n"); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL); check_quiesce = TRUE; break; case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY: break; case OCS_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; /* ignore shutdown events as we're already in shutdown path */ case OCS_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT; /* fall through */ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO: case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", ocs_sm_event_name(evt)); break; default: __ocs_node_common(__func__, ctx, evt, arg); return NULL; } if (check_quiesce) { ocs_node_check_els_quiesced(node); } return NULL; } /** * @ingroup node_command * @brief Node state machine: Wait for a HW node free event to * complete. * *

 * Description:

* State waits for the node free event to be received from the HW. * * @param ctx Remote node state machine context. * @param evt Event to process. * @param arg Per event optional argument. * * @return Returns NULL. */ void * __ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: ocs_node_hold_frames(node); break; case OCS_EVT_EXIT: ocs_node_accept_frames(node); break; case OCS_EVT_NODE_FREE_OK: /* node is officially no longer attached */ node->attached = FALSE; ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL); break; case OCS_EVT_ALL_CHILD_NODES_FREE: case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY: /* As IOs and ELS IO's complete we expect to get these events */ break; case OCS_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; /* ignore shutdown events as we're already in shutdown path */ case OCS_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT; /* Fall through */ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO: case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", ocs_sm_event_name(evt)); break; default: __ocs_node_common(__func__, ctx, evt, arg); return NULL; } return NULL; } /** * @ingroup node_common * @brief state: initiate node shutdown * * State is entered when a node receives a shutdown event, and it's waiting * for all the active IOs and ELS IOs associated with the node to complete. 
* * @param ctx remote node sm context * @param evt event to process * @param arg per event optional argument * * @return returns NULL */ void * __ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { ocs_io_t *io; ocs_io_t *next; std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: ocs_node_hold_frames(node); /* first check to see if no ELS IOs are outstanding */ if (ocs_els_io_list_empty(node, &node->els_io_active_list)) { /* If there are any active IOS, Free them. */ if (!ocs_node_active_ios_empty(node)) { ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->active_ios, io, next) { ocs_list_remove(&io->node->active_ios, io); ocs_io_free(node->ocs, io); } ocs_unlock(&node->active_ios_lock); } ocs_node_transition(node, __ocs_node_shutdown, NULL); } break; case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY: case OCS_EVT_ALL_CHILD_NODES_FREE: { if (ocs_node_active_ios_empty(node) && ocs_els_io_list_empty(node, &node->els_io_active_list)) { ocs_node_transition(node, __ocs_node_shutdown, NULL); } break; } case OCS_EVT_EXIT: ocs_node_accept_frames(node); break; case OCS_EVT_SRRS_ELS_REQ_FAIL: /* Can happen as ELS IO IO's complete */ ocs_assert(node->els_req_cnt, NULL); node->els_req_cnt--; break; /* ignore shutdown events as we're already in shutdown path */ case OCS_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT; /* fall through */ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO: case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO: ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt)); break; case OCS_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; default: __ocs_node_common(__func__, ctx, evt, arg); return NULL; } return NULL; } /** * @ingroup node_common * @brief state: common node event handler * * Handle common/shared node events * * @param funcname calling function's name * @param ctx remote node sm context * @param evt event to process 
* @param arg per event optional argument * * @return returns NULL */ void * __ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { ocs_node_t *node = NULL; ocs_t *ocs = NULL; ocs_node_cb_t *cbdata = arg; ocs_assert(ctx, NULL); ocs_assert(ctx->app, NULL); node = ctx->app; ocs_assert(node->ocs, NULL); ocs = node->ocs; switch(evt) { case OCS_EVT_ENTER: case OCS_EVT_REENTER: case OCS_EVT_EXIT: case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: case OCS_EVT_NODE_MISSING: case OCS_EVT_FCP_CMD_RCVD: break; case OCS_EVT_NODE_REFOUND: node->refound = 1; break; /* node->attached must be set appropriately for all node attach/detach events */ case OCS_EVT_NODE_ATTACH_OK: node->attached = TRUE; break; case OCS_EVT_NODE_FREE_OK: case OCS_EVT_NODE_ATTACH_FAIL: node->attached = FALSE; break; /* handle any ELS completions that other states either didn't care about * or forgot about */ case OCS_EVT_SRRS_ELS_CMPL_OK: case OCS_EVT_SRRS_ELS_CMPL_FAIL: ocs_assert(node->els_cmpl_cnt, NULL); node->els_cmpl_cnt--; break; /* handle any ELS request completions that other states either didn't care about * or forgot about */ case OCS_EVT_SRRS_ELS_REQ_OK: case OCS_EVT_SRRS_ELS_REQ_FAIL: case OCS_EVT_SRRS_ELS_REQ_RJT: case OCS_EVT_ELS_REQ_ABORTED: ocs_assert(node->els_req_cnt, NULL); node->els_req_cnt--; break; case OCS_EVT_ELS_RCVD: { fc_header_t *hdr = cbdata->header->dma.virt; /* Unsupported ELS was received, send LS_RJT, command not supported */ ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n", node->display_name, funcname, ((uint8_t*)cbdata->payload->dma.virt)[0]); ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL); break; } case OCS_EVT_PLOGI_RCVD: case OCS_EVT_FLOGI_RCVD: case OCS_EVT_LOGO_RCVD: case OCS_EVT_PRLI_RCVD: case OCS_EVT_PRLO_RCVD: case OCS_EVT_PDISC_RCVD: case OCS_EVT_FDISC_RCVD: case OCS_EVT_ADISC_RCVD: case OCS_EVT_RSCN_RCVD: case OCS_EVT_SCR_RCVD: { fc_header_t 
*hdr = cbdata->header->dma.virt; /* sm: / send ELS_RJT */ ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n", node->display_name, funcname, ocs_sm_event_name(evt)); /* if we didn't catch this in a state, send generic LS_RJT */ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL); break; } case OCS_EVT_GID_PT_RCVD: case OCS_EVT_RFT_ID_RCVD: case OCS_EVT_RFF_ID_RCVD: { fc_header_t *hdr = cbdata->header->dma.virt; ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n", node->display_name, funcname, ocs_sm_event_name(evt)); ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt, FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0); break; } case OCS_EVT_ABTS_RCVD: { fc_header_t *hdr = cbdata->header->dma.virt; ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n", node->display_name, funcname, ocs_sm_event_name(evt)); /* sm: send BA_ACC */ ocs_bls_send_acc_hdr(cbdata->io, hdr); break; } default: ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname, ocs_sm_event_name(evt)); break; } return NULL; } /** * @ingroup node_common * @brief save node service parameters * * Service parameters are copyed into the node structure * * @param node pointer to node structure * @param payload pointer to service parameters to save * * @return none */ void ocs_node_save_sparms(ocs_node_t *node, void *payload) { ocs_memcpy(node->service_params, payload, sizeof(node->service_params)); } /** * @ingroup node_common * @brief Post event to node state machine context * * This is used by the node state machine code to post events to the nodes. Upon * completion of the event posting, if the nesting depth is zero and we're not holding * inbound frames, then the pending frames are processed. 
* * @param node pointer to node * @param evt event to post * @param arg event posting argument * * @return none */ void ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg) { int free_node = FALSE; ocs_assert(node); ocs_node_lock(node); node->evtdepth ++; ocs_sm_post_event(&node->sm, evt, arg); /* If our event call depth is one and we're not holding frames * then we can dispatch any pending frames. We don't want to allow * the ocs_process_node_pending() call to recurse. */ if (!node->hold_frames && (node->evtdepth == 1)) { ocs_process_node_pending(node); } node->evtdepth --; /* Free the node object if so requested, and we're at an event * call depth of zero */ if ((node->evtdepth == 0) && node->req_free) { free_node = TRUE; } ocs_node_unlock(node); if (free_node) { ocs_node_free(node); } return; } /** * @ingroup node_common * @brief transition state of a node * * The node's state is transitioned to the requested state. Entry/Exit * events are posted as needed. * * @param node pointer to node * @param state state to transition to * @param data transition data * * @return none */ void ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data) { ocs_sm_ctx_t *ctx = &node->sm; ocs_node_lock(node); if (ctx->current_state == state) { ocs_node_post_event(node, OCS_EVT_REENTER, data); } else { ocs_node_post_event(node, OCS_EVT_EXIT, data); ctx->current_state = state; ocs_node_post_event(node, OCS_EVT_ENTER, data); } ocs_node_unlock(node); } /** * @ingroup node_common * @brief build EUI formatted WWN * * Build a WWN given the somewhat transport agnostic iScsi naming specification, for FC * use the eui. 
format, an ascii string such as: "eui.10000000C9A19501" * * @param buffer buffer to place formatted name into * @param buffer_len length in bytes of the buffer * @param eui_name cpu endian 64 bit WWN value * * @return none */ void ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name) { ocs_memset(buffer, 0, buffer_len); ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name); } /** * @ingroup node_common * @brief return nodes' WWPN as a uint64_t * * The WWPN is computed from service parameters and returned as a uint64_t * * @param node pointer to node structure * * @return WWPN * */ uint64_t ocs_node_get_wwpn(ocs_node_t *node) { fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params; return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo))); } /** * @ingroup node_common * @brief return nodes' WWNN as a uint64_t * * The WWNN is computed from service parameters and returned as a uint64_t * * @param node pointer to node structure * * @return WWNN * */ uint64_t ocs_node_get_wwnn(ocs_node_t *node) { fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params; return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo))); } /** * @brief Generate node ddump data * * Generates the node ddumpdata * * @param textbuf pointer to text buffer * @param node pointer to node context * * @return Returns 0 on success, or a negative value on failure. 
*/ int ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node) { ocs_io_t *io; ocs_io_t *els; int retval = 0; ocs_ddump_section(textbuf, "node", node->instance_index); ocs_ddump_value(textbuf, "display_name", "%s", node->display_name); ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name); ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name); ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt)); ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt)); ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator); ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id); ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached); ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames); ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled); ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason); ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc); ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did); ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid); ocs_ddump_value(textbuf, "req_free", "%d", node->req_free); ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt); ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt); ocs_ddump_value(textbuf, "targ", "%d", node->targ); ocs_ddump_value(textbuf, "init", "%d", node->init); ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn); ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn); ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 
1 : 0); ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count); ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt); ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4); ocs_lock(&node->pend_frames_lock); if (!ocs_list_empty(&node->pend_frames)) { ocs_hw_sequence_t *frame; ocs_ddump_section(textbuf, "pending_frames", 0); ocs_list_foreach(&node->pend_frames, frame) { fc_header_t *hdr; char buf[128]; hdr = frame->header->dma.virt; ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id), frame->payload->dma.len); ocs_ddump_value(textbuf, "frame", "%s", buf); } ocs_ddump_endsection(textbuf, "pending_frames", 0); } ocs_unlock(&node->pend_frames_lock); ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node); ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node); ocs_lock(&node->active_ios_lock); ocs_ddump_section(textbuf, "active_ios", 0); ocs_list_foreach(&node->active_ios, io) { ocs_ddump_io(textbuf, io); } ocs_ddump_endsection(textbuf, "active_ios", 0); ocs_ddump_section(textbuf, "els_io_pend_list", 0); ocs_list_foreach(&node->els_io_pend_list, els) { ocs_ddump_els(textbuf, els); } ocs_ddump_endsection(textbuf, "els_io_pend_list", 0); ocs_ddump_section(textbuf, "els_io_active_list", 0); ocs_list_foreach(&node->els_io_active_list, els) { ocs_ddump_els(textbuf, els); } ocs_ddump_endsection(textbuf, "els_io_active_list", 0); ocs_unlock(&node->active_ios_lock); ocs_ddump_endsection(textbuf, "node", node->instance_index); return retval; } /** * @brief check ELS request completion * * Check ELS request completion event to make sure it's for the * ELS request we expect. If not, invoke given common event * handler and return an error. 
* * @param ctx state machine context * @param evt ELS request event * @param arg event argument * @param cmd ELS command expected * @param node_common_func common event handler to call if ELS * doesn't match * @param funcname function name that called this * * @return zero if ELS command matches, -1 otherwise */ int32_t node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname) { ocs_node_t *node = NULL; ocs_t *ocs = NULL; ocs_node_cb_t *cbdata = arg; fc_els_gen_t *els_gen = NULL; ocs_assert(ctx, -1); node = ctx->app; ocs_assert(node, -1); ocs = node->ocs; ocs_assert(ocs, -1); cbdata = arg; ocs_assert(cbdata, -1); ocs_assert(cbdata->els, -1); els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt; ocs_assert(els_gen, -1); if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) { if (cbdata->els->hio_type != OCS_HW_ELS_REQ) { ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n", node->display_name, funcname, cmd, cbdata->els->hio_type); } else { ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n", node->display_name, funcname, cmd, els_gen->command_code); } /* send event to common handler */ node_common_func(funcname, ctx, evt, arg); return -1; } return 0; } /** * @brief check NS request completion * * Check ELS request completion event to make sure it's for the * nameserver request we expect. If not, invoke given common * event handler and return an error. 
* * @param ctx state machine context * @param evt ELS request event * @param arg event argument * @param cmd nameserver command expected * @param node_common_func common event handler to call if * nameserver cmd doesn't match * @param funcname function name that called this * * @return zero if NS command matches, -1 otherwise */ int32_t node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname) { ocs_node_t *node = NULL; ocs_t *ocs = NULL; ocs_node_cb_t *cbdata = arg; fcct_iu_header_t *fcct = NULL; ocs_assert(ctx, -1); node = ctx->app; ocs_assert(node, -1); ocs = node->ocs; ocs_assert(ocs, -1); cbdata = arg; ocs_assert(cbdata, -1); ocs_assert(cbdata->els, -1); fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt; ocs_assert(fcct, -1); if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) { if (cbdata->els->hio_type != OCS_HW_FC_CT) { ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n", node->display_name, funcname, cmd, cbdata->els->hio_type); } else { ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n", node->display_name, funcname, cmd, fcct->cmd_rsp_code); } /* send event to common handler */ node_common_func(funcname, ctx, evt, arg); return -1; } return 0; } void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object) { ocs_io_t *io; ocs_node_t *node = (ocs_node_t *)object; ocs_mgmt_start_section(textbuf, "node", node->instance_index); /* Readonly values */ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down"); ocs_mgmt_emit_property_name(textbuf, 
MGMT_MODE_RD, "req_free"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames"); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count"); /* Actions */ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume"); ocs_lock(&node->active_ios_lock); ocs_list_foreach(&node->active_ios, io) { if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) { io->mgmt_functions->get_list_handler(textbuf, io); } } ocs_unlock(&node->active_ios_lock); ocs_mgmt_end_section(textbuf, "node", node->instance_index); } int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object) { ocs_io_t *io; ocs_node_t *node = (ocs_node_t *)object; char qualifier[80]; int retval = -1; ocs_mgmt_start_section(textbuf, "node", node->instance_index); ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) +1; /* See if it's a value I can supply */ if (ocs_strcmp(unqualified_name, "display_name") == 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name); retval = 0; } else if (ocs_strcmp(unqualified_name, "indicator") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator); retval = 0; } else if (ocs_strcmp(unqualified_name, "fc_id") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id); retval = 0; } 
else if (ocs_strcmp(unqualified_name, "attached") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached); retval = 0; } else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames); retval = 0; } else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled); retval = 0; } else if (ocs_strcmp(unqualified_name, "req_free") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free); retval = 0; } else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", node->ls_acc_oxid); retval = 0; } else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did); retval = 0; } else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt); retval = 0; } else if (ocs_strcmp(unqualified_name, "targ") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ); retval = 0; } else if (ocs_strcmp(unqualified_name, "init") == 0) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init); retval = 0; } else if (ocs_strcmp(unqualified_name, "wwpn") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn); retval = 0; } else if (ocs_strcmp(unqualified_name, "wwnn") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn); retval = 0; } else if (ocs_strcmp(unqualified_name, "current_state") == 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name); retval = 0; } else if (ocs_strcmp(unqualified_name, "login_state") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 
1 : 0); retval = 0; } else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) { ocs_hw_sequence_t *frame; ocs_lock(&node->pend_frames_lock); ocs_list_foreach(&node->pend_frames, frame) { fc_header_t *hdr; char buf[128]; hdr = frame->header->dma.virt; ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id), frame->payload->dma.len); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf); } ocs_unlock(&node->pend_frames_lock); retval = 0; } else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count); retval = 0; } else { /* If I didn't know the value of this status pass the request to each of my children */ ocs_lock(&node->active_ios_lock); ocs_list_foreach(&node->active_ios, io) { if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) { retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io); } if (retval == 0) { break; } } ocs_unlock(&node->active_ios_lock); } } ocs_mgmt_end_section(textbuf, "node", node->instance_index); return retval; } void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object) { ocs_io_t *io; ocs_node_t *node = (ocs_node_t *)object; ocs_hw_sequence_t *frame; ocs_mgmt_start_section(textbuf, "node", node->instance_index); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", 
node->ls_acc_oxid); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ); ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn); ocs_lock(&node->pend_frames_lock); ocs_list_foreach(&node->pend_frames, frame) { fc_header_t *hdr; char buf[128]; hdr = frame->header->dma.virt; ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id), frame->payload->dma.len); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf); } ocs_unlock(&node->pend_frames_lock); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count); ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume"); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 
1 : 0); ocs_lock(&node->active_ios_lock); ocs_list_foreach(&node->active_ios, io) { if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) { io->mgmt_functions->get_all_handler(textbuf,io); } } ocs_unlock(&node->active_ios_lock); ocs_mgmt_end_section(textbuf, "node", node->instance_index); } int ocs_mgmt_node_set(char *parent, char *name, char *value, void *object) { ocs_io_t *io; ocs_node_t *node = (ocs_node_t *)object; char qualifier[80]; int retval = -1; ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { ocs_lock(&node->active_ios_lock); ocs_list_foreach(&node->active_ios, io) { if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) { retval = io->mgmt_functions->set_handler(qualifier, name, value, io); } if (retval == 0) { break; } } ocs_unlock(&node->active_ios_lock); } return retval; } int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length, void *object) { ocs_io_t *io; ocs_node_t *node = (ocs_node_t *)object; char qualifier[80]; int retval = -1; ocs_snprintf(qualifier, sizeof(qualifier), "%s.node%d", parent, node->instance_index); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = action + strlen(qualifier) +1; if (ocs_strcmp(unqualified_name, "resume") == 0) { ocs_node_post_event(node, OCS_EVT_RESUME, NULL); } { /* If I didn't know how to do this action pass the request to each of my children */ ocs_lock(&node->active_ios_lock); ocs_list_foreach(&node->active_ios, io) { if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) { retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, io); } if (retval == 0) { break; } } 
ocs_unlock(&node->active_ios_lock); } } return retval; } /** * @brief Return TRUE if active ios list is empty * * Test if node->active_ios list is empty while holding the node->active_ios_lock. * * @param node pointer to node object * * @return TRUE if node active ios list is empty */ int ocs_node_active_ios_empty(ocs_node_t *node) { int empty; ocs_lock(&node->active_ios_lock); empty = ocs_list_empty(&node->active_ios); ocs_unlock(&node->active_ios_lock); return empty; } /** * @brief Pause a node * * The node is placed in the __ocs_node_paused state after saving the state * to return to * * @param node Pointer to node object * @param state State to resume to * * @return none */ void ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state) { node->nodedb_state = state; ocs_node_transition(node, __ocs_node_paused, NULL); } /** * @brief Paused node state * * This state is entered when a state is "paused". When resumed, the node * is transitioned to a previously saved state (node->ndoedb_state) * * @param ctx Remote node state machine context. * @param evt Event to process. * @param arg Per event optional argument. * * @return returns NULL */ void * __ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: node_printf(node, "Paused\n"); break; case OCS_EVT_RESUME: { ocs_sm_function_t pf = node->nodedb_state; node->nodedb_state = NULL; ocs_node_transition(node, pf, NULL); break; } case OCS_EVT_DOMAIN_ATTACH_OK: break; case OCS_EVT_SHUTDOWN: node->req_free = 1; break; default: __ocs_node_common(__func__, ctx, evt, arg); break; } return NULL; } /** * @brief Resume a paused state * * Posts a resume event to the paused node. * * @param node Pointer to node object * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_node_resume(ocs_node_t *node) { ocs_assert(node != NULL, -1); ocs_node_post_event(node, OCS_EVT_RESUME, NULL); return 0; } /** * @ingroup node_common * @brief Dispatch a ELS frame. * *

Description

* An ELS frame is dispatched to the \c node state machine. * RQ Pair mode: this function is always called with a NULL hw * io. * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { struct { uint32_t cmd; ocs_sm_event_t evt; uint32_t payload_size; } els_cmd_list[] = { {FC_ELS_CMD_PLOGI, OCS_EVT_PLOGI_RCVD, sizeof(fc_plogi_payload_t)}, {FC_ELS_CMD_FLOGI, OCS_EVT_FLOGI_RCVD, sizeof(fc_plogi_payload_t)}, {FC_ELS_CMD_LOGO, OCS_EVT_LOGO_RCVD, sizeof(fc_acc_payload_t)}, {FC_ELS_CMD_RRQ, OCS_EVT_RRQ_RCVD, sizeof(fc_acc_payload_t)}, {FC_ELS_CMD_PRLI, OCS_EVT_PRLI_RCVD, sizeof(fc_prli_payload_t)}, {FC_ELS_CMD_PRLO, OCS_EVT_PRLO_RCVD, sizeof(fc_prlo_payload_t)}, {FC_ELS_CMD_PDISC, OCS_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_FDISC, OCS_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_ADISC, OCS_EVT_ADISC_RCVD, sizeof(fc_adisc_payload_t)}, {FC_ELS_CMD_RSCN, OCS_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_SCR , OCS_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD}, }; ocs_t *ocs = node->ocs; ocs_node_cb_t cbdata; fc_header_t *hdr = seq->header->dma.virt; uint8_t *buf = seq->payload->dma.virt; ocs_sm_event_t evt = OCS_EVT_ELS_RCVD; uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD; uint32_t i; ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; /* find a matching event for the ELS command */ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i ++) { if (els_cmd_list[i].cmd == buf[0]) { evt = els_cmd_list[i].evt; payload_size = els_cmd_list[i].payload_size; break; } } switch(evt) { case OCS_EVT_FLOGI_RCVD: ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; case OCS_EVT_FDISC_RCVD: 
ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; case OCS_EVT_PLOGI_RCVD: ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; default: break; } cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER); if (cbdata.io != NULL) { cbdata.io->hw_priv = seq->hw_priv; /* if we're here, sequence initiative has been transferred */ cbdata.io->seq_init = 1; ocs_node_post_event(node, evt, &cbdata); } else { node_printf(node, "failure to allocate SCSI IO for ELS s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); } ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a ABTS frame (RQ Pair/sequence coalescing). * *

Description

* An ABTS frame is dispatched to the node state machine. This * function is used for both RQ Pair and sequence coalescing. * * @param node Node that originated the frame. * @param seq Header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; ocs_xport_t *xport = ocs->xport; fc_header_t *hdr = seq->header->dma.virt; uint16_t ox_id = ocs_be16toh(hdr->ox_id); uint16_t rx_id = ocs_be16toh(hdr->rx_id); ocs_node_cb_t cbdata; int32_t rc = 0; node->abort_cnt++; /* * Check to see if the IO we want to abort is active, if it not active, * then we can send the BA_ACC using the send frame option */ if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) { uint32_t send_frame_capable; ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id); /* If we have SEND_FRAME capability, then use it to send BA_ACC */ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); if ((rc == 0) && send_frame_capable) { rc = ocs_sframe_send_bls_acc(node, seq); if (rc) { ocs_log_test(ocs, "ocs_bls_acc_send_frame failed\n"); } return rc; } /* continuing */ } ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER); if (cbdata.io != NULL) { cbdata.io->hw_priv = seq->hw_priv; /* If we got this far, SIT=1 */ cbdata.io->seq_init = 1; /* fill out generic fields */ cbdata.io->ocs = ocs; cbdata.io->node = node; cbdata.io->cmd_tgt = TRUE; ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata); } else { ocs_atomic_add_return(&xport->io_alloc_failed_count, 1); node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); } /* 
ABTS processed, return RX buffer to the chip */ ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a CT frame. * *

Description

* A CT frame is dispatched to the \c node state machine. * RQ Pair mode: this function is always called with a NULL hw * io. * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; fc_header_t *hdr = seq->header->dma.virt; fcct_iu_header_t *iu = seq->payload->dma.virt; ocs_sm_event_t evt = OCS_EVT_ELS_RCVD; uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD; uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code); ocs_node_cb_t cbdata; uint32_t i; struct { uint32_t cmd; ocs_sm_event_t evt; uint32_t payload_size; } ct_cmd_list[] = { {FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100}, {FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100}, {FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100}, {FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256}, {FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256}, {FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100}, {FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100}, {FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100}, {FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100}, }; ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; /* find a matching event for the ELS/GS command */ for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i ++) { if (ct_cmd_list[i].cmd == gscmd) { evt = ct_cmd_list[i].evt; payload_size = ct_cmd_list[i].payload_size; break; } } /* Allocate an IO and send a reject */ cbdata.io = ocs_els_io_alloc(node, 
payload_size, OCS_ELS_ROLE_RESPONDER); if (cbdata.io == NULL) { node_printf(node, "GS IO failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); return -1; } cbdata.io->hw_priv = seq->hw_priv; ocs_node_post_event(node, evt, &cbdata); ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a FCP command frame when the node is not ready. * *

Description

* A frame is dispatched to the \c node state machine.
 *
 * @param node Node that originated the frame.
 * @param seq header/payload sequence buffers
 *
 * @return Returns 0 if frame processed and RX buffers cleaned
 * up appropriately, -1 if frame not handled.
 */
int32_t
ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_node_cb_t cbdata;
	ocs_t *ocs = node->ocs;

	/* Hand the FCP command to the node state machine, then return the
	 * RX buffers to the hardware. */
	ocs_memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;
	ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata);
	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}

/**
 * @ingroup node_common
 * @brief Stub handler for non-ABTS BLS frames
 *
 *

Description

* Log message and drop. Customer can plumb it to their back-end as needed * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 */ int32_t ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq) { fc_header_t *hdr = seq->header->dma.virt; node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n", ocs_htobe32(((uint32_t *)hdr)[0]), ocs_htobe32(((uint32_t *)hdr)[1]), ocs_htobe32(((uint32_t *)hdr)[2]), ocs_htobe32(((uint32_t *)hdr)[3]), ocs_htobe32(((uint32_t *)hdr)[4]), ocs_htobe32(((uint32_t *)hdr)[5])); return -1; } Index: head/sys/dev/ocs_fc/ocs_pci.c =================================================================== --- head/sys/dev/ocs_fc/ocs_pci.c (revision 343348) +++ head/sys/dev/ocs_fc/ocs_pci.c (revision 343349) @@ -1,963 +1,963 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved." /** * @file * Implementation of required FreeBSD PCI interface functions */ #include "ocs.h" #include "version.h" #include #include static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data"); #include #include #include /** * Tunable parameters for transport */ int logmask = 0; int ctrlmask = 2; int logdest = 1; int loglevel = LOG_INFO; int ramlog_size = 1*1024*1024; int ddump_saved_size = 0; static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1"; static void ocs_release_bus(struct ocs_softc *); static int32_t ocs_intr_alloc(struct ocs_softc *); static int32_t ocs_intr_setup(struct ocs_softc *); static int32_t ocs_intr_teardown(struct ocs_softc *); static int ocs_pci_intx_filter(void *); static void ocs_pci_intr(void *); static int32_t ocs_init_dma_tag(struct ocs_softc *ocs); static int32_t ocs_setup_fcports(ocs_t *ocs); ocs_t *ocs_devices[MAX_OCS_DEVICES]; /** * @brief Check support for the given device * * Determine support for a given device by examining the PCI vendor and * device IDs * * @param dev device abstraction * * @return 0 if device is supported, ENXIO otherwise */ static int ocs_pci_probe(device_t dev) { char *desc = NULL; if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) { return ENXIO; } switch (pci_get_device(dev)) { case PCI_PRODUCT_EMULEX_OCE16001: desc = "Emulex LightPulse FC Adapter"; break; 
case PCI_PRODUCT_EMULEX_LPE31004: desc = "Emulex LightPulse FC Adapter"; break; case PCI_PRODUCT_EMULEX_OCE50102: desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter"; break; default: return ENXIO; } device_set_desc(dev, desc); return BUS_PROBE_DEFAULT; } static int ocs_map_bars(device_t dev, struct ocs_softc *ocs) { /* * Map PCI BAR0 register into the CPU's space. */ ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0); ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ocs->reg[0].rid, RF_ACTIVE); if (ocs->reg[0].res == NULL) { device_printf(dev, "bus_alloc_resource failed rid=%#x\n", ocs->reg[0].rid); return ENXIO; } ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res); ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res); return 0; } static int ocs_setup_params(struct ocs_softc *ocs) { int32_t i = 0; const char *hw_war_version; /* Setup tunable parameters */ ocs->ctrlmask = ctrlmask; ocs->speed = 0; ocs->topology = 0; ocs->ethernet_license = 0; ocs->num_scsi_ios = 8192; ocs->enable_hlm = 0; ocs->hlm_group_size = 8; ocs->logmask = logmask; ocs->config_tgt = FALSE; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "target", &i)) { if (1 == i) { ocs->config_tgt = TRUE; device_printf(ocs->dev, "Enabling target\n"); } } ocs->config_ini = TRUE; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "initiator", &i)) { if (0 == i) { ocs->config_ini = FALSE; device_printf(ocs->dev, "Disabling initiator\n"); } } ocs->enable_ini = ocs->config_ini; if (!ocs->config_ini && !ocs->config_tgt) { device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n"); return 1; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "logmask", &logmask)) { device_printf(ocs->dev, "logmask = %#x\n", logmask); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "logdest", &logdest)) { device_printf(ocs->dev, "logdest = %#x\n", logdest); } if (0 == 
resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "loglevel", &loglevel)) { device_printf(ocs->dev, "loglevel = %#x\n", loglevel); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ramlog_size", &ramlog_size)) { device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ddump_saved_size", &ddump_saved_size)) { device_printf(ocs->dev, "ddump_saved_size= %#x\n", ddump_saved_size); } /* If enabled, initailize a RAM logging buffer */ if (logdest & 2) { ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS, OCS_RAMLOG_DEFAULT_BUFFERS); /* If NULL was returned, then we'll simply skip using the ramlog but */ /* set logdest to 1 to ensure that we at least get default logging. */ if (ocs->ramlog == NULL) { logdest = 1; } } /* initialize a saved ddump */ if (ddump_saved_size) { if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) { ocs_log_err(ocs, "failed to allocate memory for saved ddump\n"); } } if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "hw_war_version", &hw_war_version)) { device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version); ocs->hw_war_version = strdup(hw_war_version, M_OCS); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "explicit_buffer_list", &i)) { ocs->explicit_buffer_list = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ethernet_license", &i)) { ocs->ethernet_license = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "speed", &i)) { device_printf(ocs->dev, "speed = %d Mbps\n", i); ocs->speed = i; } ocs->desc = device_get_desc(ocs->dev); ocs_device_lock_init(ocs); ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." 
STR_BE_BRANCH; ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device); if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "enable_hlm", &i)) { device_printf(ocs->dev, "enable_hlm = %d\n", i); ocs->enable_hlm = i; if (ocs->enable_hlm) { ocs->hlm_group_size = 8; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "hlm_group_size", &i)) { ocs->hlm_group_size = i; } device_printf(ocs->dev, "hlm_group_size = %d\n", i); } } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "num_scsi_ios", &i)) { ocs->num_scsi_ios = i; device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios); } else { ocs->num_scsi_ios = 8192; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "topology", &i)) { ocs->topology = i; device_printf(ocs->dev, "Setting topology=%#x\n", i); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "num_vports", &i)) { if (i >= 0 && i <= 254) { device_printf(ocs->dev, "num_vports = %d\n", i); ocs->num_vports = i; } else { device_printf(ocs->dev, "num_vports: %d not supported \n", i); } } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "external_loopback", &i)) { device_printf(ocs->dev, "external_loopback = %d\n", i); ocs->external_loopback = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "tgt_rscn_delay", &i)) { device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i); ocs->tgt_rscn_delay_msec = i * 1000; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "tgt_rscn_period", &i)) { device_printf(ocs->dev, "tgt_rscn_period = %d\n", i); ocs->tgt_rscn_period_msec = i * 1000; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "target_io_timer", &i)) { device_printf(ocs->dev, "target_io_timer = %d\n", i); ocs->target_io_timer_sec = i; } hw_global.queue_topology_string = 
queue_topology; ocs->rq_selection_policy = 0; ocs->rr_quanta = 1; ocs->filter_def = "0,0,0,0"; return 0; } static int32_t ocs_setup_fcports(ocs_t *ocs) { uint32_t i = 0, role = 0; uint64_t sli_wwpn, sli_wwnn; size_t size; ocs_xport_t *xport = ocs->xport; ocs_vport_spec_t *vport; ocs_fcport *fcp = NULL; size = sizeof(ocs_fcport) * (ocs->num_vports + 1); ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT); if (ocs->fcports == NULL) { device_printf(ocs->dev, "Can't allocate fcport \n"); return 1; } role = (ocs->enable_ini)? KNOB_ROLE_INITIATOR: 0 | (ocs->enable_tgt)? KNOB_ROLE_TARGET: 0; fcp = FCPORT(ocs, i); fcp->role = role; i++; ocs_list_foreach(&xport->vport_list, vport) { fcp = FCPORT(ocs, i); vport->tgt_data = fcp; fcp->vport = vport; fcp->role = role; if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) { ocs_log_err(ocs, "Get default wwn failed \n"); i++; continue; } vport->wwpn = ocs_be64toh(sli_wwpn); vport->wwnn = ocs_be64toh(sli_wwnn); i++; ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx \n", vport->wwpn, vport->wwnn); } return 0; } int32_t ocs_device_attach(ocs_t *ocs) { int32_t i; ocs_io_t *io = NULL; if (ocs->attached) { ocs_log_warn(ocs, "%s: Device is already attached\n", __func__); return -1; } /* Allocate transport object and bring online */ ocs->xport = ocs_xport_alloc(ocs); if (ocs->xport == NULL) { device_printf(ocs->dev, "failed to allocate transport object\n"); return ENOMEM; } else if (ocs_xport_attach(ocs->xport) != 0) { device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__); goto fail_xport_attach; } else if (ocs_xport_initialize(ocs->xport) != 0) { device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__); goto fail_xport_init; } if (ocs_init_dma_tag(ocs)) { goto fail_intr_setup; } for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) { if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) { device_printf(ocs->dev, "%s: bad dma map create\n", __func__); } io->tgt_io.state = OCS_CAM_IO_FREE; 
} if (ocs_setup_fcports(ocs)) { device_printf(ocs->dev, "FCports creation failed\n"); goto fail_intr_setup; } if(ocs_cam_attach(ocs)) { device_printf(ocs->dev, "cam attach failed \n"); goto fail_intr_setup; } if (ocs_intr_setup(ocs)) { device_printf(ocs->dev, "Interrupt setup failed\n"); goto fail_intr_setup; } if (ocs->enable_ini || ocs->enable_tgt) { if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) { device_printf(ocs->dev, "Can't init port\n"); goto fail_xport_online; } } ocs->attached = true; return 0; fail_xport_online: if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) { device_printf(ocs->dev, "Transport Shutdown timed out\n"); } ocs_intr_teardown(ocs); fail_intr_setup: fail_xport_init: ocs_xport_detach(ocs->xport); if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); ocs_xport_free(ocs->xport); ocs->xport = NULL; fail_xport_attach: if (ocs->xport) ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport))); ocs->xport = NULL; return ENXIO; } /** * @brief Connect the driver to the given device * * If the probe routine is successful, the OS will give the driver * the opportunity to connect itself to the device. This routine * maps PCI resources (memory BARs and interrupts) and initialize a * hardware object. 
* * @param dev device abstraction * * @return 0 if the driver attaches to the device, ENXIO otherwise */ static int ocs_pci_attach(device_t dev) { struct ocs_softc *ocs; int instance; instance = device_get_unit(dev); ocs = (struct ocs_softc *)device_get_softc(dev); if (NULL == ocs) { device_printf(dev, "cannot allocate softc\n"); return ENOMEM; } memset(ocs, 0, sizeof(struct ocs_softc)); if (instance < ARRAY_SIZE(ocs_devices)) { ocs_devices[instance] = ocs; } else { device_printf(dev, "got unexpected ocs instance number %d\n", instance); } ocs->instance_index = instance; ocs->dev = dev; pci_enable_io(dev, SYS_RES_MEMORY); pci_enable_busmaster(dev); ocs->pci_vendor = pci_get_vendor(dev); ocs->pci_device = pci_get_device(dev); snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); /* Map all memory BARs */ if (ocs_map_bars(dev, ocs)) { device_printf(dev, "Failed to map pci bars\n"); goto release_bus; } /* create a root DMA tag for the device */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, /* byte alignment */ 0, /* no boundary restrictions */ BUS_SPACE_MAXADDR, /* no minimum low address */ BUS_SPACE_MAXADDR, /* no maximum high address */ NULL, /* no filter function */ NULL, /* or arguments */ BUS_SPACE_MAXSIZE, /* max size covered by tag */ BUS_SPACE_UNRESTRICTED, /* no segment count restrictions */ BUS_SPACE_MAXSIZE, /* no segment length restrictions */ 0, /* flags */ NULL, /* no lock manipulation function */ NULL, /* or arguments */ &ocs->dmat)) { device_printf(dev, "parent DMA tag allocation failed\n"); goto release_bus; } if (ocs_intr_alloc(ocs)) { device_printf(dev, "Interrupt allocation failed\n"); goto release_bus; } if (PCIC_SERIALBUS == pci_get_class(dev) && PCIS_SERIALBUS_FC == pci_get_subclass(dev)) ocs->ocs_xport = OCS_XPORT_FC; else { device_printf(dev, "unsupported class (%#x : %#x)\n", pci_get_class(dev), pci_get_class(dev)); goto release_bus; } /* Setup tunable parameters */ if 
(ocs_setup_params(ocs)) { device_printf(ocs->dev, "failed to setup params\n"); goto release_bus; } if (ocs_device_attach(ocs)) { device_printf(ocs->dev, "failed to attach device\n"); goto release_params; } ocs->fc_type = FC_TYPE_FCP; ocs_debug_attach(ocs); return 0; release_params: ocs_ramlog_free(ocs, ocs->ramlog); ocs_device_lock_free(ocs); free(ocs->hw_war_version, M_OCS); release_bus: ocs_release_bus(ocs); return ENXIO; } /** * @brief free resources when pci device detach * * @param ocs pointer to ocs structure * * @return 0 for success, a negative error code value for failure. */ int32_t ocs_device_detach(ocs_t *ocs) { int32_t rc = 0, i; ocs_io_t *io = NULL; if (ocs != NULL) { if (!ocs->attached) { ocs_log_warn(ocs, "%s: Device is not attached\n", __func__); return -1; } ocs->attached = FALSE; rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN); if (rc) { ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__); } ocs_intr_teardown(ocs); if (ocs_xport_detach(ocs->xport) != 0) { ocs_log_err(ocs, "%s: Transport detach failed\n", __func__); } ocs_cam_detach(ocs); - ocs_free(ocs, ocs->fcports, sizeof(ocs->fcports)); + ocs_free(ocs, ocs->fcports, sizeof(*(ocs->fcports))); for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) { if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) { device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__); } } bus_dma_tag_destroy(ocs->dmat); ocs_xport_free(ocs->xport); ocs->xport = NULL; } return 0; } /** * @brief Detach the driver from the given device * * If the driver is a loadable module, this routine gets called at unload * time. This routine will stop the device and free any allocated resources. 
* * @param dev device abstraction * * @return 0 if the driver detaches from the device, ENXIO otherwise */ static int ocs_pci_detach(device_t dev) { struct ocs_softc *ocs; ocs = (struct ocs_softc *)device_get_softc(dev); if (!ocs) { device_printf(dev, "no driver context?!?\n"); return -1; } if (ocs->config_tgt && ocs->enable_tgt) { device_printf(dev, "can't detach with target mode enabled\n"); return EBUSY; } ocs_device_detach(ocs); /* * Workaround for OCS SCSI Transport quirk. * * CTL requires that target mode is disabled prior to unloading the * driver (ie ocs->enable_tgt = FALSE), but once the target is disabled, * the transport will not call ocs_scsi_tgt_del_device() which deallocates * CAM resources. The workaround is to explicitly make the call here. */ if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); /* free strdup created buffer.*/ free(ocs->hw_war_version, M_OCS); ocs_device_lock_free(ocs); ocs_debug_detach(ocs); ocs_ramlog_free(ocs, ocs->ramlog); ocs_release_bus(ocs); return 0; } /** * @brief Notify driver of system shutdown * * @param dev device abstraction * * @return 0 if the driver attaches to the device, ENXIO otherwise */ static int ocs_pci_shutdown(device_t dev) { device_printf(dev, "%s\n", __func__); return 0; } /** * @brief Release bus resources allocated within the soft context * * @param ocs Pointer to the driver's context * * @return none */ static void ocs_release_bus(struct ocs_softc *ocs) { if (NULL != ocs) { uint32_t i; ocs_intr_teardown(ocs); if (ocs->irq) { bus_release_resource(ocs->dev, SYS_RES_IRQ, rman_get_rid(ocs->irq), ocs->irq); if (ocs->n_vec) { pci_release_msi(ocs->dev); ocs->n_vec = 0; } ocs->irq = NULL; } bus_dma_tag_destroy(ocs->dmat); for (i = 0; i < PCI_MAX_BAR; i++) { if (ocs->reg[i].res) { bus_release_resource(ocs->dev, SYS_RES_MEMORY, ocs->reg[i].rid, ocs->reg[i].res); } } } } /** * @brief Allocate and initialize interrupts * * @param ocs Pointer to the driver's context * * @return none */ static int32_t 
ocs_intr_alloc(struct ocs_softc *ocs)
{
	/* Prefer MSI-X, fall back to MSI, and finally to a shared
	 * legacy INTx line (irqid 0, n_vec 0). */
	ocs->n_vec = 1;
	if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
		device_printf(ocs->dev, "MSI-X allocation failed\n");
		if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
			device_printf(ocs->dev, "MSI allocation failed \n");
			/* Legacy INTx: rid 0, no message vectors */
			ocs->irqid = 0;
			ocs->n_vec = 0;
		} else
			ocs->irqid = 1;
	} else {
		ocs->irqid = 1;
	}

	ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
			RF_ACTIVE | RF_SHAREABLE);
	if (NULL == ocs->irq) {
		device_printf(ocs->dev, "could not allocate interrupt\n");
		return -1;
	}

	ocs->intr_ctx.vec = 0;
	ocs->intr_ctx.softc = ocs;
	snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
			"%s_intr_%d",
			device_get_nameunit(ocs->dev),
			ocs->intr_ctx.vec);

	return 0;
}

/**
 * @brief Create and attach an interrupt handler
 *
 * For legacy INTx (n_vec == 0) an additional filter routine is installed
 * to reject interrupts raised by other devices sharing the line.
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_setup(struct ocs_softc *ocs)
{
	driver_filter_t	*filter = NULL;

	/* Only legacy INTx needs the filter; MSI/MSI-X are exclusive */
	if (0 == ocs->n_vec) {
		filter = ocs_pci_intx_filter;
	}

	if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
				filter, ocs_pci_intr, &ocs->intr_ctx,
				&ocs->tag)) {
		device_printf(ocs->dev, "could not initialize interrupt\n");
		return -1;
	}

	return 0;
}

/**
 * @brief Detach an interrupt handler
 *
 * Safe to call when no handler was installed (ocs->tag == NULL).
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_teardown(struct ocs_softc *ocs)
{

	if (!ocs) {
		printf("%s: bad driver context?!?\n", __func__);
		return -1;
	}

	if (ocs->tag) {
		bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
		ocs->tag = NULL;
	}

	return 0;
}

/**
 * @brief PCI interrupt handler
 *
 * Filter routine used only for shared legacy INTx: checks the PCI status
 * register to see whether this function asserted the interrupt, and if
 * so disables further INTx assertion before scheduling the threaded
 * handler (ocs_pci_intr).
 *
 * @param arg pointer to the driver's software context
 *
 * @return FILTER_HANDLED if interrupt is processed, FILTER_STRAY otherwise
 */
static int
ocs_pci_intx_filter(void *arg)
{
	ocs_intr_ctx_t	*intr = arg;
	struct ocs_softc *ocs = NULL;
	uint16_t	val = 0;

	if (NULL == intr) {
		return FILTER_STRAY;
	}

	ocs = intr->softc;
#ifndef PCIM_STATUS_INTR
#define PCIM_STATUS_INTR	0x0008
#endif
	val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
	/* 0xffff means the config read itself failed (device gone) */
	if (0xffff == val) {
		device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n",
		    __func__);
		return FILTER_STRAY;
	}
	/* Interrupt not asserted by this function: not ours */
	if (0 == (val & PCIM_STATUS_INTR)) {
		return FILTER_STRAY;
	}

	/* Mask further INTx assertion until the threaded handler runs */
	val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
	val |= PCIM_CMD_INTxDIS;
	pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);

	return FILTER_SCHEDULE_THREAD;
}

/**
 * @brief interrupt handler
 *
 * Threaded handler: processes hardware events under the SIM lock.
 *
 * @param context pointer to the interrupt context
 */
static void
ocs_pci_intr(void *context)
{
	ocs_intr_ctx_t	*intr = context;
	struct ocs_softc *ocs = intr->softc;

	mtx_lock(&ocs->sim_lock);
	ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
	mtx_unlock(&ocs->sim_lock);
}

/**
 * @brief Initialize DMA tag
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_init_dma_tag(struct ocs_softc *ocs)
{
	uint32_t	max_sgl = 0;
	uint32_t	max_sge = 0;

	/*
	 * IOs can't use the parent DMA tag and must create their
	 * own, based primarily on a restricted number of DMA segments.
* This is more of a BSD requirement than a SLI Port requirement */ ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl); ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge); if (bus_dma_tag_create(ocs->dmat, 1, /* byte alignment */ 0, /* no boundary restrictions */ BUS_SPACE_MAXADDR, /* no minimum low address */ BUS_SPACE_MAXADDR, /* no maximum high address */ NULL, /* no filter function */ NULL, /* or arguments */ BUS_SPACE_MAXSIZE, /* max size covered by tag */ max_sgl, /* segment count restrictions */ max_sge, /* segment length restrictions */ 0, /* flags */ NULL, /* no lock manipulation function */ NULL, /* or arguments */ &ocs->buf_dmat)) { device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__); return -1; } return 0; } int32_t ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len) { return -1; } /** * @brief return pointer to ocs structure given instance index * * A pointer to an ocs structure is returned given an instance index. * * @param index index to ocs_devices array * * @return ocs pointer */ ocs_t *ocs_get_instance(uint32_t index) { if (index < ARRAY_SIZE(ocs_devices)) { return ocs_devices[index]; } return NULL; } /** * @brief Return instance index of an opaque ocs structure * * Returns the ocs instance index * * @param os pointer to ocs instance * * @return pointer to ocs instance index */ uint32_t ocs_instance(void *os) { ocs_t *ocs = os; return ocs->instance_index; } static device_method_t ocs_methods[] = { DEVMETHOD(device_probe, ocs_pci_probe), DEVMETHOD(device_attach, ocs_pci_attach), DEVMETHOD(device_detach, ocs_pci_detach), DEVMETHOD(device_shutdown, ocs_pci_shutdown), {0, 0} }; static driver_t ocs_driver = { "ocs_fc", ocs_methods, sizeof(struct ocs_softc) }; static devclass_t ocs_devclass; DRIVER_MODULE(ocs_fc, pci, ocs_driver, ocs_devclass, 0, 0); MODULE_VERSION(ocs_fc, 1); Index: head/sys/dev/ocs_fc/ocs_xport.c =================================================================== --- head/sys/dev/ocs_fc/ocs_xport.c 
(revision 343348) +++ head/sys/dev/ocs_fc/ocs_xport.c (revision 343349) @@ -1,1107 +1,1103 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ /** * @file * FC transport API * */ #include "ocs.h" #include "ocs_device.h" static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg); static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg); /** * @brief Post node event callback argument. */ typedef struct { ocs_sem_t sem; ocs_node_t *node; ocs_sm_event_t evt; void *context; } ocs_xport_post_node_event_t; /** * @brief Allocate a transport object. * * @par Description * A transport object is allocated, and associated with a device instance. * * @param ocs Pointer to device instance. * * @return Returns the pointer to the allocated transport object, or NULL if failed. */ ocs_xport_t * ocs_xport_alloc(ocs_t *ocs) { ocs_xport_t *xport; ocs_assert(ocs, NULL); xport = ocs_malloc(ocs, sizeof(*xport), OCS_M_ZERO); if (xport != NULL) { xport->ocs = ocs; } return xport; } /** * @brief Create the RQ threads and the circular buffers used to pass sequences. * * @par Description * Creates the circular buffers and the servicing threads for RQ processing. * * @param xport Pointer to transport object * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static void ocs_xport_rq_threads_teardown(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t i; if (xport->num_rq_threads == 0 || xport->rq_thread_info == NULL) { return; } /* Abort any threads */ for (i = 0; i < xport->num_rq_threads; i++) { if (xport->rq_thread_info[i].thread_started) { ocs_thread_terminate(&xport->rq_thread_info[i].thread); /* wait for the thread to exit */ ocs_log_debug(ocs, "wait for thread %d to exit\n", i); while (xport->rq_thread_info[i].thread_started) { ocs_udelay(10000); } ocs_log_debug(ocs, "thread %d to exited\n", i); } if (xport->rq_thread_info[i].seq_cbuf != NULL) { ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf); xport->rq_thread_info[i].seq_cbuf = NULL; } } } /** * @brief Create the RQ threads and the circular buffers used to pass sequences. * * @par Description * Creates the circular buffers and the servicing threads for RQ processing. * * @param xport Pointer to transport object. * @param num_rq_threads Number of RQ processing threads that the * driver creates. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_xport_rq_threads_create(ocs_xport_t *xport, uint32_t num_rq_threads) { ocs_t *ocs = xport->ocs; int32_t rc = 0; uint32_t i; xport->num_rq_threads = num_rq_threads; ocs_log_debug(ocs, "number of RQ threads %d\n", num_rq_threads); if (num_rq_threads == 0) { return 0; } /* Allocate the space for the thread objects */ xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M_ZERO); if (xport->rq_thread_info == NULL) { ocs_log_err(ocs, "memory allocation failure\n"); return -1; } /* Create the circular buffers and threads. 
*/ for (i = 0; i < num_rq_threads; i++) { xport->rq_thread_info[i].ocs = ocs; xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR); if (xport->rq_thread_info[i].seq_cbuf == NULL) { goto ocs_xport_rq_threads_create_error; } ocs_snprintf(xport->rq_thread_info[i].thread_name, sizeof(xport->rq_thread_info[i].thread_name), "ocs_unsol_rq:%d:%d", ocs->instance_index, i); rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread, xport->rq_thread_info[i].thread_name, &xport->rq_thread_info[i], OCS_THREAD_RUN); if (rc) { ocs_log_err(ocs, "ocs_thread_create failed: %d\n", rc); goto ocs_xport_rq_threads_create_error; } xport->rq_thread_info[i].thread_started = TRUE; } return 0; ocs_xport_rq_threads_create_error: ocs_xport_rq_threads_teardown(xport); return -1; } /** * @brief Do as much allocation as possible, but do not initialization the device. * * @par Description * Performs the functions required to get a device ready to run. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t ocs_xport_attach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t max_sgl; uint32_t n_sgl; uint32_t i; uint32_t value; uint32_t max_remote_nodes; /* booleans used for cleanup if initialization fails */ uint8_t io_pool_created = FALSE; uint8_t node_pool_created = FALSE; uint8_t rq_threads_created = FALSE; ocs_list_init(&ocs->domain_list, ocs_domain_t, link); for (i = 0; i < SLI4_MAX_FCFI; i++) { xport->fcfi[i].hold_frames = 1; ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i); ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link); } rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version); if (rc) { ocs_log_test(ocs, "can't set OCS_HW_WAR_VERSION\n"); return -1; } rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC); if (rc) { ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc); return -1; } else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) { ocs_log_debug(ocs, "stopping after ocs_hw_setup\n"); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce); ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce); ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy); ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta); ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value); ocs_log_debug(ocs, "RQ Selection Policy: %d\n", value); ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def); ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; n_sgl = MIN(OCS_FC_MAX_SGL, max_sgl); /* EVT: For chained SGL testing */ if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) { n_sgl = 4; } /* Note: number of SGLs must be set for ocs_node_create_pool */ if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc); return -1; } else { ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl); } ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, 
&max_remote_nodes); if (!ocs->max_remote_nodes) ocs->max_remote_nodes = max_remote_nodes; rc = ocs_node_create_pool(ocs, ocs->max_remote_nodes); if (rc) { ocs_log_err(ocs, "Can't allocate node pool\n"); goto ocs_xport_attach_cleanup; } else { node_pool_created = TRUE; } /* EVT: if testing chained SGLs allocate OCS_FC_MAX_SGL SGE's in the IO */ xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios, (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl); if (xport->io_pool == NULL) { ocs_log_err(ocs, "Can't allocate IO pool\n"); goto ocs_xport_attach_cleanup; } else { io_pool_created = TRUE; } /* * setup the RQ processing threads */ if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) { ocs_log_err(ocs, "failure creating RQ threads\n"); goto ocs_xport_attach_cleanup; } rq_threads_created = TRUE; return 0; ocs_xport_attach_cleanup: if (io_pool_created) { ocs_io_pool_free(xport->io_pool); } if (node_pool_created) { ocs_node_free_pool(ocs); } - if (rq_threads_created) { - ocs_xport_rq_threads_teardown(xport); - } - return -1; } /** * @brief Determines how to setup auto Xfer ready. * * @par Description * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. */ static int32_t ocs_xport_initialize_auto_xfer_ready(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t auto_xfer_rdy; char prop_buf[32]; uint32_t ramdisc_blocksize = 512; uint8_t p_type = 0; ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &auto_xfer_rdy); if (!auto_xfer_rdy) { ocs->auto_xfer_rdy_size = 0; ocs_log_test(ocs, "Cannot enable auto xfer rdy for this port\n"); return 0; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } /* * Determine if we are doing protection in the backend. We are looking * at the modules parameters here. 
The backend cannot allow a format * command to change the protection mode when using this feature, * otherwise the firmware will not do the proper thing. */ if (ocs_get_property("p_type", prop_buf, sizeof(prop_buf)) == 0) { p_type = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) { ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("external_dif", prop_buf, sizeof(prop_buf)) == 0) { if(ocs_strlen(prop_buf)) { if (p_type == 0) { p_type = 1; } } } if (p_type != 0) { if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, ramdisc_blocksize)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, p_type)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc); return -1; } } ocs_log_debug(ocs, "Auto xfer rdy is enabled, p_type=%d, blksize=%d\n", p_type, ramdisc_blocksize); return 0; } /** * @brief Initializes the device. * * @par Description * Performs the functions required to make a device functional. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t ocs_xport_initialize(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t i; uint32_t max_hw_io; uint32_t max_sgl; uint32_t hlm; uint32_t rq_limit; uint32_t dif_capable; uint8_t dif_separate = 0; char prop_buf[32]; /* booleans used for cleanup if initialization fails */ uint8_t ini_device_set = FALSE; uint8_t tgt_device_set = FALSE; uint8_t hw_initialized = FALSE; ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; if (ocs->enable_hlm) { ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm); if (!hlm) { ocs->enable_hlm = FALSE; ocs_log_err(ocs, "Cannot enable high login mode for this port\n"); } else { ocs_log_debug(ocs, "High login mode is enabled\n"); if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) { ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc); return -1; } } } /* validate the auto xfer_rdy size */ if (ocs->auto_xfer_rdy_size > 0 && (ocs->auto_xfer_rdy_size < 2048 || ocs->auto_xfer_rdy_size > 65536)) { ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n"); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs->auto_xfer_rdy_size > 0) { if (ocs_xport_initialize_auto_xfer_ready(xport)) { ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc); return -1; } if (ocs->esoc){ ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE); } } if (ocs->explicit_buffer_list) { /* Are pre-registered SGL's required? 
*/ ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i); if (i == TRUE) { ocs_log_err(ocs, "Explicit Buffer List not supported on this device, not enabled\n"); } else { ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE); } } if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the toplogy\n", ocs->desc); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT); if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc); return -1; } /* currently only lancer support setting the CRC seed value */ if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) { if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc); return -1; } } /* Set the Dif mode */ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) { if (dif_capable) { if (ocs_get_property("dif_separate", prop_buf, sizeof(prop_buf)) == 0) { dif_separate = ocs_strtoul(prop_buf, 0, 0); } if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE, (dif_separate == 0 ? 
OCS_HW_DIF_MODE_INLINE : OCS_HW_DIF_MODE_SEPARATE)))) { ocs_log_err(ocs, "Requested DIF MODE not supported\n"); } } } if (ocs->target_io_timer_sec) { ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec); ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, TRUE); } ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs); ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV); /* Initialize vport list */ ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link); ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index); ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link); ocs_atomic_init(&xport->io_active_count, 0); ocs_atomic_init(&xport->io_pending_count, 0); ocs_atomic_init(&xport->io_total_free, 0); ocs_atomic_init(&xport->io_total_pending, 0); ocs_atomic_init(&xport->io_alloc_failed_count, 0); ocs_atomic_init(&xport->io_pending_recursing, 0); ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs)); rc = ocs_hw_init(&ocs->hw); if (rc) { ocs_log_err(ocs, "ocs_hw_init failure\n"); goto ocs_xport_init_cleanup; } else { hw_initialized = TRUE; } rq_limit = max_hw_io/2; if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc); } if (ocs->config_tgt) { rc = ocs_scsi_tgt_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize target\n"); goto ocs_xport_init_cleanup; } else { tgt_device_set = TRUE; } } if (ocs->enable_ini) { rc = ocs_scsi_ini_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize initiator\n"); goto ocs_xport_init_cleanup; } else { ini_device_set = TRUE; } } /* Add vports */ if (ocs->num_vports != 0) { uint32_t 
max_vports; ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports); if (ocs->num_vports < max_vports) { ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports); for (i = 0; i < ocs->num_vports; i++) { ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL); } } else { ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1); goto ocs_xport_init_cleanup; } } return 0; ocs_xport_init_cleanup: if (ini_device_set) { ocs_scsi_ini_del_device(ocs); } if (tgt_device_set) { ocs_scsi_tgt_del_device(ocs); } if (hw_initialized) { /* ocs_hw_teardown can only execute after ocs_hw_init */ ocs_hw_teardown(&ocs->hw); } return -1; } /** * @brief Detaches the transport from the device. * * @par Description * Performs the functions required to shut down a device. * * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. */ int32_t ocs_xport_detach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; /* free resources associated with target-server and initiator-client */ if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); if (ocs->enable_ini) { ocs_scsi_ini_del_device(ocs); /*Shutdown FC Statistics timer*/ if (ocs_timer_pending(&ocs->xport->stats_timer)) ocs_del_timer(&ocs->xport->stats_timer); } ocs_hw_teardown(&ocs->hw); return 0; } /** * @brief domain list empty callback * * @par Description * Function is invoked when the device domain list goes empty. By convention * @c arg points to an ocs_sem_t instance, that is incremented. * * @param ocs Pointer to device object. * @param arg Pointer to semaphore instance. * * @return None. */ static void ocs_xport_domain_list_empty_cb(ocs_t *ocs, void *arg) { ocs_sem_t *sem = arg; ocs_assert(ocs); ocs_assert(sem); ocs_sem_v(sem); } /** * @brief post node event callback * * @par Description * This function is called from the mailbox completion interrupt context to post an * event to a node object. 
By doing this in the interrupt context, it has
 * the benefit of only posting events in the interrupt context, deferring the need to
 * create a per event node lock.
 *
 * @param hw Pointer to HW structure.
 * @param status Completion status for mailbox command.
 * @param mqe Mailbox queue completion entry.
 * @param arg Callback argument.
 *
 * @return Returns 0 on success, a negative error code value on failure.
 */
static int32_t
ocs_xport_post_node_event_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_xport_post_node_event_t *payload = arg;

	if (payload != NULL) {
		/* Deliver the deferred event to the node state machine, then
		 * wake the caller blocked on the counting semaphore in
		 * ocs_xport_control(OCS_XPORT_POST_NODE_EVENT). */
		ocs_node_post_event(payload->node, payload->evt, payload->context);
		ocs_sem_v(&payload->sem);
	}

	return 0;
}

/**
 * @brief Initiate force free.
 *
 * @par Description
 * Perform force free of OCS: every domain on the device is torn down
 * unconditionally (used when an orderly shutdown cannot work, e.g. after a
 * PHYSDEV reset).
 *
 * @param xport Pointer to transport object.
 *
 * @return None.
 */
static void
ocs_xport_force_free(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	ocs_domain_t *domain;
	ocs_domain_t *next;

	ocs_log_debug(ocs, "reset required, do force shutdown\n");
	ocs_device_lock(ocs);
	/* safe variant: ocs_domain_force_free() removes the entry under us */
	ocs_list_foreach_safe(&ocs->domain_list, domain, next) {
		ocs_domain_force_free(domain);
	}
	ocs_device_unlock(ocs);
}

/**
 * @brief Perform transport attach function.
 *
 * @par Description
 * Perform the attach function, which for the FC transport makes a HW call
 * to bring up the link.
 *
 * @param xport pointer to transport object.
 * @param cmd command to execute.
 *
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_ONLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_OFFLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_SHUTDOWN)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_POST_NODE_EVENT, ocs_node_t *node, ocs_sm_event_t, void *context)
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...)
{
	uint32_t rc = 0;
	ocs_t *ocs = NULL;
	va_list argp;

	ocs_assert(xport, -1);
	ocs_assert(xport->ocs, -1);
	ocs = xport->ocs;

	switch (cmd) {
	case OCS_XPORT_PORT_ONLINE:
	{
		/* Bring the port on-line */
		rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
		if (rc) {
			ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc);
		} else {
			/* remember requested state so OCS_XPORT_CONFIG_PORT_STATUS
			 * can report it later */
			xport->configured_link_state = cmd;
		}
		break;
	}
	case OCS_XPORT_PORT_OFFLINE:
	{
		if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
			ocs_log_err(ocs, "port shutdown failed\n");
		} else {
			xport->configured_link_state = cmd;
		}
		break;
	}
	case OCS_XPORT_SHUTDOWN:
	{
		ocs_sem_t sem;
		uint32_t reset_required;

		/* if a PHYSDEV reset was performed (e.g. hw dump), will affect
		 * all PCI functions; orderly shutdown won't work, just force free
		 */
		/* TODO: need to poll this regularly... */
		if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) {
			/* query failed: assume no reset pending and attempt
			 * an orderly shutdown below */
			reset_required = 0;
		}

		if (reset_required) {
			ocs_log_debug(ocs, "reset required, do force shutdown\n");
			ocs_xport_force_free(xport);
			break;
		}
		/* Orderly shutdown: wait (bounded) for the domain list to drain.
		 * The semaphore is posted by ocs_xport_domain_list_empty_cb. */
		ocs_sem_init(&sem, 0, "domain_list_sem");
		ocs_register_domain_list_empty_cb(ocs, ocs_xport_domain_list_empty_cb, &sem);

		if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
			ocs_log_debug(ocs, "port shutdown failed, do force shutdown\n");
			ocs_xport_force_free(xport);
		} else {
			ocs_log_debug(ocs, "Waiting %d seconds for domain shutdown.\n", (OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC/1000000));

			rc = ocs_sem_p(&sem, OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC);
			if (rc) {
				/* timeout: fall back to force free */
				ocs_log_debug(ocs, "Note: Domain shutdown timed out\n");
				ocs_xport_force_free(xport);
			}
		}

		/* deregister callback before the stack semaphore goes out of scope */
		ocs_register_domain_list_empty_cb(ocs, NULL, NULL);

		/* Free up any saved virtual ports */
		ocs_vport_del_all(ocs);
		break;
	}

	/*
	 * POST_NODE_EVENT: post an event to a node object
	 *
	 * This transport function is used to post an event to a node object. It does
	 * this by submitting a NOP mailbox command to defer execution to the
	 * interrupt context (thereby enforcing the serialized execution of event posting
	 * to the node state machine instances)
	 *
	 * A counting semaphore is used to make the call synchronous (we wait until
	 * the callback increments the semaphore before returning (or times out)
	 */
	case OCS_XPORT_POST_NODE_EVENT:
	{
		ocs_node_t *node;
		ocs_sm_event_t evt;
		void *context;
		ocs_xport_post_node_event_t payload;
		ocs_t *ocs;	/* NOTE: intentionally shadows outer 'ocs' with the node's device */
		ocs_hw_t *hw;

		/* Retrieve arguments */
		va_start(argp, cmd);
		node = va_arg(argp, ocs_node_t*);
		evt = va_arg(argp, ocs_sm_event_t);
		context = va_arg(argp, void *);
		va_end(argp);

		ocs_assert(node, -1);
		ocs_assert(node->ocs, -1);

		ocs = node->ocs;
		hw = &ocs->hw;

		/* if node's state machine is disabled, don't bother continuing */
		if (!node->sm.current_state) {
			ocs_log_test(ocs, "node %p state machine disabled\n", node);
			return -1;
		}

		/* Setup payload */
		ocs_memset(&payload, 0, sizeof(payload));
		ocs_sem_init(&payload.sem, 0, "xport_post_node_Event");
		payload.node = node;
		payload.evt = evt;
		payload.context = context;

		if (ocs_hw_async_call(hw, ocs_xport_post_node_event_cb, &payload)) {
			ocs_log_test(ocs, "ocs_hw_async_call failed\n");
			rc = -1;
			break;
		}

		/* Wait for completion: the callback V's the semaphore from
		 * mailbox-completion (interrupt) context */
		if (ocs_sem_p(&payload.sem, OCS_SEM_FOREVER)) {
			ocs_log_test(ocs, "POST_NODE_EVENT: sem wait failed\n");
			rc = -1;
		}

		break;
	}
	/*
	 * Set wwnn for the port. This will be used instead of the default provided by FW.
	 */
	case OCS_XPORT_WWNN_SET:
	{
		uint64_t wwnn;

		/* Retrieve arguments */
		va_start(argp, cmd);
		wwnn = va_arg(argp, uint64_t);
		va_end(argp);

		ocs_log_debug(ocs, " WWNN %016" PRIx64 "\n", wwnn);
		xport->req_wwnn = wwnn;

		break;
	}
	/*
	 * Set wwpn for the port. This will be used instead of the default provided by FW.
	 */
	case OCS_XPORT_WWPN_SET:
	{
		uint64_t wwpn;

		/* Retrieve arguments */
		va_start(argp, cmd);
		wwpn = va_arg(argp, uint64_t);
		va_end(argp);

		ocs_log_debug(ocs, " WWPN %016" PRIx64 "\n", wwpn);
		xport->req_wwpn = wwpn;

		break;
	}

	default:
		break;
	}
	return rc;
}

/**
 * @brief Return status on a link.
 *
 * @par Description
 * Returns status information about a link.
 *
 * @param xport Pointer to transport object.
 * @param cmd Command to execute.
 * @param result Pointer to result value.
 *
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_PORT_STATUS)
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_SPEED, ocs_xport_stats_t *result)
 *	return link speed in MB/sec
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, ocs_xport_stats_t *result)
 *	[in] *result is speed to check in MB/s
 *	returns 1 if supported, 0 if not
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STATISTICS, ocs_xport_stats_t *result)
 *	return link/host port stats
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STAT_RESET, ocs_xport_stats_t *result)
 *	resets link/host stats
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result)
{
	uint32_t rc = 0;
	ocs_t *ocs = NULL;
	ocs_xport_stats_t value;
	ocs_hw_rtn_e hw_rc;

	ocs_assert(xport, -1);
	ocs_assert(xport->ocs, -1);

	ocs = xport->ocs;

	switch (cmd) {
	case OCS_XPORT_CONFIG_PORT_STATUS:
		ocs_assert(result, -1);
		if (xport->configured_link_state == 0) {
			/* Initial state is offline. configured_link_state is */
			/* set to online explicitly when port is brought online. */
			xport->configured_link_state = OCS_XPORT_PORT_OFFLINE;
		}
		result->value = xport->configured_link_state;
		break;

	case OCS_XPORT_PORT_STATUS:
		ocs_assert(result, -1);
		/* Determine port status based on link speed: nonzero speed
		 * reports "up" (1), zero reports "down" (0). */
		hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value);
		if (hw_rc == OCS_HW_RTN_SUCCESS) {
			if (value.value == 0) {
				result->value = 0;
			} else {
				result->value = 1;
			}
			rc = 0;
		} else {
			rc = -1;
		}
		break;

	case OCS_XPORT_LINK_SPEED: {
		uint32_t speed;

		ocs_assert(result, -1);
		result->value = 0;

		rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed);
		if (rc == 0) {
			result->value = speed;
		}
		break;
	}

	case OCS_XPORT_IS_SUPPORTED_LINK_SPEED: {
		uint32_t speed;
		uint32_t link_module_type;

		ocs_assert(result, -1);
		speed = result->value;	/* speed to check is passed IN via result */

		rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type);
		if (rc == 0) {
			/* map MB/s to module-capability bit; unknown speeds
			 * report unsupported (0) */
			switch(speed) {
			case 1000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_1GB) != 0; break;
			case 2000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_2GB) != 0; break;
			case 4000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_4GB) != 0; break;
			case 8000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_8GB) != 0; break;
			case 10000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_10GB) != 0; break;
			case 16000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_16GB) != 0; break;
			case 32000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_32GB) != 0; break;
			default:	rc = 0; break;
			}
		} else {
			rc = 0;
		}
		break;
	}
	case OCS_XPORT_LINK_STATISTICS:
		/* copy out the cached stats snapshot under the device lock */
		ocs_device_lock(ocs);
		ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t));
		ocs_device_unlock(ocs);
		break;
	case OCS_XPORT_LINK_STAT_RESET: {
		/* Create a semaphore to synchronize the stat reset process. */
		ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset");

		/* First reset the link stats */
		if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) {
			ocs_log_err(ocs, "%s: Failed to reset link statistics\n", __func__);
			break;
		}

		/* Wait for semaphore to be signaled when the command completes */
		/* TODO: Should there be a timeout on this?  If so, how long? */
		if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
			/* Undefined failure */
			ocs_log_test(ocs, "ocs_sem_p failed\n");
			rc = -ENXIO;
			break;
		}

		/* Next reset the host stats */
		if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1, ocs_xport_host_stats_cb, result)) != 0) {
			ocs_log_err(ocs, "%s: Failed to reset host statistics\n", __func__);
			break;
		}

		/* Wait for semaphore to be signaled when the command completes */
		if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
			/* Undefined failure */
			ocs_log_test(ocs, "ocs_sem_p failed\n");
			rc = -ENXIO;
			break;
		}
		break;
	}
	case OCS_XPORT_IS_QUIESCED:
		/* quiesced == no domains remain attached */
		ocs_device_lock(ocs);
			result->value = ocs_list_empty(&ocs->domain_list);
		ocs_device_unlock(ocs);
		break;
	default:
		rc = -1;
		break;
	}

	return rc;
}

/**
 * @brief Completion callback for link-statistics retrieval/reset.
 *
 * Copies the HW link error counters into the caller's result structure and
 * posts the semaphore the caller is blocked on in OCS_XPORT_LINK_STAT_RESET.
 */
static void
ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg)
{
	ocs_xport_stats_t *result = arg;

	result->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
	result->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
	result->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
	result->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
	result->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter;

	ocs_sem_v(&(result->stats.semaphore));
}

/**
 * @brief Completion callback for host-statistics retrieval/reset.
 *
 * Copies the HW host tx/rx counters into the caller's result structure and
 * posts the completion semaphore.
 */
static void
ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg)
{
	ocs_xport_stats_t *result = arg;

	result->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
	result->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
	result->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter;
	result->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter;

	ocs_sem_v(&(result->stats.semaphore));
}

/**
 * @brief Free a transport object.
 *
 * @par Description
 * The transport object is freed: IO pool, node pool, the io_pending lock (only
 * if it was initialized), each FCFI's pending-frames lock, and the RQ threads
 * are all released before the xport memory itself.
 *
 * @param xport Pointer to transport object.
 *
 * @return None.
 */
void
ocs_xport_free(ocs_xport_t *xport)
{
	ocs_t *ocs;
	uint32_t i;

	if (xport) {
		ocs = xport->ocs;
		ocs_io_pool_free(xport->io_pool);
		ocs_node_free_pool(ocs);
		/* lock may never have been initialized if xport setup failed early */
		if(mtx_initialized(&xport->io_pending_lock.lock))
			ocs_lock_free(&xport->io_pending_lock);

		for (i = 0; i < SLI4_MAX_FCFI; i++) {
			ocs_lock_free(&xport->fcfi[i].pend_frames_lock);
		}

		ocs_xport_rq_threads_teardown(xport);

		ocs_free(ocs, xport, sizeof(*xport));
	}
}
Index: head/sys/dev/ocs_fc/sli4.c
===================================================================
--- head/sys/dev/ocs_fc/sli4.c	(revision 343348)
+++ head/sys/dev/ocs_fc/sli4.c	(revision 343349)
@@ -1,8645 +1,8646 @@
/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @defgroup sli SLI-4 Base APIs
 */

/**
 * @file
 * All common (i.e. transport-independent) SLI-4 functions are implemented
 * in this file.
 */

#include "sli4.h"

#if defined(OCS_INCLUDE_DEBUG)
#include "ocs_utils.h"
#endif

#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */

static int32_t sli_fw_init(sli4_t *);
static int32_t sli_fw_term(sli4_t *);
static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian);
static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t);
static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t);
static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *);
static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t);

/* Magic payloads written to the bootstrap mailbox by the FW_INITIALIZE /
 * FW_DEINITIALIZE commands (see sli_cmd_fw_initialize/_deinitialize). */
const uint8_t sli4_fw_initialize[] = {
	0xff, 0x12, 0x34, 0xff,
	0xff, 0x56, 0x78, 0xff,
};

const uint8_t sli4_fw_deinitialize[] = {
	0xff, 0xaa, 0xbb, 0xff,
	0xff, 0xcc, 0xdd, 0xff,
};

/* One entry per known (revision, family) pair; consumed to classify the ASIC. */
typedef struct {
	uint32_t rev_id;
	uint32_t family;	/* generation */
	sli4_asic_type_e type;
	sli4_asic_rev_e rev;
} sli4_asic_entry_t;

sli4_asic_entry_t sli4_asic_table[] = {
	{	0x00,	1,	SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_A0},
	{	0x01,	1,	SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_A1},
	{	0x02,	1,	SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_A2},
	{	0x00,	4,	SLI4_ASIC_TYPE_SKYHAWK,	SLI4_ASIC_REV_A0},
	{	0x00,	2,	SLI4_ASIC_TYPE_SKYHAWK,	SLI4_ASIC_REV_A0},
	{	0x10,	1,	SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_B0},
	{	0x10,	0x04,	SLI4_ASIC_TYPE_SKYHAWK,	SLI4_ASIC_REV_B0},
	{	0x11,	0x04,	SLI4_ASIC_TYPE_SKYHAWK,	SLI4_ASIC_REV_B1},
	{	0x0,	0x0a,	SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_A0},
	{	0x10,	0x0b,	SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_B0},
	{	0x30,	0x0b,	SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_D0},
	{	0x3,	0x0b,	SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},
	{	0x0,	0x0c,	SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A0},
	{	0x1,	0x0c,	SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A1},
	{	0x3,	0x0c,	SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},

	{	0x00,	0x05,	SLI4_ASIC_TYPE_CORSAIR,	SLI4_ASIC_REV_A0},
};

/*
 * @brief Convert queue type enum (SLI_QTYPE_*) into a string.
 */
const char *SLI_QNAME[] = {
	"Event Queue",
	"Completion Queue",
	"Mailbox Queue",
	"Work Queue",
	"Receive Queue",
	"Undefined"
};

/**
 * @brief Define the mapping of registers to their BAR and offset.
 *
 * @par Description
 * Although SLI-4 specification defines a common set of registers, their locations
 * (both BAR and offset) depend on the interface type. This array maps a register
 * enum to an array of BAR/offset pairs indexed by the interface type. For
 * example, to access the bootstrap mailbox register on an interface type 0
 * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset.
 *
 * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off)
 * indicates an invalid mapping.
 */
const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = {
	/* SLI4_REG_BMBX */
	{
		{ 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG },
	},
	/* SLI4_REG_EQCQ_DOORBELL */
	{
		{ 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
		{ 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
	},
	/* SLI4_REG_FCOE_RQ_DOORBELL */
	{
		{ 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG },
		{ 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_IO_WQ_DOORBELL */
	{
		{ 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_MQ_DOORBELL */
	{
		{ 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
		{ 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
	},
	/* SLI4_REG_PHYSDEV_CONTROL */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PHSDEV_CONTROL_REG_23 }, { 0, SLI4_PHSDEV_CONTROL_REG_23 },
	},
	/* SLI4_REG_SLIPORT_CONTROL */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_SLIPORT_ERROR1 */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_SLIPORT_ERROR2 */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_SLIPORT_SEMAPHORE */
	{
		{ 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 },
		{ 0, SLI4_PORT_SEMAPHORE_REG_23 }, { 0, SLI4_PORT_SEMAPHORE_REG_23 },
	},
	/* SLI4_REG_SLIPORT_STATUS */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PORT_STATUS_REG_23 }, { 0, SLI4_PORT_STATUS_REG_23 },
	},
	/* SLI4_REG_UERR_MASK_HI */
	{
		{ 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_UERR_MASK_LO */
	{
		{ 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_UERR_STATUS_HI */
	{
		{ 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_UERR_STATUS_LO */
	{
		{ 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_SW_UE_CSR1 */
	{
		{ 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
	/* SLI4_REG_SW_UE_CSR2 */
	{
		{ 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
	},
};

/**
 * @brief Read the given SLI register.
 *
 * @param sli Pointer to the SLI context.
 * @param reg Register name enum.
 *
 * @return Returns the register value, or UINT32_MAX if the register is not
 *         mapped for this interface type.
 */
uint32_t
sli_reg_read(sli4_t *sli, sli4_regname_e reg)
{
	const sli4_reg_t	*r = &(regmap[reg][sli->if_type]);

	if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
		ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
		return UINT32_MAX;
	}

	return ocs_reg_read32(sli->os, r->rset, r->off);
}

/**
 * @brief Write the value to the given SLI register.
 *
 * @param sli Pointer to the SLI context.
 * @param reg Register name enum.
 * @param val Value to write.
 *
 * @return None. Unmapped registers are logged and the write is dropped.
 */
void
sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val)
{
	const sli4_reg_t	*r = &(regmap[reg][sli->if_type]);

	if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
		ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
		return;
	}

	ocs_reg_write32(sli->os, r->rset, r->off, val);
}

/**
 * @brief Check if the SLI_INTF register is valid.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static uint8_t
sli_intf_valid_check(uint32_t val)
{
	return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID;
}

/**
 * @brief Retrieve the SLI revision level.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI revision level.
 */
static uint8_t
sli_intf_sli_revision(uint32_t val)
{
	return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK);
}

/** @brief Retrieve the SLI family (generation) field from SLI_INTF. */
static uint8_t
sli_intf_sli_family(uint32_t val)
{
	return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK);
}

/**
 * @brief Retrieve the SLI interface type.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI interface type.
 */
static uint8_t
sli_intf_if_type(uint32_t val)
{
	return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK);
}

/**
 * @brief Retrieve PCI revision ID.
 *
 * @param val 32-bit PCI CLASS_REVISION register value.
 *
 * @return Returns the PCI revision ID.
 */
static uint8_t
sli_pci_rev_id(uint32_t val)
{
	return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK);
}

/**
 * @brief retrieve SLI ASIC generation
 *
 * @param val 32-bit SLI_ASIC_ID register value
 *
 * @return SLI ASIC generation
 */
static uint8_t
sli_asic_gen(uint32_t val)
{
	return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK);
}

/**
 * @brief Wait for the bootstrap mailbox to report "ready".
 *
 * @par Description
 * Polls the BMBX register, delaying SLI4_BMBX_DELAY_US between reads, until
 * the ready bit is set or the millisecond budget is exhausted. Note that the
 * delay is issued before the first read, so at least one delay always occurs.
 *
 * @param sli4 SLI context pointer.
 * @param msec Number of milliseconds to wait.
 *
 * @return Returns 0 if BMBX is ready, or non-zero otherwise (i.e. time out occurred).
 */
static int32_t
sli_bmbx_wait(sli4_t *sli4, uint32_t msec)
{
	uint32_t	val = 0;

	do {
		ocs_udelay(SLI4_BMBX_DELAY_US);
		val = sli_reg_read(sli4, SLI4_REG_BMBX);
		msec--;
	} while(msec && !(val & SLI4_BMBX_RDY));

	return(!(val & SLI4_BMBX_RDY));
}

/**
 * @brief Write bootstrap mailbox.
 *
 * @par Description
 * Publishes the DMA address of the bootstrap mailbox buffer to the port in
 * two register writes (high half, then low half), waiting for the ready bit
 * between and after the writes.
 *
 * @param sli4 SLI context pointer.
 *
 * @return Returns 0 if command succeeded, or non-zero otherwise.
 */
static int32_t
sli_bmbx_write(sli4_t *sli4)
{
	uint32_t	val = 0;

	/* write buffer location to bootstrap mailbox register */
	ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE);
	val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys);
	sli_reg_write(sli4, SLI4_REG_BMBX, val);

	if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
		ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n");
		return -1;
	}
	val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys);
	sli_reg_write(sli4, SLI4_REG_BMBX, val);

	/* wait for SLI Port to set ready bit */
	return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/);
}

#if defined(OCS_INCLUDE_DEBUG)
/**
 * @ingroup sli
 * @brief Dump BMBX mailbox command.
 *
 * @par Description
 * Convenience function for dumping BMBX mailbox commands. Takes
 * into account which mailbox command is given since SLI_CONFIG
 * commands are special.
 *
 * @b Note: This function takes advantage of
 * the one-command-at-a-time nature of the BMBX to be able to
 * display non-embedded SLI_CONFIG commands. This will not work
 * for mailbox commands on the MQ. Luckily, all current non-emb
 * mailbox commands go through the BMBX.
 *
 * @param sli4 SLI context pointer.
 * @param mbx Pointer to mailbox command to dump.
 * @param prefix Prefix for dump label.
 *
 * @return None.
 */
static void
sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix)
{
	uint32_t size = 0;
	char label[64];
	uint32_t i;
	/* Mailbox diagnostic logging */
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx;

	/* no-op unless MQ dumping was enabled at runtime */
	if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) {
		return;
	}

	if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) {
		sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr;
		sli4_req_hdr_t	*sli_config_hdr;

		if (sli_config->emb) {
			ocs_snprintf(label, sizeof(label), "%s (emb)", prefix);

			/* if embedded, dump entire command */
			sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed;
			size = sizeof(*sli_config) - sizeof(sli_config->payload) +
				sli_config_hdr->request_length + (4*sizeof(uint32_t));
			ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
				(uint8_t *)sli4->bmbx.virt, size);
		} else {
			sli4_sli_config_pmd_t *pmd;

			ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix);

			/* if non-embedded, break up into two parts: SLI_CONFIG hdr
			 * and the payload(s) */
			size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count);
			ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
				(uint8_t *)sli4->bmbx.virt, size);

			/* as sanity check, make sure first PMD matches what was saved */
			pmd = &sli_config->payload.mem;
			if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) &&
			    (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) {
				for (i = 0; i < sli_config->pmd_count; i++, pmd++) {
					sli_config_hdr = sli4->bmbx_non_emb_pmd->virt;
					ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])",
						prefix, i);
					ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
						(uint8_t *)sli4->bmbx_non_emb_pmd->virt,
						sli_config_hdr->request_length + (4*sizeof(uint32_t)));
				}
			} else {
				ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n",
					pmd->address_high, pmd->address_low,
					ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys),
					ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys));
			}
		}
	} else {
		/* not an SLI_CONFIG command, just display first 64 bytes, like we do
		 * for MQEs */
		size = 64;
		ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix,
			(uint8_t *)mbx, size);
	}
}
#endif

/**
 * @ingroup sli
 * @brief Submit a command to the bootstrap mailbox and check the status.
 *
 * @par Description
 * Refuses to submit if the chip reports an error state; otherwise writes the
 * command through the BMBX and evaluates the completion queue entry that the
 * port deposits immediately after the mailbox buffer.
 *
 * @param sli4 SLI context pointer.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_bmbx_command(sli4_t *sli4)
{
	/* the MCQE is placed directly after the SLI4_BMBX_SIZE-byte command */
	void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE;

#if defined(OCS_INCLUDE_DEBUG)
	sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd");
#endif

	if (sli_fw_error_status(sli4) > 0) {
		ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox "
			"command rejected status=%#x error1=%#x error2=%#x\n",
			sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
			sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
			sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
		return -1;
	}

	if (sli_bmbx_write(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n",
			(void*)sli4->bmbx.phys,
			sli_reg_read(sli4, SLI4_REG_BMBX));
		return -1;
	}

	/* check completion queue entry status */
	ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD);
	if (((sli4_mcqe_t *)cqe)->val) {
#if defined(OCS_INCLUDE_DEBUG)
		sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl");
		ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t));
#endif
		return sli_cqe_mq(cqe);
	} else {
		ocs_log_err(sli4->os, "invalid or wrong type\n");
		return -1;
	}
}

/****************************************************************************
 * Messages
 */

/**
 * @ingroup sli
 * @brief Write a CONFIG_LINK command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
*/ int32_t sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size) { sli4_cmd_config_link_t *config_link = buf; ocs_memset(buf, 0, size); config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK; /* Port interprets zero in a field as "use default value" */ return sizeof(sli4_cmd_config_link_t); } /** * @ingroup sli * @brief Write a DOWN_LINK command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ int32_t sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size) { sli4_mbox_command_header_t *hdr = buf; ocs_memset(buf, 0, size); hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK; /* Port interprets zero in a field as "use default value" */ return sizeof(sli4_mbox_command_header_t); } /** * @ingroup sli * @brief Write a DUMP Type 4 command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param wki The well known item ID. * * @return Returns the number of bytes written. */ int32_t sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki) { sli4_cmd_dump4_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP; cmd->type = 4; cmd->wki_selection = wki; return sizeof(sli4_cmd_dump4_t); } /** * @ingroup sli * @brief Write a COMMON_READ_TRANSCEIVER_DATA command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2). * @param dma DMA structure from which the data will be copied. * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. 
*/ int32_t sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num, ocs_dma_t *dma) { sli4_req_common_read_transceiver_data_t *req = NULL; uint32_t sli_config_off = 0; uint32_t payload_size; if (dma == NULL) { /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t), sizeof(sli4_res_common_read_transceiver_data_t)); } else { payload_size = dma->size; } if (sli4->port_type == SLI4_PORT_TYPE_FC) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma); } if (dma == NULL) { req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off); } else { req = (sli4_req_common_read_transceiver_data_t *)dma->virt; ocs_memset(req, 0, dma->size); } req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->page_number = page_num; req->port = sli4->physical_port; return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t)); } /** * @ingroup sli * @brief Write a READ_LINK_STAT command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param req_ext_counters If TRUE, then the extended counters will be requested. * @param clear_overflow_flags If TRUE, then overflow flags will be cleared. * @param clear_all_counters If TRUE, the counters will be cleared. * * @return Returns the number of bytes written. 
*/ int32_t sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size, uint8_t req_ext_counters, uint8_t clear_overflow_flags, uint8_t clear_all_counters) { sli4_cmd_read_link_stats_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT; cmd->rec = req_ext_counters; cmd->clrc = clear_all_counters; cmd->clof = clear_overflow_flags; return sizeof(sli4_cmd_read_link_stats_t); } /** * @ingroup sli * @brief Write a READ_STATUS command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param clear_counters If TRUE, the counters will be cleared. * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size, uint8_t clear_counters) { sli4_cmd_read_status_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS; cmd->cc = clear_counters; return sizeof(sli4_cmd_read_status_t); } /** * @brief Write a FW_DEINITIALIZE command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size) { ocs_memset(buf, 0, size); ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize)); return sizeof(sli4_fw_deinitialize); } /** * @brief Write a FW_INITIALIZE command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. 
*/
static int32_t
sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size)
{
	/* FW_INITIALIZE is a fixed byte sequence, not a structured command */
	ocs_memset(buf, 0, size);
	ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize));

	return sizeof(sli4_fw_initialize);
}

/**
 * @ingroup sli
 * @brief Write an INIT_LINK command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param speed Link speed.
 * @param reset_alpa For native FC, this is the selective reset AL_PA
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa)
{
	sli4_cmd_init_link_t *init_link = buf;

	ocs_memset(buf, 0, size);

	init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK;

	/* Most fields only have meaning for FC links */
	if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) {
		init_link->selective_reset_al_pa = reset_alpa;
		init_link->link_flags.loopback = FALSE;

		init_link->link_speed_selection_code = speed;
		switch (speed) {
		case FC_LINK_SPEED_1G:
		case FC_LINK_SPEED_2G:
		case FC_LINK_SPEED_4G:
		case FC_LINK_SPEED_8G:
		case FC_LINK_SPEED_16G:
		case FC_LINK_SPEED_32G:
			init_link->link_flags.fixed_speed = TRUE;
			break;
		case FC_LINK_SPEED_10G:
			ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed);
			return 0;
		}
		/* NOTE(review): speed values not listed above fall through with
		 * fixed_speed left FALSE (auto-negotiate) -- confirm intended. */

		switch (sli4->config.topology) {
		case SLI4_READ_CFG_TOPO_FC:
			/* Attempt P2P but failover to FC-AL */
			init_link->link_flags.enable_topology_failover = TRUE;

			if (sli_get_asic_type(sli4) == SLI4_ASIC_TYPE_LANCER)
				init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_FAIL_OVER;
			else
				init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER;

			break;
		case SLI4_READ_CFG_TOPO_FC_AL:
			init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY;
			/* Arbitrated loop does not support 16G/32G speeds */
			if ((init_link->link_speed_selection_code == FC_LINK_SPEED_16G) ||
			    (init_link->link_speed_selection_code == FC_LINK_SPEED_32G)) {
				ocs_log_test(sli4->os, "unsupported FC-AL speed %d\n", speed);
				return 0;
			}
			break;
		case SLI4_READ_CFG_TOPO_FC_DA:
			/* NOTE(review): this assigns FC_TOPOLOGY_P2P rather than a
			 * SLI4_INIT_LINK_F_* flag constant -- verify values agree. */
			init_link->link_flags.topology = FC_TOPOLOGY_P2P;
			break;
		default:
			ocs_log_test(sli4->os, "unsupported topology %#x\n",
					sli4->config.topology);
			return 0;
		}

		/* Remaining loop-tuning flags are all left at their defaults */
		init_link->link_flags.unfair = FALSE;
		init_link->link_flags.skip_lirp_lilp = FALSE;
		init_link->link_flags.gen_loop_validity_check = FALSE;
		init_link->link_flags.skip_lisa = FALSE;
		init_link->link_flags.select_hightest_al_pa = FALSE;
	}

	return sizeof(sli4_cmd_init_link_t);
}

/**
 * @ingroup sli
 * @brief Write an INIT_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vfi VFI
 * @param fcfi FCFI
 * @param vpi VPI (Set to -1 if unused.)
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi,
		uint16_t fcfi, uint16_t vpi)
{
	sli4_cmd_init_vfi_t *init_vfi = buf;

	ocs_memset(buf, 0, size);

	init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI;

	init_vfi->vfi = vfi;
	init_vfi->fcfi = fcfi;

	/*
	 * If the VPI is valid, initialize it at the same time as
	 * the VFI
	 */
	if (0xffff != vpi) {
		init_vfi->vp = TRUE;
		init_vfi->vpi = vpi;
	}

	return sizeof(sli4_cmd_init_vfi_t);
}

/**
 * @ingroup sli
 * @brief Write an INIT_VPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vpi VPI allocated.
 * @param vfi VFI associated with this VPI.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi)
{
	sli4_cmd_init_vpi_t *init_vpi = buf;

	ocs_memset(buf, 0, size);

	init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI;
	init_vpi->vpi = vpi;
	init_vpi->vfi = vfi;

	return sizeof(sli4_cmd_init_vpi_t);
}

/**
 * @ingroup sli
 * @brief Write a POST_XRI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
* @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param xri_base Starting XRI value for range of XRI given to SLI Port. * @param xri_count Number of XRIs provided to the SLI Port. * * @return Returns the number of bytes written. */ int32_t sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count) { sli4_cmd_post_xri_t *post_xri = buf; ocs_memset(buf, 0, size); post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI; post_xri->xri_base = xri_base; post_xri->xri_count = xri_count; if (sli4->config.auto_xfer_rdy == 0) { post_xri->enx = TRUE; post_xri->val = TRUE; } return sizeof(sli4_cmd_post_xri_t); } /** * @ingroup sli * @brief Write a RELEASE_XRI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param num_xri The number of XRIs to be released. * * @return Returns the number of bytes written. */ int32_t sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri) { sli4_cmd_release_xri_t *release_xri = buf; ocs_memset(buf, 0, size); release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI; release_xri->xri_count = num_xri; return sizeof(sli4_cmd_release_xri_t); } /** * @brief Write a READ_CONFIG command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes * * @return Returns the number of bytes written. */ static int32_t sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size) { sli4_cmd_read_config_t *read_config = buf; ocs_memset(buf, 0, size); read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG; return sizeof(sli4_cmd_read_config_t); } /** * @brief Write a READ_NVPARMS command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. 
*
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size)
{
	sli4_cmd_read_nvparms_t *read_nvparms = buf;

	ocs_memset(buf, 0, size);

	read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS;

	return sizeof(sli4_cmd_read_nvparms_t);
}

/**
 * @brief Write a WRITE_NVPARMS command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param wwpn WWPN to write - pointer to array of 8 uint8_t.
 * @param wwnn WWNN to write - pointer to array of 8 uint8_t.
 * @param hard_alpa Hard ALPA to write.
 * @param preferred_d_id Preferred D_ID to write.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
		uint32_t preferred_d_id)
{
	sli4_cmd_write_nvparms_t *write_nvparms = buf;

	ocs_memset(buf, 0, size);

	write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS;
	/* WWPN/WWNN are fixed 8-byte fields */
	ocs_memcpy(write_nvparms->wwpn, wwpn, 8);
	ocs_memcpy(write_nvparms->wwnn, wwnn, 8);
	write_nvparms->hard_alpa = hard_alpa;
	write_nvparms->preferred_d_id = preferred_d_id;

	return sizeof(sli4_cmd_write_nvparms_t);
}

/**
 * @brief Write a READ_REV command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vpd Pointer to the buffer.
 *
 * @return Returns the number of bytes written.
*/
static int32_t
sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd)
{
	sli4_cmd_read_rev_t *read_rev = buf;

	ocs_memset(buf, 0, size);

	read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV;

	/* VPD data is optional; only request it when a buffer was supplied */
	if (vpd && vpd->size) {
		read_rev->vpd = TRUE;

		read_rev->available_length = vpd->size;

		read_rev->physical_address_low  = ocs_addr32_lo(vpd->phys);
		read_rev->physical_address_high = ocs_addr32_hi(vpd->phys);
	}

	return sizeof(sli4_cmd_read_rev_t);
}

/**
 * @ingroup sli
 * @brief Write a READ_SPARM64 command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer for the service parameters.
 * @param vpi VPI used to determine the WWN.
 *
 * @return Returns the number of bytes written, or -1 on invalid arguments.
 */
int32_t
sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		uint16_t vpi)
{
	sli4_cmd_read_sparm64_t *read_sparm64 = buf;

	ocs_memset(buf, 0, size);

	/* The "special" VPI selector is not supported by this driver */
	if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) {
		ocs_log_test(sli4->os, "special VPI not supported!!!\n");
		return -1;
	}

	if (!dma || !dma->phys) {
		ocs_log_test(sli4->os, "bad DMA buffer\n");
		return -1;
	}

	read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64;

	/* Service parameters are returned via the 64-bit BDE */
	read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
	read_sparm64->bde_64.buffer_length = dma->size;
	read_sparm64->bde_64.u.data.buffer_address_low  = ocs_addr32_lo(dma->phys);
	read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

	read_sparm64->vpi = vpi;

	return sizeof(sli4_cmd_read_sparm64_t);
}

/**
 * @ingroup sli
 * @brief Write a READ_TOPOLOGY command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer for loop map (optional).
 *
 * @return Returns the number of bytes written.
*/
int32_t
sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
{
	sli4_cmd_read_topology_t *topo = buf;

	ocs_memset(buf, 0, size);

	topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY;

	/* The loop-map buffer is optional; attach it only when usable */
	if ((dma != NULL) && (dma->size != 0)) {
		if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
			ocs_log_test(sli4->os, "loop map buffer too small %jd\n", dma->size);
			return 0;
		}

		ocs_memset(dma->virt, 0, dma->size);

		topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64;
		topo->bde_loop_map.buffer_length = dma->size;
		topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
		topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
	}

	return sizeof(sli4_cmd_read_topology_t);
}

/**
 * @ingroup sli
 * @brief Write a REG_FCFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param index FCF index returned by READ_FCF_TABLE.
 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information
 * @param vlan_id VLAN ID tag.
 *
 * @return Returns the number of bytes written.
*/
int32_t
sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index,
		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id)
{
	sli4_cmd_reg_fcfi_t *reg_fcfi = buf;
	uint32_t i;

	ocs_memset(buf, 0, size);

	reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI;

	reg_fcfi->fcf_index = index;

	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		/* The RQ IDs are discrete command fields, hence the switch */
		switch(i) {
		case 0:
			reg_fcfi->rq_id_0 = rq_cfg[0].rq_id;
			break;
		case 1:
			reg_fcfi->rq_id_1 = rq_cfg[1].rq_id;
			break;
		case 2:
			reg_fcfi->rq_id_2 = rq_cfg[2].rq_id;
			break;
		case 3:
			reg_fcfi->rq_id_3 = rq_cfg[3].rq_id;
			break;
		}
		/* Copy the R_CTL/TYPE routing filter for this RQ */
		reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
		reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
		reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
		reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
	}

	/* A zero vlan_id means "no VLAN tagging" */
	if (vlan_id) {
		reg_fcfi->vv = TRUE;
		reg_fcfi->vlan_tag = vlan_id;
	}

	return sizeof(sli4_cmd_reg_fcfi_t);
}

/**
 * @brief Write REG_FCFI_MRQ to provided command buffer
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param mode SLI4_CMD_REG_FCFI_SET_FCFI_MODE or MRQ mode selector.
 * @param fcf_index FCF index returned by READ_FCF_TABLE.
 * @param vlan_id VLAN ID tag.
 * @param rq_selection_policy RQ selection policy
 * @param mrq_bit_mask Bitmask of filters that use MRQ pairs.
 * @param num_mrqs Number of MRQ pairs.
 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information
 *
 * @return returns 0 for success, a negative error code value for failure.
*/
int32_t
sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode,
		     uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy,
		     uint8_t mrq_bit_mask, uint16_t num_mrqs,
		     sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG])
{
	sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf;
	uint32_t i;

	ocs_memset(buf, 0, size);

	reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ;

	/* In FCFI mode only the index and VLAN are programmed; the
	 * RQ routing fields are skipped entirely. */
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		reg_fcfi_mrq->fcf_index = fcf_index;
		if (vlan_id) {
			reg_fcfi_mrq->vv = TRUE;
			reg_fcfi_mrq->vlan_tag = vlan_id;
		}
		goto done;
	}

	reg_fcfi_mrq->mode = mode;
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
		reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
		reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
		reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;

		/* The RQ IDs are discrete command fields, hence the switch */
		switch(i) {
		case 3:
			reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id;
			break;
		case 2:
			reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id;
			break;
		case 1:
			reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id;
			break;
		case 0:
			reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id;
			break;
		}
	}

	reg_fcfi_mrq->rq_selection_policy = rq_selection_policy;
	reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask;
	reg_fcfi_mrq->num_mrq_pairs = num_mrqs;
done:
	return sizeof(sli4_cmd_reg_fcfi_mrq_t);
}

/**
 * @ingroup sli
 * @brief Write a REG_RPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param nport_id Remote F/N_Port_ID.
 * @param rpi Previously-allocated Remote Port Indicator.
 * @param vpi Previously-allocated Virtual Port Indicator.
 * @param dma DMA buffer that contains the remote port's service parameters.
 * @param update Boolean indicating an update to an existing RPI (TRUE)
 * or a new registration (FALSE).
 *
 * @return Returns the number of bytes written.
*/
int32_t
sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi,
			uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi)
{
	sli4_cmd_reg_rpi_t *reg_rpi = buf;

	ocs_memset(buf, 0, size);

	reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI;

	reg_rpi->rpi = rpi;
	reg_rpi->remote_n_port_id = nport_id;
	reg_rpi->upd = update;
	/* ETOW enables T10 PI (DIF) for exchanges on this RPI */
	reg_rpi->etow = enable_t10_pi;

	/* The remote port's service parameters are passed via this BDE */
	reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
	reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN;
	reg_rpi->bde_64.u.data.buffer_address_low  = ocs_addr32_lo(dma->phys);
	reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

	reg_rpi->vpi = vpi;

	return sizeof(sli4_cmd_reg_rpi_t);
}

/**
 * @ingroup sli
 * @brief Write a REG_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param domain Pointer to the domain object.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain)
{
	sli4_cmd_reg_vfi_t *reg_vfi = buf;

	if (!sli4 || !buf || !domain) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI;

	reg_vfi->vfi = domain->indicator;

	reg_vfi->fcfi = domain->fcf_indicator;

	/* TODO contents of domain->dma only valid if topo == FABRIC */
	reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64;
	/* 0x70: length of the fabric service-parameter payload
	 * (NOTE(review): confirm against the SLI-4 spec) */
	reg_vfi->sparm.buffer_length = 0x70;
	reg_vfi->sparm.u.data.buffer_address_low  = ocs_addr32_lo(domain->dma.phys);
	reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys);

	reg_vfi->e_d_tov = sli4->config.e_d_tov;
	reg_vfi->r_a_tov = sli4->config.r_a_tov;

	/* Register the sport's VPI along with the VFI */
	reg_vfi->vp = TRUE;
	reg_vfi->vpi = domain->sport->indicator;
	ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn));
	reg_vfi->local_n_port_id = domain->sport->fc_id;

	return sizeof(sli4_cmd_reg_vfi_t);
}

/**
 * @ingroup sli
 * @brief Write a REG_VPI command to the provided buffer.
* * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param sport Point to SLI Port object. * @param update Boolean indicating whether to update the existing VPI (true) * or create a new VPI (false). * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update) { sli4_cmd_reg_vpi_t *reg_vpi = buf; if (!sli4 || !buf || !sport) { return 0; } ocs_memset(buf, 0, size); reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI; reg_vpi->local_n_port_id = sport->fc_id; reg_vpi->upd = update != 0; ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn)); reg_vpi->vpi = sport->indicator; reg_vpi->vfi = sport->domain->indicator; return sizeof(sli4_cmd_reg_vpi_t); } /** * @brief Write a REQUEST_FEATURES command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param mask Features to request. * @param query Use feature query mode (does not change FW). * * @return Returns the number of bytes written. */ static int32_t sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query) { sli4_cmd_request_features_t *features = buf; ocs_memset(buf, 0, size); features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES; if (query) { features->qry = TRUE; } features->command.dword = mask.dword; return sizeof(sli4_cmd_request_features_t); } /** * @ingroup sli * @brief Write a SLI_CONFIG command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param length Length in bytes of attached command. * @param dma DMA buffer for non-embedded commands. * * @return Returns the number of bytes written. 
*/ int32_t sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma) { sli4_cmd_sli_config_t *sli_config = NULL; if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) { ocs_log_test(sli4->os, "length(%d) > payload(%ld)\n", length, sizeof(sli_config->payload.embed)); return -1; } sli_config = buf; ocs_memset(buf, 0, size); sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG; if (NULL == dma) { sli_config->emb = TRUE; sli_config->payload_length = length; } else { sli_config->emb = FALSE; sli_config->pmd_count = 1; sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys); sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys); sli_config->payload.mem.length = dma->size; sli_config->payload_length = dma->size; #if defined(OCS_INCLUDE_DEBUG) /* save pointer to DMA for BMBX dumping purposes */ sli4->bmbx_non_emb_pmd = dma; #endif } return offsetof(sli4_cmd_sli_config_t, payload.embed); } /** * @brief Initialize SLI Port control register. * * @param sli4 SLI context pointer. * @param endian Endian value to write. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian) { uint32_t iter; int32_t rc; rc = -1; /* Initialize port, endian */ sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP); for (iter = 0; iter < 3000; iter ++) { ocs_udelay(SLI4_INIT_PORT_DELAY_US); if (sli_fw_ready(sli4) == 1) { rc = 0; break; } } if (rc != 0) { ocs_log_crit(sli4->os, "port failed to become ready after initialization\n"); } return rc; } /** * @ingroup sli * @brief Write a UNREG_FCFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param indicator Indicator value. * * @return Returns the number of bytes written. 
*/
int32_t
sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator)
{
	sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI;

	unreg_fcfi->fcfi = indicator;

	return sizeof(sli4_cmd_unreg_fcfi_t);
}

/**
 * @ingroup sli
 * @brief Write an UNREG_RPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param indicator Indicator value.
 * @param which Type of unregister, such as node, port, domain, or FCF.
 * @param fc_id FC address.
 *
 * @return Returns the number of bytes written, or 0 for an unknown type.
 */
int32_t
sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which,
		uint32_t fc_id)
{
	sli4_cmd_unreg_rpi_t *unreg_rpi = buf;
	uint8_t index_indicator = 0;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI;

	/* Map the resource type onto the command's index-indicator field */
	switch (which) {
	case SLI_RSRC_FCOE_RPI:
		index_indicator = SLI4_UNREG_RPI_II_RPI;
		/* fc_id == UINT32_MAX means "no destination N_Port filter" */
		if (fc_id != UINT32_MAX) {
			unreg_rpi->dp = TRUE;
			/* FC IDs are 24 bits wide */
			unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff;
		}
		break;
	case SLI_RSRC_FCOE_VPI:
		index_indicator = SLI4_UNREG_RPI_II_VPI;
		break;
	case SLI_RSRC_FCOE_VFI:
		index_indicator = SLI4_UNREG_RPI_II_VFI;
		break;
	case SLI_RSRC_FCOE_FCFI:
		index_indicator = SLI4_UNREG_RPI_II_FCFI;
		break;
	default:
		ocs_log_test(sli4->os, "unknown type %#x\n", which);
		return 0;
	}

	unreg_rpi->ii = index_indicator;
	unreg_rpi->index = indicator;

	return sizeof(sli4_cmd_unreg_rpi_t);
}

/**
 * @ingroup sli
 * @brief Write an UNREG_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param domain Pointer to the domain object
 * @param which Type of unregister, such as domain, FCFI, or everything.
 *
 * @return Returns the number of bytes written.
*/
int32_t
sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which)
{
	sli4_cmd_unreg_vfi_t *unreg_vfi = buf;

	if (!sli4 || !buf || !domain) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI;

	/* Choose the index to unregister; UINT16_MAX means "all" */
	switch (which) {
	case SLI4_UNREG_TYPE_DOMAIN:
		unreg_vfi->index = domain->indicator;
		break;
	case SLI4_UNREG_TYPE_FCF:
		unreg_vfi->index = domain->fcf_indicator;
		break;
	case SLI4_UNREG_TYPE_ALL:
		unreg_vfi->index = UINT16_MAX;
		break;
	default:
		return 0;
	}

	/* Non-domain unregisters are indexed by FCFI rather than VFI */
	if (SLI4_UNREG_TYPE_DOMAIN != which) {
		unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI;
	}

	return sizeof(sli4_cmd_unreg_vfi_t);
}

/**
 * @ingroup sli
 * @brief Write an UNREG_VPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param indicator Indicator value.
 * @param which Type of unregister: port, domain, FCFI, everything
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which)
{
	sli4_cmd_unreg_vpi_t *unreg_vpi = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI;
	unreg_vpi->index = indicator;
	/* The index-indicator field selects how "index" is interpreted */
	switch (which) {
	case SLI4_UNREG_TYPE_PORT:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI;
		break;
	case SLI4_UNREG_TYPE_DOMAIN:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI;
		break;
	case SLI4_UNREG_TYPE_FCF:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
		break;
	case SLI4_UNREG_TYPE_ALL:
		unreg_vpi->index = UINT16_MAX;	/* override indicator */
		unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
		break;
	default:
		return 0;
	}

	return sizeof(sli4_cmd_unreg_vpi_t);
}

/**
 * @ingroup sli
 * @brief Write an CONFIG_AUTO_XFER_RDY command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
* @param max_burst_len if the write FCP_DL is less than this size,
 * then the SLI port will generate the auto XFER_RDY.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len)
{
	sli4_cmd_config_auto_xfer_rdy_t *req = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY;
	req->max_burst_len = max_burst_len;

	return sizeof(sli4_cmd_config_auto_xfer_rdy_t);
}

/**
 * @ingroup sli
 * @brief Write an CONFIG_AUTO_XFER_RDY_HP command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param max_burst_len if the write FCP_DL is less than this size,
 * then the SLI port will generate the auto XFER_RDY.
 * @param esoc enable start offset computation,
 * @param block_size block size,
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len,
						uint32_t esoc, uint32_t block_size )
{
	sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP;
	req->max_burst_len = max_burst_len;
	/* "High performance" variant adds start-offset computation and
	 * the logical block size used for that computation */
	req->esoc = esoc;
	req->block_size = block_size;

	return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t);
}

/**
 * @brief Write a COMMON_FUNCTION_RESET command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
*/
static int32_t
sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_common_function_reset_t *req;
	uint32_t cfg_off = 0;

	if (sli4->port_type == SLI4_PORT_TYPE_FC) {
		/* Payload length must accommodate both request and response */
		uint32_t payload_len = max(sizeof(sli4_req_common_function_reset_t),
					   sizeof(sli4_res_common_function_reset_t));

		cfg_off = sli_cmd_sli_config(sli4, buf, size, payload_len, NULL);
	}

	req = (sli4_req_common_function_reset_t *)((uint8_t *)buf + cfg_off);
	req->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;

	return(cfg_off + sizeof(sli4_req_common_function_reset_t));
}

/**
 * @brief Write a COMMON_CREATE_CQ command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param eq_id Associated EQ_ID
 * @param ignored This parameter carries the ULP which is only used for WQ and RQs
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
*/ static int32_t sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored) { sli4_req_common_create_cq_v0_t *cqv0 = NULL; sli4_req_common_create_cq_v2_t *cqv2 = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; uint32_t if_type = sli4->if_type; uint32_t page_bytes = 0; uint32_t num_pages = 0; uint32_t cmd_size = 0; uint32_t page_size = 0; uint32_t n_cqe = 0; /* First calculate number of pages and the mailbox cmd length */ switch (if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: page_bytes = SLI_PAGE_SIZE; num_pages = sli_page_count(qmem->size, page_bytes); cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages); break; case SLI4_IF_TYPE_LANCER_FC_ETH: n_cqe = qmem->size / SLI4_CQE_BYTES; switch (n_cqe) { case 256: case 512: case 1024: case 2048: page_size = 1; break; case 4096: page_size = 2; break; default: return 0; } page_bytes = page_size * SLI_PAGE_SIZE; num_pages = sli_page_count(qmem->size, page_bytes); cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages); break; default: ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type); return -1; } /* now that we have the mailbox command size, we can set SLI_CONFIG fields */ if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } switch (if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off); cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cqv0->hdr.version = 0; cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ cqv0->num_pages = num_pages; switch (cqv0->num_pages) { case 1: cqv0->cqecnt = SLI4_CQ_CNT_256; break; case 2: 
cqv0->cqecnt = SLI4_CQ_CNT_512; break; case 4: cqv0->cqecnt = SLI4_CQ_CNT_1024; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages); return -1; } cqv0->evt = TRUE; cqv0->valid = TRUE; /* TODO cq->nodelay = ???; */ /* TODO cq->clswm = ???; */ cqv0->arm = FALSE; cqv0->eq_id = eq_id; for (p = 0, addr = qmem->phys; p < cqv0->num_pages; p++, addr += page_bytes) { cqv0->page_physical_address[p].low = ocs_addr32_lo(addr); cqv0->page_physical_address[p].high = ocs_addr32_hi(addr); } break; case SLI4_IF_TYPE_LANCER_FC_ETH: { cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off); cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cqv2->hdr.version = 2; cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); cqv2->page_size = page_size; /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */ cqv2->num_pages = num_pages; if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) { return 0; } switch (cqv2->num_pages) { case 1: cqv2->cqecnt = SLI4_CQ_CNT_256; break; case 2: cqv2->cqecnt = SLI4_CQ_CNT_512; break; case 4: cqv2->cqecnt = SLI4_CQ_CNT_1024; break; case 8: cqv2->cqecnt = SLI4_CQ_CNT_LARGE; cqv2->cqe_count = n_cqe; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages); return -1; } cqv2->evt = TRUE; cqv2->valid = TRUE; /* TODO cq->nodelay = ???; */ /* TODO cq->clswm = ???; */ cqv2->arm = FALSE; cqv2->eq_id = eq_id; for (p = 0, addr = qmem->phys; p < cqv2->num_pages; p++, addr += page_bytes) { cqv2->page_physical_address[p].low = ocs_addr32_lo(addr); cqv2->page_physical_address[p].high = ocs_addr32_hi(addr); } } break; - default: - ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type); - return -1; - } + } return (sli_config_off + cmd_size); } /** * @brief Write a COMMON_DESTROY_CQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. 
* @param cq_id CQ ID
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id)
{
	sli4_req_common_destroy_cq_t *cq = NULL;
	uint32_t sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_common_destroy_cq_t),
					sizeof(sli4_res_hdr_t)),
				NULL);
	}
	cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off);

	cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ;
	cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) -
					sizeof(sli4_req_hdr_t);

	cq->cq_id = cq_id;

	return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t));
}

/**
 * @brief Write a COMMON_MODIFY_EQ_DELAY command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param q Queue object array.
 * @param num_q Queue object array count.
 * @param shift Phase shift for staggering interrupts.
 * @param delay_mult Delay multiplier for limiting interrupt frequency.
 *
 * @return Returns the number of bytes written.
*/
static int32_t
sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift, uint32_t delay_mult)
{
	sli4_req_common_modify_eq_delay_t *modify_delay = NULL;
	uint32_t sli_config_off = 0;
	int i;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)),
				NULL);
	}

	modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off);

	modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY;
	modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) -
					sizeof(sli4_req_hdr_t);

	modify_delay->num_eq = num_q;

	/* One delay record per EQ; the loop header was garbled in the
	 * extracted source ("for (i = 0; ieq_delay_record...") -- the
	 * "i < num_q; i++) { modify_delay->" span was lost, almost
	 * certainly to '<' HTML-escaping. Reconstructed here. */
	for (i = 0; i < num_q; i++) {
		modify_delay->eq_delay_record[i].eq_id = q[i].id;
		modify_delay->eq_delay_record[i].phase = shift;
		modify_delay->eq_delay_record[i].delay_multiplier = delay_mult;
	}

	return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t));
}

/**
 * @brief Write a COMMON_CREATE_EQ command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param ignored1 Ignored (used for consistency among queue creation functions).
 * @param ignored2 Ignored (used for consistency among queue creation functions).
 *
 * @note Other queue creation routines use the last parameter to pass in
 * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP,
 * so these parameters are ignored
 *
 * @note This creates a Version 0 message
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
		uint16_t ignored1, uint16_t ignored2)
{
	sli4_req_common_create_eq_t	*eq = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	p;
	uintptr_t	addr;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_common_create_eq_t),
				sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
				NULL);
	}
	eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off);

	eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ;
	eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) -
					sizeof(sli4_req_hdr_t);
	/* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
	eq->num_pages = qmem->size / SLI_PAGE_SIZE;
	/* The EQE size is fixed at 4 bytes; page count selects queue depth. */
	switch (eq->num_pages) {
	case 1:
		eq->eqesz = SLI4_EQE_SIZE_4;
		eq->count = SLI4_EQ_CNT_1024;
		break;
	case 2:
		eq->eqesz = SLI4_EQE_SIZE_4;
		eq->count = SLI4_EQ_CNT_2048;
		break;
	case 4:
		eq->eqesz = SLI4_EQE_SIZE_4;
		eq->count = SLI4_EQ_CNT_4096;
		break;
	default:
		ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages);
		return -1;
	}
	eq->valid = TRUE;
	eq->arm = FALSE;
	/* Fixed interrupt-coalescing multiplier for newly created EQs. */
	eq->delay_multiplier = 32;

	/* Hand the queue's backing pages to the hardware, one BDE per page. */
	for (p = 0, addr = qmem->phys;
			p < eq->num_pages;
			p++, addr += SLI_PAGE_SIZE) {
		eq->page_address[p].low = ocs_addr32_lo(addr);
		eq->page_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_common_create_eq_t));
}

/**
 * @brief Write a COMMON_DESTROY_EQ command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param eq_id Queue ID to destroy.
 *
 * @note Other queue creation routines use the last parameter to pass in
 * the associated Q_ID. EQ doesn't have an associated queue so this
 * parameter is ignored.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id)
{
	sli4_req_common_destroy_eq_t	*eq = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_common_destroy_eq_t),
					sizeof(sli4_res_hdr_t)),
				NULL);
	}
	eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off);

	eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ;
	eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) -
					sizeof(sli4_req_hdr_t);

	eq->eq_id = eq_id;

	return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t));
}

/**
 * @brief Write a LOWLEVEL_SET_WATCHDOG command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param timeout watchdog timer timeout in seconds
 *
 * @return void
 */
void
sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout)
{
	sli4_req_lowlevel_set_watchdog_t *req = NULL;
	uint32_t sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_lowlevel_set_watchdog_t),
					sizeof(sli4_res_lowlevel_set_watchdog_t)),
				NULL);
	}
	req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off);

	req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG;
	req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL;
	req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) -
					sizeof(sli4_req_hdr_t);
	/* A timeout of 0 presumably disables the watchdog -- TODO confirm
	 * against the LOWLEVEL_SET_WATCHDOG command description. */
	req->watchdog_timeout = timeout;

	return;
}

/**
 * @brief Write a COMMON_GET_CNTL_ATTRIBUTES command.
 *
 * The request header is written into the external DMA payload buffer
 * (non-embedded form); the response is returned in the same buffer.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer carrying the request/response payload.
 *
 * @return Returns the number of bytes written, or 0 if @c dma is NULL.
 */
static int32_t
sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
{
	sli4_req_hdr_t *hdr = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof(sli4_req_hdr_t),
				dma);
	}

	/* NOTE(review): when dma is NULL this returns 0 even though the
	 * SLI_CONFIG header may already have been written above; callers
	 * appear to treat 0 as "nothing to submit". */
	if (dma == NULL) {
		return 0;
	}

	ocs_memset(dma->virt, 0, dma->size);

	hdr = dma->virt;

	hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES;
	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
	hdr->request_length = dma->size;

	return(sli_config_off + sizeof(sli4_req_hdr_t));
}

/**
 * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma DMA structure from which the data will be copied.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size,
		ocs_dma_t *dma)
{
	sli4_req_hdr_t *hdr = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof(sli4_req_hdr_t), dma);
	}

	/* Same non-embedded payload convention as GET_CNTL_ATTRIBUTES. */
	if (dma == NULL) {
		return 0;
	}

	ocs_memset(dma->virt, 0, dma->size);

	hdr = dma->virt;

	hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES;
	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
	hdr->request_length = dma->size;

	return(sli_config_off + sizeof(sli4_req_hdr_t));
}

/**
 * @brief Write a COMMON_CREATE_MQ_EXT command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ignored This parameter carries the ULP which is only used for WQ and RQs
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size,
			     ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored)
{
	sli4_req_common_create_mq_ext_t	*mq = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	p;
	uintptr_t	addr;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_common_create_mq_ext_t),
				sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
				NULL);
	}
	mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off);

	mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT;
	mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) -
					sizeof(sli4_req_hdr_t);

	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
	mq->num_pages = qmem->size / SLI_PAGE_SIZE;
	/* Ring depth (in entries) follows directly from the page count. */
	switch (mq->num_pages) {
	case 1:
		mq->ring_size = SLI4_MQE_SIZE_16;
		break;
	case 2:
		mq->ring_size = SLI4_MQE_SIZE_32;
		break;
	case 4:
		mq->ring_size = SLI4_MQE_SIZE_64;
		break;
	case 8:
		mq->ring_size = SLI4_MQE_SIZE_128;
		break;
	default:
		ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages);
		return -1;
	}

	/* TODO break this down by sli4->config.topology */
	mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE;

	/* The CQ ID lives in a different field depending on the command
	 * version the firmware supports. */
	if (sli4->config.mq_create_version) {
		mq->cq_id_v1 = cq_id;
		mq->hdr.version = 1;
	}
	else {
		mq->cq_id_v0 = cq_id;
	}
	mq->val = TRUE;

	/* Hand the queue's backing pages to the hardware, one entry per page. */
	for (p = 0, addr = qmem->phys;
			p < mq->num_pages;
			p++, addr += SLI_PAGE_SIZE) {
		mq->page_physical_address[p].low = ocs_addr32_lo(addr);
		mq->page_physical_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t));
}

/**
 * @brief Write a COMMON_DESTROY_MQ command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param mq_id MQ ID
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id)
{
	sli4_req_common_destroy_mq_t	*mq = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_common_destroy_mq_t),
					sizeof(sli4_res_hdr_t)),
				NULL);
	}
	mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off);

	mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ;
	mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) -
					sizeof(sli4_req_hdr_t);

	mq->mq_id = mq_id;

	return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_NOP command
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param context NOP context value (passed to response, except on FC/FCoE).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context)
{
	sli4_req_common_nop_t *nop = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Payload length must accommodate both request and response */
				max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)),
				NULL);
	}

	nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off);

	nop->hdr.opcode = SLI4_OPC_COMMON_NOP;
	nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	/* Fixed 8-byte payload: the 64-bit context value below. */
	nop->hdr.request_length = 8;

	ocs_memcpy(&nop->context, &context, sizeof(context));

	return(sli_config_off + sizeof(sli4_req_common_nop_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype)
{
	sli4_req_common_get_resource_extent_info_t *extent = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof(sli4_req_common_get_resource_extent_info_t),
				NULL);
	}

	extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off);

	extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO;
	extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	/* Payload is the single 4-byte resource_type word. */
	extent->hdr.request_length = 4;

	extent->resource_type = rtype;

	return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_GET_SLI4_PARAMETERS command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_hdr_t	*hdr = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		/* Size for the (larger) response; the request is header-only. */
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof(sli4_res_common_get_sli4_parameters_t),
				NULL);
	}

	hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off);

	hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS;
	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
	/* 0x50 is presumably the spec-defined response payload length for
	 * this command -- TODO confirm against the SLI-4 specification. */
	hdr->request_length = 0x50;

	return(sli_config_off + sizeof(sli4_req_hdr_t));
}

/**
 * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to destination buffer.
 * @param size Buffer size in bytes.
 *
 * @return Returns the number of bytes written
 */
static int32_t
sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_common_query_fw_config_t *fw_config;
	uint32_t	sli_config_off = 0;
	uint32_t payload_size;

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_query_fw_config_t),
			   sizeof(sli4_res_common_query_fw_config_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				NULL);
	}

	fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off);
	fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG;
	fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	return sli_config_off + sizeof(sli4_req_common_query_fw_config_t);
}

/**
 * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to destination buffer.
 * @param size Buffer size in bytes.
 *
 * @note Function supports both version 0 and 1 forms of this command via
 * the IF_TYPE.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_common_get_port_name_t	*port_name;
	uint32_t	sli_config_off = 0;
	uint32_t	payload_size;
	uint8_t		version = 0;
	uint8_t		pt = 0;

	/* Select command version according to IF_TYPE */
	switch (sli4->if_type) {
	case SLI4_IF_TYPE_BE3_SKH_PF:
	case SLI4_IF_TYPE_BE3_SKH_VF:
		version = 0;
		break;
	case SLI4_IF_TYPE_LANCER_FC_ETH:
	case SLI4_IF_TYPE_LANCER_RDMA:
		version = 1;
		break;
	default:
		ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type);
		return 0;
	}

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_get_port_name_t),
			   sizeof(sli4_res_common_get_port_name_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				NULL);

		pt = 1;
	}

	port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off);

	port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME;
	port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	/* V1 carries one extra dword (the port-type word set below). */
	port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t));
	port_name->hdr.version = version;

	/* Set the port type value (ethernet=0, FC=1) for V1 commands */
	if (version == 1) {
		port_name->pt = pt;
	}

	return sli_config_off + port_name->hdr.request_length;
}

/**
 * @ingroup sli
 * @brief Write a COMMON_WRITE_OBJECT command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param noc True if the object should be written but not committed to flash.
 * @param eof True if this is the last write for this object.
 * @param desired_write_length Number of bytes of data to write to the object.
 * @param offset Offset, in bytes, from the start of the object.
 * @param object_name Name of the object to write.
 * @param dma DMA structure from which the data will be copied.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size,
		uint16_t noc, uint16_t eof, uint32_t desired_write_length,
		uint32_t offset, char *object_name, ocs_dma_t *dma)
{
	sli4_req_common_write_object_t *wr_obj = NULL;
	uint32_t	sli_config_off = 0;
	sli4_bde_t *host_buffer;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Request struct plus the single trailing BDE. */
				sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t),
				NULL);
	}

	wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off);

	wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT;
	wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	/* Payload length excludes the 4-dword request header, plus the BDE
	 * appended after the fixed request fields. */
	wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
	wr_obj->hdr.timeout = 0;
	wr_obj->hdr.version = 0;

	wr_obj->noc = noc;
	wr_obj->eof = eof;
	wr_obj->desired_write_length = desired_write_length;
	wr_obj->write_offset = offset;
	/* NOTE(review): ocs_strncpy may leave object_name unterminated if
	 * the source fills the field -- presumably firmware tolerates a
	 * full-width name; confirm against the command definition. */
	ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name));
	wr_obj->host_buffer_descriptor_count = 1;

	host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor;

	/* Setup to transfer xfer_size bytes to device */
	host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
	host_buffer->buffer_length = desired_write_length;
	host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
	host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

	return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_DELETE_OBJECT command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param object_name Name of the object to write.
 *
 * @return Returns the number of bytes written.
*/ int32_t sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size, char *object_name) { sli4_req_common_delete_object_t *del_obj = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_delete_object_t), NULL); } del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off); del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT; del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; del_obj->hdr.request_length = sizeof(*del_obj); del_obj->hdr.timeout = 0; del_obj->hdr.version = 0; ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name)); return(sli_config_off + sizeof(sli4_req_common_delete_object_t)); } /** * @ingroup sli * @brief Write a COMMON_READ_OBJECT command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param desired_read_length Number of bytes of data to read from the object. * @param offset Offset, in bytes, from the start of the object. * @param object_name Name of the object to read. * @param dma DMA structure from which the data will be copied. * * @return Returns the number of bytes written. 
 */
int32_t
sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size,
		uint32_t desired_read_length, uint32_t offset,
		char *object_name, ocs_dma_t *dma)
{
	sli4_req_common_read_object_t *rd_obj = NULL;
	uint32_t	sli_config_off = 0;
	sli4_bde_t *host_buffer;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				/* Request struct plus the single trailing BDE. */
				sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t),
				NULL);
	}

	rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off);

	rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT;
	rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	/* Payload length excludes the 4-dword request header, plus the BDE
	 * appended after the fixed request fields (mirrors WRITE_OBJECT). */
	rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
	rd_obj->hdr.timeout = 0;
	rd_obj->hdr.version = 0;

	rd_obj->desired_read_length = desired_read_length;
	rd_obj->read_offset = offset;
	ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name));
	rd_obj->host_buffer_descriptor_count = 1;

	host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor;

	/* Setup to transfer xfer_size bytes to device */
	host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
	host_buffer->buffer_length = desired_read_length;
	/* A NULL dma yields a zero BDE address (e.g. probing object size). */
	if (dma != NULL) {
		host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
		host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
	} else {
		host_buffer->u.data.buffer_address_low = 0;
		host_buffer->u.data.buffer_address_high = 0;
	}

	return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t));
}

/**
 * @ingroup sli
 * @brief Write a DMTF_EXEC_CLP_CMD command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param cmd DMA structure that describes the buffer for the command.
 * @param resp DMA structure that describes the buffer for the response.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size,
		ocs_dma_t *cmd,
		ocs_dma_t *resp)
{
	sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof (sli4_req_dmtf_exec_clp_cmd_t),
				NULL);
	}

	clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off);

	clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD;
	clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF;
	clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) -
					sizeof(sli4_req_hdr_t);
	clp_cmd->hdr.timeout = 0;
	clp_cmd->hdr.version = 0;
	/* CLP command and response live in separate external DMA buffers. */
	clp_cmd->cmd_buf_length = cmd->size;
	clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys);
	clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys);
	clp_cmd->resp_buf_length = resp->size;
	clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys);
	clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys);

	return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_SET_DUMP_LOCATION command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param query Zero to set dump location, non-zero to query dump size
 * @param is_buffer_list Set to one if the buffer is a set of buffer descriptors or
 * set to 0 if the buffer is a contiguous dump area.
 * @param buffer DMA structure to which the dump will be copied.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size,
		uint8_t query, uint8_t is_buffer_list,
		ocs_dma_t *buffer, uint8_t fdb)
{
	sli4_req_common_set_dump_location_t *set_dump_loc = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof (sli4_req_common_set_dump_location_t),
				NULL);
	}

	set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off);

	set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION;
	set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t);
	set_dump_loc->hdr.timeout = 0;
	set_dump_loc->hdr.version = 0;

	set_dump_loc->blp = is_buffer_list;
	set_dump_loc->qry = query;
	set_dump_loc->fdb = fdb;

	/* Query mode (or no buffer yet) sends a zeroed location. */
	if (buffer) {
		set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys);
		set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys);
		set_dump_loc->buffer_length = buffer->len;
	} else {
		set_dump_loc->buf_addr_low = 0;
		set_dump_loc->buf_addr_high = 0;
		set_dump_loc->buffer_length = 0;
	}

	return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_SET_FEATURES command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param feature Feature to set.
 * @param param_len Length of the parameter (must be a multiple of 4 bytes).
 * @param parameter Pointer to the parameter value.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size,
			    uint32_t feature,
			    uint32_t param_len,
			    void* parameter)
{
	sli4_req_common_set_features_t *cmd = NULL;
	uint32_t	sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof (sli4_req_common_set_features_t),
				NULL);
	}

	cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off);

	cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES;
	cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) -
					sizeof(sli4_req_hdr_t);
	cmd->hdr.timeout = 0;
	cmd->hdr.version = 0;

	cmd->feature = feature;
	cmd->param_len = param_len;
	/* NOTE(review): param_len is not validated against sizeof(cmd->params);
	 * callers are presumably trusted to pass an in-range length --
	 * confirm at the call sites. */
	ocs_memcpy(cmd->params, parameter, param_len);

	return(sli_config_off + sizeof(sli4_req_common_set_features_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_COMMON_GET_PROFILE_CONFIG command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size in bytes.
 * @param dma DMA capable memory used to retrieve profile.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
{
	sli4_req_common_get_profile_config_t *req = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	payload_size;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
				sizeof (sli4_req_common_get_profile_config_t),
				dma);
	}

	/* With a DMA buffer the payload is non-embedded; otherwise the
	 * request is written directly after the SLI_CONFIG header. */
	if (dma != NULL) {
		req = dma->virt;
		ocs_memset(req, 0, dma->size);
		payload_size = dma->size;
	} else {
		req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off);
		payload_size = sizeof(sli4_req_common_get_profile_config_t);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 1;

	return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_COMMON_SET_PROFILE_CONFIG command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma DMA capable memory containing profile.
 * @param profile_id Profile ID to configure.
 * @param descriptor_count Number of descriptors in DMA buffer.
 * @param isap Implicit Set Active Profile value to use.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		uint8_t profile_id, uint32_t descriptor_count, uint8_t isap)
{
	sli4_req_common_set_profile_config_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
					     sizeof (sli4_req_common_set_profile_config_t),
					     dma);
	}

	/* Non-embedded (DMA) or embedded placement, as above. */
	if (dma != NULL) {
		req = dma->virt;
		ocs_memset(req, 0, dma->size);
		payload_size = dma->size;
	} else {
		req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off);
		payload_size = sizeof(sli4_req_common_set_profile_config_t);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 1;
	req->profile_id = profile_id;
	req->desc_count = descriptor_count;
	req->isap = isap;

	return(cmd_off + sizeof(sli4_req_common_set_profile_config_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_COMMON_GET_PROFILE_LIST command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size in bytes.
 * @param start_profile_index First profile index to return.
 * @param dma Buffer into which the list will be written.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size,
                                   uint32_t start_profile_index, ocs_dma_t *dma)
{
	sli4_req_common_get_profile_list_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
					     sizeof (sli4_req_common_get_profile_list_t),
					     dma);
	}

	/* Non-embedded (DMA) or embedded placement, as above. */
	if (dma != NULL) {
		req = dma->virt;
		ocs_memset(req, 0, dma->size);
		payload_size = dma->size;
	} else {
		req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off);
		payload_size = sizeof(sli4_req_common_get_profile_list_t);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 0;

	req->start_profile_index = start_profile_index;

	return(cmd_off + sizeof(sli4_req_common_get_profile_list_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_COMMON_GET_ACTIVE_PROFILE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size in bytes.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_common_get_active_profile_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_get_active_profile_t),
			   sizeof(sli4_res_common_get_active_profile_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				NULL);
	}

	req = (sli4_req_common_get_active_profile_t *)
	      ((uint8_t*)buf + cmd_off);

	req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 0;

	return(cmd_off + sizeof(sli4_req_common_get_active_profile_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_COMMON_SET_ACTIVE_PROFILE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size in bytes.
 * @param fd If non-zero, set profile to factory default.
 * @param active_profile_id ID of new active profile.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size,
		uint32_t fd, uint32_t active_profile_id)
{
	sli4_req_common_set_active_profile_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_set_active_profile_t),
			   sizeof(sli4_res_common_set_active_profile_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				NULL);
	}

	req = (sli4_req_common_set_active_profile_t *)
	      ((uint8_t*)buf + cmd_off);

	req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 0;
	req->fd = fd;
	req->active_profile_id = active_profile_id;

	return(cmd_off + sizeof(sli4_req_common_set_active_profile_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size in bytes.
 * @param dma Buffer to store the supported link configuration modes from the physical device.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
{
	sli4_req_common_get_reconfig_link_info_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t),
			   sizeof(sli4_res_common_get_reconfig_link_info_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				dma);
	}

	/* Non-embedded (DMA) or embedded request placement. */
	if (dma != NULL) {
		req = dma->virt;
		ocs_memset(req, 0, dma->size);
		payload_size = dma->size;
	} else {
		req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off);
		payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 0;

	return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t));
}

/**
 * @ingroup sli
 * @brief Write a COMMON_SET_RECONFIG_LINK_ID command.
 *
 * @param sli4 SLI context.
 * @param buf destination buffer for the command.
 * @param size buffer size in bytes.
 * @param fd If non-zero, set link config to factory default.
 * @param active_link_config_id ID of new active profile.
 * @param dma Buffer to assign the link configuration mode that is to become active from the physical device.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		uint32_t fd, uint32_t active_link_config_id)
{
	sli4_req_common_set_reconfig_link_id_t *req = NULL;
	uint32_t cmd_off = 0;
	uint32_t payload_size;

	/* Payload length must accommodate both request and response */
	payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t),
			   sizeof(sli4_res_common_set_reconfig_link_id_t));

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		/* NOTE(review): unlike GET_RECONFIG_LINK_INFO above, NULL is
		 * passed here instead of dma, yet the dma branch below still
		 * places the request in dma->virt -- confirm whether callers
		 * ever pass a non-NULL dma. */
		cmd_off = sli_cmd_sli_config(sli4, buf, size,
				payload_size,
				NULL);
	}

	if (dma != NULL) {
		req = dma->virt;
		ocs_memset(req, 0, dma->size);
		payload_size = dma->size;
	} else {
		req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off);
		payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
	req->hdr.version = 0;
	req->fd = fd;
	req->next_link_config_id = active_link_config_id;

	return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t));
}

/**
 * @ingroup sli
 * @brief Check the mailbox/queue completion entry.
 *
 * @param buf Pointer to the MCQE.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_cqe_mq(void *buf)
{
	sli4_mcqe_t	*mcqe = buf;

	/*
	 * Firmware can split mbx completions into two MCQEs: first with only
	 * the "consumed" bit set and a second with the "complete" bit set.
	 * Thus, ignore MCQE unless "complete" is set.
	 */
	if (!mcqe->cmp) {
		return -2;
	}

	if (mcqe->completion_status) {
		ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
				mcqe->completion_status,
				mcqe->extended_status,
				mcqe->con,
				mcqe->cmp,
				mcqe->ae,
				mcqe->val);
	}

	return mcqe->completion_status;
}

/**
 * @ingroup sli
 * @brief Check the asynchronous event completion entry.
 *
 * @param sli4 SLI context.
 * @param buf Pointer to the ACQE.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ int32_t sli_cqe_async(sli4_t *sli4, void *buf) { sli4_acqe_t *acqe = buf; int32_t rc = -1; if (!sli4 || !buf) { ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf); return -1; } switch (acqe->event_code) { case SLI4_ACQE_EVENT_CODE_LINK_STATE: rc = sli_fc_process_link_state(sli4, buf); break; case SLI4_ACQE_EVENT_CODE_FCOE_FIP: rc = sli_fc_process_fcoe(sli4, buf); break; case SLI4_ACQE_EVENT_CODE_GRP_5: /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n"); break; case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT: ocs_log_debug(sli4->os,"ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n", acqe->event_type, acqe->event_data[0], acqe->event_data[1]); #if defined(OCS_INCLUDE_DEBUG) ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe)); #endif break; case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT: rc = sli_fc_process_link_attention(sli4, buf); break; default: /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code); } return rc; } /** * @brief Check the SLI_CONFIG response. * * @par Description * Function checks the SLI_CONFIG response and the payload status. * * @param buf Pointer to SLI_CONFIG response. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t sli_res_sli_config(void *buf) { sli4_cmd_sli_config_t *sli_config = buf; if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) { ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf, buf ? sli_config->hdr.command : -1); return -1; } if (sli_config->hdr.status) { return sli_config->hdr.status; } if (sli_config->emb) { return sli_config->payload.embed[4]; } else { ocs_log_test(NULL, "external buffers not supported\n"); return -1; } } /** * @brief Issue a COMMON_FUNCTION_RESET command. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t sli_common_function_reset(sli4_t *sli4) { if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n"); return -1; } if (sli_res_sli_config(sli4->bmbx.virt)) { ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n"); return -1; } } else { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } return 0; } /** * @brief check to see if the FW is ready. * * @par Description * Based on SLI-4 Architecture Specification, Revision 4.x0-13 (2012).. * * @param sli4 SLI context. * @param timeout_ms Time, in milliseconds, to wait for the port to be ready * before failing. * * @return Returns TRUE for ready, or FALSE otherwise. */ static int32_t sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms) { uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000); uint32_t ready = FALSE; do { iter--; ocs_udelay(SLI4_INIT_PORT_DELAY_US); if (sli_fw_ready(sli4) == 1) { ready = TRUE; } } while (!ready && (iter > 0)); return ready; } /** * @brief Initialize the firmware. * * @par Description * Based on SLI-4 Architecture Specification, Revision 4.x0-13 (2012).. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t sli_fw_init(sli4_t *sli4) { uint32_t ready; uint32_t endian; /* * Is firmware ready for operation? 
*/ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "FW status is NOT ready\n"); return -1; } /* * Reset port to a known state */ switch (sli4->if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: case SLI4_IF_TYPE_BE3_SKH_VF: /* No SLIPORT_CONTROL register so use command sequence instead */ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); return -1; } if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n"); return -1; } } else { ocs_log_crit(sli4->os, "bad FW_INIT write\n"); return -1; } if (sli_common_function_reset(sli4)) { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } break; case SLI4_IF_TYPE_LANCER_FC_ETH: #if BYTE_ORDER == LITTLE_ENDIAN endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; #else endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; #endif if (sli_sliport_control(sli4, endian)) return -1; break; default: ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type); return -1; } return 0; } /** * @brief Terminate the firmware. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t sli_fw_term(sli4_t *sli4) { uint32_t endian; if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF || sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) { /* No SLIPORT_CONTROL register so use command sequence instead */ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); return -1; } if (sli_common_function_reset(sli4)) { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n"); return -1; } } else { ocs_log_test(sli4->os, "bad FW_DEINIT write\n"); return -1; } } else { #if BYTE_ORDER == LITTLE_ENDIAN endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; #else endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; #endif /* type 2 etc. use SLIPORT_CONTROL to initialize port */ sli_sliport_control(sli4, endian); } return 0; } /** * @brief Write the doorbell register associated with the queue object. * * @param sli4 SLI context. * @param q Queue object. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q) { uint32_t val = 0; switch (q->type) { case SLI_QTYPE_EQ: val = sli_eq_doorbell(q->n_posted, q->id, FALSE); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_CQ: val = sli_cq_doorbell(q->n_posted, q->id, FALSE); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_MQ: val = SLI4_MQ_DOORBELL(q->n_posted, q->id); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_RQ: { uint32_t n_posted = q->n_posted; /* * FC/FCoE has different rules for Receive Queues. The host * should only update the doorbell of the RQ-pair containing * the headers since the header / payload RQs are treated * as a matched unit. 
*/ if (SLI4_PORT_TYPE_FC == sli4->port_type) { /* * In RQ-pair, an RQ either contains the FC header * (i.e. is_hdr == TRUE) or the payload. * * Don't ring doorbell for payload RQ */ if (!q->u.flag.is_hdr) { break; } /* * Some RQ cannot be incremented one entry at a time. Instead, * the driver collects a number of entries and updates the * RQ in batches. */ if (q->u.flag.rq_batch) { if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) { break; } n_posted = SLI4_QUEUE_RQ_BATCH; } } val = SLI4_RQ_DOORBELL(n_posted, q->id); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; } case SLI_QTYPE_WQ: val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; default: ocs_log_test(sli4->os, "bad queue type %d\n", q->type); return -1; } return 0; } static int32_t sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query) { if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, *features, query)) { sli4_cmd_request_features_t *req_features = sli4->bmbx.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n"); return -1; } if (req_features->hdr.status) { ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n", req_features->hdr.status); return -1; } features->dword = req_features->response.dword; } else { ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n"); return -1; } return 0; } /** * @brief Calculate max queue entries. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ void sli_calc_max_qentries(sli4_t *sli4) { sli4_qtype_e q; uint32_t alloc_size, qentries, qentry_size; for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q], sli4->config.count_mask[q]); } /* single, continguous DMA allocations will be called for each queue * of size (max_qentries * queue entry size); since these can be large, * check against the OS max DMA allocation size */ for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { qentries = sli4->config.max_qentries[q]; qentry_size = sli_get_queue_entry_size(sli4, q); alloc_size = qentries * qentry_size; if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) { while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) { /* cut the qentries in hwf until alloc_size <= max DMA alloc size */ qentries >>= 1; alloc_size = qentries * qentry_size; } ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n", SLI_QNAME[q], sli4->config.max_qentries[q], qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)); sli4->config.max_qentries[q] = qentries; } } } /** * @brief Issue a FW_CONFIG mailbox command and store the results. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t sli_query_fw_config(sli4_t *sli4) { /* * Read the device configuration * * Note: Only ulp0 fields contain values */ if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_common_query_fw_config_t *fw_config = (sli4_res_common_query_fw_config_t *) (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n"); return -1; } if (fw_config->hdr.status) { ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n", fw_config->hdr.status); return -1; } sli4->physical_port = fw_config->physical_port; sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1); sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode & (SLI4_ULP_MODE_FCOE_INI | SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode & (SLI4_ULP_MODE_FCOE_INI | SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); if (sli4->config.dual_ulp_capable) { /* * Lancer will not support this, so we use the values * from the READ_CONFIG. 
*/ if (sli4->config.is_ulp_fc[0] && sli4->config.is_ulp_fc[1]) { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total; } else if (sli4->config.is_ulp_fc[0]) { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total; } else { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total; } } } else { ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n"); return -1; } return 0; } static int32_t sli_get_config(sli4_t *sli4) { ocs_dma_t get_cntl_addl_data; /* * Read the device configuration */ if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_read_config_t *read_config = sli4->bmbx.virt; uint32_t i; uint32_t total; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n"); return -1; } if (read_config->hdr.status) { ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n", read_config->hdr.status); return -1; } sli4->config.has_extents = read_config->ext; if (FALSE == sli4->config.has_extents) { uint32_t i = 0; uint32_t *base = sli4->config.extent[0].base; if (!base) { if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t), OCS_M_ZERO | OCS_M_NOWAIT))) { ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n"); return -1; } } for (i = 0; i < SLI_RSRC_MAX; i++) { sli4->config.extent[i].number = 1; sli4->config.extent[i].n_alloc = 0; sli4->config.extent[i].base = &base[i]; } sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base; sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count; sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = read_config->vpi_base; sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count; 
sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base; sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count; sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base; sli4->config.extent[SLI_RSRC_FCOE_XRI].size = read_config->xri_count; sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0; sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count; } else { /* TODO extents*/ ; } for (i = 0; i < SLI_RSRC_MAX; i++) { total = sli4->config.extent[i].number * sli4->config.extent[i].size; sli4->config.extent[i].use_map = ocs_bitmap_alloc(total); if (NULL == sli4->config.extent[i].use_map) { ocs_log_err(sli4->os, "bitmap memory allocation failed " "resource %d\n", i); return -1; } sli4->config.extent[i].map_size = total; } sli4->config.topology = read_config->topology; switch (sli4->config.topology) { case SLI4_READ_CFG_TOPO_FCOE: ocs_log_debug(sli4->os, "FCoE\n"); break; case SLI4_READ_CFG_TOPO_FC: ocs_log_debug(sli4->os, "FC (unknown)\n"); break; case SLI4_READ_CFG_TOPO_FC_DA: ocs_log_debug(sli4->os, "FC (direct attach)\n"); break; case SLI4_READ_CFG_TOPO_FC_AL: ocs_log_debug(sli4->os, "FC (arbitrated loop)\n"); break; default: ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology); } sli4->config.e_d_tov = read_config->e_d_tov; sli4->config.r_a_tov = read_config->r_a_tov; sli4->config.link_module_type = read_config->lmt; sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count; sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count; sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count; sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count; /* * READ_CONFIG doesn't give the max number of MQ. Applications * will typically want 1, but we may need another at some future * date. Dummy up a "max" MQ count here. 
*/ sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT; } else { ocs_log_err(sli4->os, "bad READ_CONFIG write\n"); return -1; } if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *) (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n"); return -1; } else if (parms->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n", parms->hdr.status, parms->hdr.additional_status); return -1; } sli4->config.auto_reg = parms->areg; sli4->config.auto_xfer_rdy = parms->agxf; sli4->config.hdr_template_req = parms->hdrr; sli4->config.t10_dif_inline_capable = parms->timm; sli4->config.t10_dif_separate_capable = parms->tsmm; sli4->config.mq_create_version = parms->mqv; sli4->config.cq_create_version = parms->cqv; sli4->config.rq_min_buf_size = parms->min_rq_buffer_size; sli4->config.rq_max_buf_size = parms->max_rq_buffer_size; sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt; /* save count methods and masks for each queue type */ sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask; sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method; sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask; sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method; sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask; sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method; sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask; sli4->config.count_method[SLI_QTYPE_WQ] = 
parms->wqe_count_method; sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask; sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method; /* now calculate max queue entries */ sli_calc_max_qentries(sli4); sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */ sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */ /* ignore HLM here. Use value from REQUEST_FEATURES */ sli4->config.sge_supported_length = parms->sge_supported_length; if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE) sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE; sli4->config.sgl_pre_registration_required = parms->sglr; /* default to using pre-registered SGL's */ sli4->config.sgl_pre_registered = TRUE; sli4->config.perf_hint = parms->phon; sli4->config.perf_wq_id_association = parms->phwq; sli4->config.rq_batch = parms->rq_db_window; /* save the fields for skyhawk SGL chaining */ sli4->config.sgl_chaining_params.chaining_capable = (parms->sglc == 1); sli4->config.sgl_chaining_params.frag_num_field_offset = parms->frag_num_field_offset; sli4->config.sgl_chaining_params.frag_num_field_mask = (1ull << parms->frag_num_field_size) - 1; sli4->config.sgl_chaining_params.sgl_index_field_offset = parms->sgl_index_field_offset; sli4->config.sgl_chaining_params.sgl_index_field_mask = (1ull << parms->sgl_index_field_size) - 1; sli4->config.sgl_chaining_params.chain_sge_initial_value_lo = parms->chain_sge_initial_value_lo; sli4->config.sgl_chaining_params.chain_sge_initial_value_hi = parms->chain_sge_initial_value_hi; /* Use the highest available WQE size. */ if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) { sli4->config.wqe_size = SLI4_WQE_EXT_BYTES; } else { sli4->config.wqe_size = SLI4_WQE_BYTES; } } if (sli_query_fw_config(sli4)) { ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n"); return -1; } sli4->config.port_number = 0; /* * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. 
Temporarily * uses VPD DMA buffer as the response won't fit in the embedded * buffer. */ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n"); return -1; } else if (attr->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n", attr->hdr.status, attr->hdr.additional_status); return -1; } sli4->config.port_number = attr->port_number; ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string, sizeof(sli4->config.bios_version_string)); } else { ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n"); return -1; } if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t), OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n"); } else { if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &get_cntl_addl_data)) { sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n"); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } if (attr->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n", attr->hdr.status); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name)); ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name); } else { ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n"); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } ocs_dma_free(sli4->os, &get_cntl_addl_data); } if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 
sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n"); return -1; } sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number]; } sli4->config.port_name[1] = '\0'; if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n"); return -1; } if (read_rev->hdr.status) { ocs_log_err(sli4->os, "READ_REV bad status %#x\n", read_rev->hdr.status); return -1; } sli4->config.fw_rev[0] = read_rev->first_fw_id; ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0])); sli4->config.fw_rev[1] = read_rev->second_fw_id; ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1])); sli4->config.hw_rev[0] = read_rev->first_hw_revision; sli4->config.hw_rev[1] = read_rev->second_hw_revision; sli4->config.hw_rev[2] = read_rev->third_hw_revision; ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n", read_rev->first_fw_name, read_rev->first_fw_id, read_rev->second_fw_name, read_rev->second_fw_id); ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision, read_rev->second_hw_revision); /* Check that all VPD data was returned */ if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) { ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n", read_rev->available_length, read_rev->returned_vpd_length, read_rev->actual_vpd_length); } sli4->vpd.length = read_rev->returned_vpd_length; } else { ocs_log_err(sli4->os, "bad READ_REV write\n"); return -1; } if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt; if 
(sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n"); return -1; } if (read_nvparms->hdr.status) { ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n", read_nvparms->hdr.status); return -1; } ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn)); ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn)); ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", sli4->config.wwpn[0], sli4->config.wwpn[1], sli4->config.wwpn[2], sli4->config.wwpn[3], sli4->config.wwpn[4], sli4->config.wwpn[5], sli4->config.wwpn[6], sli4->config.wwpn[7]); ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", sli4->config.wwnn[0], sli4->config.wwnn[1], sli4->config.wwnn[2], sli4->config.wwnn[3], sli4->config.wwnn[4], sli4->config.wwnn[5], sli4->config.wwnn[6], sli4->config.wwnn[7]); } else { ocs_log_err(sli4->os, "bad READ_NVPARMS write\n"); return -1; } return 0; } /**************************************************************************** * Public functions */ /** * @ingroup sli * @brief Set up the SLI context. * * @param sli4 SLI context. * @param os Device abstraction. * @param port_type Protocol type of port (for example, FC and NIC). * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type) { uint32_t sli_intf = UINT32_MAX; uint32_t pci_class_rev = 0; uint32_t rev_id = 0; uint32_t family = 0; uint32_t i; sli4_asic_entry_t *asic; ocs_memset(sli4, 0, sizeof(sli4_t)); sli4->os = os; sli4->port_type = port_type; /* * Read the SLI_INTF register to discover the register layout * and other capability information */ sli_intf = ocs_config_read32(os, SLI4_INTF_REG); if (sli_intf_valid_check(sli_intf)) { ocs_log_err(os, "SLI_INTF is not valid\n"); return -1; } /* driver only support SLI-4 */ sli4->sli_rev = sli_intf_sli_revision(sli_intf); if (4 != sli4->sli_rev) { ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n", sli_intf); return -1; } sli4->sli_family = sli_intf_sli_family(sli_intf); sli4->if_type = sli_intf_if_type(sli_intf); if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) { ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n", sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS), sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1), sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2)); } /* * set the ASIC type and revision */ pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION); rev_id = sli_pci_rev_id(pci_class_rev); family = sli4->sli_family; if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) { uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG); family = sli_asic_gen(asic_id); } for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) { if ((rev_id == asic->rev_id) && (family == asic->family)) { sli4->asic_type = asic->type; sli4->asic_rev = asic->rev; break; } } /* Fail if no matching asic type/rev was found */ if( (sli4->asic_type == 0) || (sli4->asic_rev == 0)) { ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n", family, rev_id); return -1; } /* * The bootstrap mailbox is equivalent to a MQ with a single 256 byte * entry, a CQ with a single 16 byte entry, and no event queue. 
* Alignment must be 16 bytes as the low order address bits in the * address register are also control / status. */ if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE + sizeof(sli4_mcqe_t), 16)) { ocs_log_err(os, "bootstrap mailbox allocation failed\n"); return -1; } if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) { ocs_log_err(os, "bad alignment for bootstrap mailbox\n"); return -1; } ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt, ocs_addr32_hi(sli4->bmbx.phys), ocs_addr32_lo(sli4->bmbx.phys), sli4->bmbx.size); /* TODO 4096 is arbitrary. What should this value actually be? */ if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) { /* Note that failure isn't fatal in this specific case */ sli4->vpd.data.size = 0; ocs_log_test(os, "VPD buffer allocation failed\n"); } if (sli_fw_init(sli4)) { ocs_log_err(sli4->os, "FW initialization failed\n"); return -1; } /* * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true * in addition to any other desired features */ sli4->config.features.flag.iaab = TRUE; sli4->config.features.flag.npiv = TRUE; sli4->config.features.flag.dif = TRUE; sli4->config.features.flag.vf = TRUE; sli4->config.features.flag.fcpc = TRUE; sli4->config.features.flag.iaar = TRUE; sli4->config.features.flag.hlm = TRUE; sli4->config.features.flag.perfh = TRUE; sli4->config.features.flag.rxseq = TRUE; sli4->config.features.flag.rxri = TRUE; sli4->config.features.flag.mrqp = TRUE; /* use performance hints if available */ if (sli4->config.perf_hint) { sli4->config.features.flag.perfh = TRUE; } if (sli_request_features(sli4, &sli4->config.features, TRUE)) { return -1; } if (sli_get_config(sli4)) { return -1; } return 0; } int32_t sli_init(sli4_t *sli4) { if (sli4->config.has_extents) { /* TODO COMMON_ALLOC_RESOURCE_EXTENTS */; ocs_log_test(sli4->os, "XXX need to implement extent allocation\n"); return -1; } sli4->config.features.flag.hlm = sli4->config.high_login_mode; sli4->config.features.flag.rxseq = FALSE; 
sli4->config.features.flag.rxri = FALSE; if (sli_request_features(sli4, &sli4->config.features, FALSE)) { return -1; } return 0; } int32_t sli_reset(sli4_t *sli4) { uint32_t i; if (sli_fw_init(sli4)) { ocs_log_crit(sli4->os, "FW initialization failed\n"); return -1; } if (sli4->config.extent[0].base) { ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t)); sli4->config.extent[0].base = NULL; } for (i = 0; i < SLI_RSRC_MAX; i++) { if (sli4->config.extent[i].use_map) { ocs_bitmap_free(sli4->config.extent[i].use_map); sli4->config.extent[i].use_map = NULL; } sli4->config.extent[i].base = NULL; } if (sli_get_config(sli4)) { return -1; } return 0; } /** * @ingroup sli * @brief Issue a Firmware Reset. * * @par Description * Issues a Firmware Reset to the chip. This reset affects the entire chip, * so all PCI function on the same PCI bus and device are affected. * @n @n This type of reset can be used to activate newly downloaded firmware. * @n @n The driver should be considered to be in an unknown state after this * reset and should be reloaded. * * @param sli4 SLI context. * * @return Returns 0 on success, or -1 otherwise. */ int32_t sli_fw_reset(sli4_t *sli4) { uint32_t val; uint32_t ready; /* * Firmware must be ready before issuing the reset. 
*/ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "FW status is NOT ready\n"); return -1; } switch(sli4->if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */ val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR); val |= SLI4_PCI_SOFT_RESET_MASK; ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val); break; case SLI4_IF_TYPE_LANCER_FC_ETH: /* Lancer uses PHYDEV_CONTROL */ val = SLI4_PHYDEV_CONTROL_FRST; sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val); break; default: ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type); return -1; break; } /* wait for the FW to become ready after the reset */ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n"); return -1; } return 0; } /** * @ingroup sli * @brief Tear down a SLI context. * * @param sli4 SLI context. * * @return Returns 0 on success, or non-zero otherwise. */ int32_t sli_teardown(sli4_t *sli4) { uint32_t i; if (sli4->config.extent[0].base) { ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t)); sli4->config.extent[0].base = NULL; } for (i = 0; i < SLI_RSRC_MAX; i++) { if (sli4->config.has_extents) { /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */; } sli4->config.extent[i].base = NULL; ocs_bitmap_free(sli4->config.extent[i].use_map); sli4->config.extent[i].use_map = NULL; } if (sli_fw_term(sli4)) { ocs_log_err(sli4->os, "FW deinitialization failed\n"); } ocs_dma_free(sli4->os, &sli4->vpd.data); ocs_dma_free(sli4->os, &sli4->bmbx); return 0; } /** * @ingroup sli * @brief Register a callback for the given event. * * @param sli4 SLI context. * @param which Event of interest. * @param func Function to call when the event occurs. * @param arg Argument passed to the callback function. * * @return Returns 0 on success, or non-zero otherwise. 
*/ int32_t sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg) { if (!sli4 || !func || (which >= SLI4_CB_MAX)) { ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n", sli4, which, func); return -1; } switch (which) { case SLI4_CB_LINK: sli4->link = func; sli4->link_arg = arg; break; case SLI4_CB_FIP: sli4->fip = func; sli4->fip_arg = arg; break; default: ocs_log_test(sli4->os, "unknown callback %#x\n", which); return -1; } return 0; } /** * @ingroup sli * @brief Initialize a queue object. * * @par Description * This initializes the sli4_queue_t object members, including the underlying * DMA memory. * * @param sli4 SLI context. * @param q Pointer to queue object. * @param qtype Type of queue to create. * @param size Size of each entry. * @param n_entries Number of entries to allocate. * @param align Starting memory address alignment. * * @note Checks if using the existing DMA memory (if any) is possible. If not, * it frees the existing memory and re-allocates. * * @return Returns 0 on success, or non-zero otherwise. 
*/ int32_t __sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype, size_t size, uint32_t n_entries, uint32_t align) { if ((q->dma.virt == NULL) || (size != q->size) || (n_entries != q->length)) { if (q->dma.size) { ocs_dma_free(sli4->os, &q->dma); } ocs_memset(q, 0, sizeof(sli4_queue_t)); if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) { ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]); return -1; } ocs_memset(q->dma.virt, 0, size * n_entries); ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]", SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock); q->type = qtype; q->size = size; q->length = n_entries; /* Limit to hwf the queue size per interrupt */ q->proc_limit = n_entries / 2; switch(q->type) { case SLI_QTYPE_EQ: q->posted_limit = q->length / 2; break; default: if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) || (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) { /* For Skyhawk, ring the doorbell more often */ q->posted_limit = 8; } else { q->posted_limit = 64; } break; } } return 0; } /** * @ingroup sli * @brief Issue the command to create a queue. * * @param sli4 SLI context. * @param q Pointer to queue object. * * @return Returns 0 on success, or non-zero otherwise. 
 */
/* Issue the queue-create command already staged in the bootstrap mailbox by
 * the caller, then record the new queue id and doorbell location. The queue
 * DMA memory is freed on any failure path. */
int32_t
__sli_create_queue(sli4_t *sli4, sli4_queue_t *q)
{
    sli4_res_common_create_queue_t *res_q = NULL;

    if (sli_bmbx_command(sli4)){
        ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n",
                SLI_QNAME[q->type]);
        ocs_dma_free(sli4->os, &q->dma);
        return -1;
    }
    if (sli_res_sli_config(sli4->bmbx.virt)) {
        ocs_log_err(sli4->os, "bad status create %s\n", SLI_QNAME[q->type]);
        ocs_dma_free(sli4->os, &q->dma);
        return -1;
    }
    res_q = (void *)((uint8_t *)sli4->bmbx.virt +
            offsetof(sli4_cmd_sli_config_t, payload));

    if (res_q->hdr.status) {
        ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n",
                SLI_QNAME[q->type], res_q->hdr.status,
                res_q->hdr.additional_status);
        ocs_dma_free(sli4->os, &q->dma);
        return -1;
    } else {
        q->id = res_q->q_id;
        q->doorbell_offset = res_q->db_offset;
        q->doorbell_rset = res_q->db_rs;

        /* For queue types whose response carries no doorbell information,
         * fall back to the static register map for this interface type. */
        switch (q->type) {
        case SLI_QTYPE_EQ:
            /* No doorbell information in response for EQs */
            q->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off;
            q->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset;
            break;
        case SLI_QTYPE_CQ:
            /* No doorbell information in response for CQs */
            q->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off;
            q->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset;
            break;
        case SLI_QTYPE_MQ:
            /* No doorbell information in response for MQs */
            q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off;
            q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset;
            break;
        case SLI_QTYPE_RQ:
            /* set the doorbell for non-skyhawks */
            if (!sli4->config.dual_ulp_capable) {
                q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
                q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
            }
            break;
        case SLI_QTYPE_WQ:
            /* set the doorbell for non-skyhawks */
            if (!sli4->config.dual_ulp_capable) {
                q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off;
                q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset;
            }
            break;
        default:
            break;
        }
    }

    return 0;
}

/**
 * @ingroup sli
 * @brief Get queue entry size.
 *
 * Get queue entry size given queue type.
 *
 * @param sli4 SLI context
 * @param qtype Type for which the entry size is returned.
 *
 * @return Returns > 0 on success (queue entry size), or a negative value on failure.
 */
int32_t
sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype)
{
    uint32_t    size = 0;

    if (!sli4) {
        ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
        return -1;
    }

    switch (qtype) {
    case SLI_QTYPE_EQ:
        size = sizeof(uint32_t);
        break;
    case SLI_QTYPE_CQ:
        size = 16;
        break;
    case SLI_QTYPE_MQ:
        size = 256;
        break;
    case SLI_QTYPE_WQ:
        if (SLI4_PORT_TYPE_FC == sli4->port_type) {
            size = sli4->config.wqe_size;
        } else {
            /* TODO */
            ocs_log_test(sli4->os, "unsupported queue entry size\n");
            return -1;
        }
        break;
    case SLI_QTYPE_RQ:
        size = SLI4_FCOE_RQE_SIZE;
        break;
    default:
        ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
        return -1;
    }

    return size;
}

/**
 * @ingroup sli
 * @brief Modify the delay timer for all the EQs
 *
 * @param sli4 SLI context.
 * @param eq Array of EQs.
 * @param num_eq Count of EQs.
 * @param shift Phase shift for staggering interrupts.
 * @param delay_mult Delay multiplier for limiting interrupt frequency.
 *
 * @return Returns 0 on success, or -1 otherwise.
 */
int32_t
sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult)
{
    /* Stage MODIFY_EQ_DELAY in the bootstrap mailbox, then execute it. */
    sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult);

    if (sli_bmbx_command(sli4)) {
        ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
        return -1;
    }
    if (sli_res_sli_config(sli4->bmbx.virt)) {
        ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n");
        return -1;
    }

    return 0;
}

/**
 * @ingroup sli
 * @brief Allocate a queue.
 *
 * @par Description
 * Allocates DMA memory and configures the requested queue type.
 *
 * @param sli4 SLI context.
 * @param qtype Type of queue to create.
 * @param q Pointer to the queue object.
 * @param n_entries Number of entries to allocate.
 * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on).
 * @param ulp The ULP to bind, which is only used for WQ and RQs
 *
 * @return Returns 0 on success, or -1 otherwise.
 */
int32_t
sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries,
        sli4_queue_t *assoc, uint16_t ulp)
{
    int32_t     size;
    uint32_t    align = 0;
    sli4_create_q_fn_t create = NULL;

    if (!sli4 || !q) {
        ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
        return -1;
    }

    /* get queue size */
    size = sli_get_queue_entry_size(sli4, qtype);
    if (size < 0)
        return -1;
    align = SLI_PAGE_SIZE;

    /* Pick the create-command builder for the queue type.
     * NOTE(review): SLI_QTYPE_RQ is not handled here and falls through to
     * the "unknown queue type" error — presumably RQs are allocated by a
     * dedicated path elsewhere; confirm before adding a case. */
    switch (qtype) {
    case SLI_QTYPE_EQ:
        create = sli_cmd_common_create_eq;
        break;
    case SLI_QTYPE_CQ:
        create = sli_cmd_common_create_cq;
        break;
    case SLI_QTYPE_MQ:
        /* Validate the number of entries */
        switch (n_entries) {
        case 16:
        case 32:
        case 64:
        case 128:
            break;
        default:
            ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries);
            return -1;
        }
        assoc->u.flag.is_mq = TRUE;
        create = sli_cmd_common_create_mq_ext;
        break;
    case SLI_QTYPE_WQ:
        if (SLI4_PORT_TYPE_FC == sli4->port_type) {
            if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
                create = sli_cmd_fcoe_wq_create;
            } else {
                create = sli_cmd_fcoe_wq_create_v1;
            }
        } else {
            /* TODO */
            ocs_log_test(sli4->os, "unsupported WQ create\n");
            return -1;
        }
        break;
    default:
        ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
        return -1;
    }

    if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) {
        ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
        return -1;
    }

    /* Builder returns the number of bytes written (0 on failure). */
    if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? assoc->id : 0, ulp)) {
        if (__sli_create_queue(sli4, q)) {
            ocs_log_err(sli4->os, "create %s failed\n", SLI_QNAME[qtype]);
            return -1;
        }
        q->ulp = ulp;
    } else {
        ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]);
        return -1;
    }

    return 0;
}

/**
 * @ingroup sli
 * @brief Allocate a CQ set.
 *
 * @param sli4 SLI context.
* @param num_cqs to create * @param qs Pointers to the queue objects. * @param n_entries Number of entries to allocate per CQ. * @param eqs Associated event queues * * @return Returns 0 on success, or -1 otherwise. */ int32_t sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs, uint32_t n_entries, sli4_queue_t *eqs[]) { uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0; uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq; uintptr_t addr; ocs_dma_t dma; sli4_req_common_create_cq_set_v0_t *req = NULL; sli4_res_common_create_queue_set_t *res = NULL; if (!sli4) { ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4); return -1; } + memset(&dma, 0, sizeof(dma)); + /* Align the queue DMA memory */ for (i = 0; i < num_cqs; i++) { if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES, n_entries, SLI_PAGE_SIZE)) { ocs_log_err(sli4->os, "Queue init failed.\n"); goto error; } } n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES; switch (n_cqe) { case 256: case 512: case 1024: case 2048: page_size = 1; break; case 4096: page_size = 2; break; default: return -1; } page_bytes = page_size * SLI_PAGE_SIZE; num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes); cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) + (8 * num_pages_cq * num_cqs); payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_set_t)); if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) { ocs_log_err(sli4->os, "DMA allocation failed\n"); goto error; } ocs_memset(dma.virt, 0, payload_size); if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, payload_size, &dma) == -1) { goto error; } /* Fill the request structure */ req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt); req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET; req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; req->hdr.version = 0; req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); req->page_size = page_size; req->num_pages = num_pages_cq; switch 
(req->num_pages) { case 1: req->cqecnt = SLI4_CQ_CNT_256; break; case 2: req->cqecnt = SLI4_CQ_CNT_512; break; case 4: req->cqecnt = SLI4_CQ_CNT_1024; break; case 8: req->cqecnt = SLI4_CQ_CNT_LARGE; req->cqe_count = n_cqe; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages); goto error; } req->evt = TRUE; req->valid = TRUE; req->arm = FALSE; req->num_cq_req = num_cqs; /* Fill page addresses of all the CQs. */ for (i = 0; i < num_cqs; i++) { req->eq_id[i] = eqs[i]->id; for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += page_bytes) { req->page_physical_address[offset].low = ocs_addr32_lo(addr); req->page_physical_address[offset].high = ocs_addr32_hi(addr); offset++; } } if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n"); goto error; } res = (void *)((uint8_t *)dma.virt); if (res->hdr.status) { ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n", res->hdr.status, res->hdr.additional_status); goto error; } else { /* Check if we got all requested CQs. */ if (res->num_q_allocated != num_cqs) { ocs_log_crit(sli4->os, "Requested count CQs doesnt match.\n"); goto error; } /* Fill the resp cq ids. */ for (i = 0; i < num_cqs; i++) { qs[i]->id = res->q_id + i; qs[i]->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off; qs[i]->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset; } } ocs_dma_free(sli4->os, &dma); return 0; error: for (i = 0; i < num_cqs; i++) { if (qs[i]->dma.size) { ocs_dma_free(sli4->os, &qs[i]->dma); } } if (dma.size) { ocs_dma_free(sli4->os, &dma); } return -1; } /** * @ingroup sli * @brief Free a queue. * * @par Description * Frees DMA memory and de-registers the requested queue. * * @param sli4 SLI context. * @param q Pointer to the queue object. * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues. 
* @param free_memory Non-zero if the DMA memory associated with the queue should be freed. * * @return Returns 0 on success, or -1 otherwise. */ int32_t sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues, uint32_t free_memory) { sli4_destroy_q_fn_t destroy = NULL; int32_t rc = -1; if (!sli4 || !q) { ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q); return -1; } if (destroy_queues) { switch (q->type) { case SLI_QTYPE_EQ: destroy = sli_cmd_common_destroy_eq; break; case SLI_QTYPE_CQ: destroy = sli_cmd_common_destroy_cq; break; case SLI_QTYPE_MQ: destroy = sli_cmd_common_destroy_mq; break; case SLI_QTYPE_WQ: if (SLI4_PORT_TYPE_FC == sli4->port_type) { destroy = sli_cmd_fcoe_wq_destroy; } else { /* TODO */ ocs_log_test(sli4->os, "unsupported WQ destroy\n"); return -1; } break; case SLI_QTYPE_RQ: if (SLI4_PORT_TYPE_FC == sli4->port_type) { destroy = sli_cmd_fcoe_rq_destroy; } else { /* TODO */ ocs_log_test(sli4->os, "unsupported RQ destroy\n"); return -1; } break; default: ocs_log_test(sli4->os, "bad queue type %d\n", q->type); return -1; } /* * Destroying queues makes BE3 sad (version 0 interface type). Rely * on COMMON_FUNCTION_RESET to free host allocated queue resources * inside the SLI Port. 
*/ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { destroy = NULL; } /* Destroy the queue if the operation is defined */ if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) { sli4_res_hdr_t *res = NULL; if (sli_bmbx_command(sli4)){ ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n", SLI_QNAME[q->type]); } else if (sli_res_sli_config(sli4->bmbx.virt)) { ocs_log_err(sli4->os, "bad status destroy %s\n", SLI_QNAME[q->type]); } else { res = (void *)((uint8_t *)sli4->bmbx.virt + offsetof(sli4_cmd_sli_config_t, payload)); if (res->status) { ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n", SLI_QNAME[q->type], res->status, res->additional_status); } else { rc = 0; } } } } if (free_memory) { ocs_lock_free(&q->lock); if (ocs_dma_free(sli4->os, &q->dma)) { ocs_log_err(sli4->os, "%s queue ID %d free failed\n", SLI_QNAME[q->type], q->id); rc = -1; } } return rc; } int32_t sli_queue_reset(sli4_t *sli4, sli4_queue_t *q) { ocs_lock(&q->lock); q->index = 0; q->n_posted = 0; if (SLI_QTYPE_MQ == q->type) { q->u.r_idx = 0; } if (q->dma.virt != NULL) { - ocs_memset(q->dma.virt, 0, (q->size * q->length)); + ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length)); } ocs_unlock(&q->lock); return 0; } /** * @ingroup sli * @brief Check if the given queue is empty. * * @par Description * If the valid bit of the current entry is unset, the queue is empty. * * @param sli4 SLI context. * @param q Pointer to the queue object. * * @return Returns TRUE if empty, or FALSE otherwise. */ int32_t sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q) { int32_t rc = TRUE; uint8_t *qe = q->dma.virt; ocs_lock(&q->lock); ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD); qe += q->index * q->size; rc = !sli_queue_entry_is_valid(q, qe, FALSE); ocs_unlock(&q->lock); return rc; } /** * @ingroup sli * @brief Arm an EQ. * * @param sli4 SLI context. * @param q Pointer to queue object. * @param arm If TRUE, arm the EQ. * * @return Returns 0 on success, or non-zero otherwise. 
*/ int32_t sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm) { uint32_t val = 0; ocs_lock(&q->lock); val = sli_eq_doorbell(q->n_posted, q->id, arm); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); q->n_posted = 0; ocs_unlock(&q->lock); return 0; } /** * @ingroup sli * @brief Arm a queue. * * @param sli4 SLI context. * @param q Pointer to queue object. * @param arm If TRUE, arm the queue. * * @return Returns 0 on success, or non-zero otherwise. */ int32_t sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm) { uint32_t val = 0; ocs_lock(&q->lock); switch (q->type) { case SLI_QTYPE_EQ: val = sli_eq_doorbell(q->n_posted, q->id, arm); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); q->n_posted = 0; break; case SLI_QTYPE_CQ: val = sli_cq_doorbell(q->n_posted, q->id, arm); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); q->n_posted = 0; break; default: ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n", SLI_QNAME[q->type]); } ocs_unlock(&q->lock); return 0; } /** * @ingroup sli * @brief Write an entry to the queue object. * * Note: Assumes the q->lock will be locked and released by the caller. * * @param sli4 SLI context. * @param q Pointer to the queue object. * @param entry Pointer to the entry contents. * * @return Returns queue index on success, or negative error value otherwise. 
 */
int32_t
_sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
{
    int32_t     rc = 0;
    uint8_t     *qe = q->dma.virt;
    uint32_t    qindex;

    /* Remember the slot we wrote so it can be returned to the caller. */
    qindex = q->index;
    qe += q->index * q->size;

    if (entry) {
        if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) {
            sli_set_wq_id_association(entry, q->id);
        }
#if defined(OCS_INCLUDE_DEBUG)
        switch (q->type) {
        case SLI_QTYPE_WQ: {
            ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe", entry, q->size);
            break;

        }
        case SLI_QTYPE_MQ:
            /* Note: we don't really need to dump the whole
             * 256 bytes, just do 64 */
            ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mqe outbound", entry, 64);
            break;

        default:
            break;
        }
#endif
        ocs_memcpy(qe, entry, q->size);
        q->n_posted = 1;
    }

    /* Sync the entry to the device before ringing the doorbell. */
    ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);

    rc = sli_queue_doorbell(sli4, q);

    /* Advance the producer index; q->length is assumed to be a power of 2. */
    q->index = (q->index + q->n_posted) & (q->length - 1);
    q->n_posted = 0;

    if (rc < 0) {
        /* failure */
        return rc;
    } else if (rc > 0) {
        /* failure, but we need to return a negative value on failure */
        return -rc;
    } else {
        return qindex;
    }
}

/**
 * @ingroup sli
 * @brief Write an entry to the queue object.
 *
 * Note: Assumes the q->lock will be locked and released by the caller.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 * @param entry Pointer to the entry contents.
 *
 * @return Returns queue index on success, or negative error value otherwise.
 */
int32_t
sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
{
    int32_t rc;

    ocs_lock(&q->lock);
        rc = _sli_queue_write(sli4, q, entry);
    ocs_unlock(&q->lock);

    return rc;
}

/**
 * @brief Check if the current queue entry is valid.
 *
 * @param q Pointer to the queue object.
 * @param qe Pointer to the queue entry.
 * @param clear Boolean to clear valid bit.
 *
 * @return Returns TRUE if the entry is valid, or FALSE otherwise.
 */
static uint8_t
sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear)
{
    uint8_t     valid = FALSE;

    switch (q->type) {
    case SLI_QTYPE_EQ:
        valid = ((sli4_eqe_t *)qe)->vld;
        if (valid && clear) {
            ((sli4_eqe_t *)qe)->vld = 0;
        }
        break;
    case SLI_QTYPE_CQ:
        /*
         * For both MCQE and WCQE/RCQE, the valid bit
         * is bit 31 of dword 3 (0 based)
         */
        valid = (qe[15] & 0x80) != 0;
        /* NOTE(review): bitwise '&' only behaves like '&&' here because
         * both operands are 0/1; a non-0/1 'clear' would misbehave —
         * confirm before changing. */
        if (valid & clear) {
            qe[15] &= ~0x80;
        }
        break;
    case SLI_QTYPE_MQ:
        /* MQ validity is tracked by comparing read and write indices. */
        valid = q->index != q->u.r_idx;
        break;
    case SLI_QTYPE_RQ:
        valid = TRUE;
        clear = FALSE;
        break;
    default:
        ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type);
    }

    if (clear) {
        ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
    }

    return valid;
}

/**
 * @ingroup sli
 * @brief Read an entry from the queue object.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 * @param entry Destination pointer for the queue entry contents.
 *
 * @return Returns 0 on success, or non-zero otherwise.
 */
int32_t
sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
{
    int32_t     rc = 0;
    uint8_t     *qe = q->dma.virt;
    uint32_t    *qindex = NULL;

    /* MQ completions consume from the separate read index. */
    if (SLI_QTYPE_MQ == q->type) {
        qindex = &q->u.r_idx;
    } else {
        qindex = &q->index;
    }

    ocs_lock(&q->lock);

    ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);

    qe += *qindex * q->size;

    if (!sli_queue_entry_is_valid(q, qe, TRUE)) {
        ocs_unlock(&q->lock);
        return -1;
    }

    if (entry) {
        ocs_memcpy(entry, qe, q->size);
#if defined(OCS_INCLUDE_DEBUG)
        switch(q->type) {
        case SLI_QTYPE_CQ:
            ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size);
            break;
        case SLI_QTYPE_MQ:
            ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64);
            break;
        case SLI_QTYPE_EQ:
            ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size);
            break;
        default:
            break;
        }
#endif
    }

    switch (q->type) {
    case SLI_QTYPE_EQ:
    case SLI_QTYPE_CQ:
    case SLI_QTYPE_MQ:
        /* q->length is assumed to be a power of 2. */
        *qindex = (*qindex + 1) & (q->length - 1);
        if (SLI_QTYPE_MQ != q->type) {
            q->n_posted++;
        }
        break;
    default:
        /* reads don't update the index */
        break;
    }

    ocs_unlock(&q->lock);

    return rc;
}

/* Return the current index of a queue, or -1 if q is NULL. */
int32_t
sli_queue_index(sli4_t *sli4, sli4_queue_t *q)
{
    if (q) {
        return q->index;
    } else {
        return -1;
    }
}

/* Locked wrapper around _sli_queue_poke(). */
int32_t
sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
{
    int32_t rc;

    ocs_lock(&q->lock);
        rc = _sli_queue_poke(sli4, q, index, entry);
    ocs_unlock(&q->lock);

    return rc;
}

/* Overwrite the entry at 'index' in place (no doorbell, no index update).
 * Caller holds q->lock. Returns -1 if index is out of range. */
int32_t
_sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
{
    int32_t     rc = 0;
    uint8_t     *qe = q->dma.virt;

    if (index >= q->length) {
        return -1;
    }

    qe += index * q->size;

    if (entry) {
        ocs_memcpy(qe, entry, q->size);
    }

    ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);

    return rc;
}

/**
 * @ingroup sli
 * @brief Allocate SLI Port resources.
 *
 * @par Description
 * Allocate port-related resources, such as VFI, RPI, XRI, and so on.
 * Resources are modeled using extents, regardless of whether the underlying
 * device implements resource extents. If the device does not implement
 * extents, the SLI layer models this as a single (albeit large) extent.
 *
 * @param sli4 SLI context.
 * @param rtype Resource type (for example, RPI or XRI)
 * @param rid Allocated resource ID.
 * @param index Index into the bitmap.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid, uint32_t *index)
{
    int32_t     rc = 0;
    uint32_t    size;
    uint32_t    extent_idx;
    uint32_t    item_idx;
    int         status;

    *rid = UINT32_MAX;
    *index = UINT32_MAX;

    switch (rtype) {
    case SLI_RSRC_FCOE_VFI:
    case SLI_RSRC_FCOE_VPI:
    case SLI_RSRC_FCOE_RPI:
    case SLI_RSRC_FCOE_XRI:
        /* Find a free slot in the resource bitmap; the bitmap index maps
         * to (extent, item-within-extent). */
        status = ocs_bitmap_find(sli4->config.extent[rtype].use_map,
                sli4->config.extent[rtype].map_size);
        if (status < 0) {
            ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n",
                    rtype, sli4->config.extent[rtype].n_alloc);
            rc = -1;
            break;
        } else {
            *index = status;
        }

        size = sli4->config.extent[rtype].size;

        extent_idx = *index / size;
        item_idx   = *index % size;

        *rid = sli4->config.extent[rtype].base[extent_idx] + item_idx;

        sli4->config.extent[rtype].n_alloc++;
        break;
    default:
        rc = -1;
    }

    return rc;
}

/**
 * @ingroup sli
 * @brief Free the SLI Port resources.
 *
 * @par Description
 * Free port-related resources, such as VFI, RPI, XRI, and so. See discussion of
 * "extent" usage in sli_resource_alloc.
 *
 * @param sli4 SLI context.
 * @param rtype Resource type (for example, RPI or XRI).
 * @param rid Allocated resource ID.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid)
{
    int32_t     rc = -1;
    uint32_t    x;
    uint32_t    size, *base;

    switch (rtype) {
    case SLI_RSRC_FCOE_VFI:
    case SLI_RSRC_FCOE_VPI:
    case SLI_RSRC_FCOE_RPI:
    case SLI_RSRC_FCOE_XRI:
        /*
         * Figure out which extent contains the resource ID. I.e. find
         * the extent such that
         *   extent->base <= resource ID < extent->base + extent->size
         */
        base = sli4->config.extent[rtype].base;
        size = sli4->config.extent[rtype].size;

        /*
         * In the case of FW reset, this may be cleared but the force_free path will
         * still attempt to free the resource. Prevent a NULL pointer access.
         */
        if (base != NULL) {
            for (x = 0; x < sli4->config.extent[rtype].number; x++) {
                if ((rid >= base[x]) && (rid < (base[x] + size))) {
                    rid -= base[x];
                    ocs_bitmap_clear(sli4->config.extent[rtype].use_map,
                            (x * size) + rid);
                    rc = 0;
                    break;
                }
            }
        }
        break;
    default:
        ;
    }

    return rc;
}

/* Clear the entire use bitmap for a resource type, marking every id free.
 * Does not adjust n_alloc. */
int32_t
sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype)
{
    int32_t     rc = -1;
    uint32_t    i;

    switch (rtype) {
    case SLI_RSRC_FCOE_VFI:
    case SLI_RSRC_FCOE_VPI:
    case SLI_RSRC_FCOE_RPI:
    case SLI_RSRC_FCOE_XRI:
        for (i = 0; i < sli4->config.extent[rtype].map_size; i++) {
            ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i);
        }
        rc = 0;
        break;
    default:
        ;
    }

    return rc;
}

/**
 * @ingroup sli
 * @brief Parse an EQ entry to retrieve the CQ_ID for this event.
 *
 * @param sli4 SLI context.
 * @param buf Pointer to the EQ entry.
 * @param cq_id CQ_ID for this entry (only valid on success).
 *
 * @return
 * - 0 if success.
 * - < 0 if error.
 * - > 0 if firmware detects EQ overflow.
 */
int32_t
sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id)
{
    sli4_eqe_t  *eqe = (void *)buf;
    int32_t     rc = 0;

    if (!sli4 || !buf || !cq_id) {
        ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n",
                sli4, buf, cq_id);
        return -1;
    }

    switch (eqe->major_code) {
    case SLI4_MAJOR_CODE_STANDARD:
        *cq_id = eqe->resource_id;
        break;
    case SLI4_MAJOR_CODE_SENTINEL:
        ocs_log_debug(sli4->os, "sentinel EQE\n");
        rc = 1;
        break;
    default:
        ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n",
                eqe->major_code, eqe->minor_code);
        rc = -1;
    }

    return rc;
}

/**
 * @ingroup sli
 * @brief Parse a CQ entry to retrieve the event type and the associated queue.
 *
 * @param sli4 SLI context.
 * @param cq CQ to process.
 * @param cqe Pointer to the CQ entry.
 * @param etype CQ event type.
 * @param q_id Queue ID associated with this completion message
 * (that is, MQ_ID, RQ_ID, and so on).
 *
 * @return
 * - 0 if call completed correctly and CQE status is SUCCESS.
 * - -1 if call failed (no CQE status).
* - Other value if call completed correctly and return value is a CQE status value. */ int32_t sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype, uint16_t *q_id) { int32_t rc = 0; if (!sli4 || !cq || !cqe || !etype) { ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n", sli4, cq, cqe, etype, q_id); return -1; } if (cq->u.flag.is_mq) { sli4_mcqe_t *mcqe = (void *)cqe; if (mcqe->ae) { *etype = SLI_QENTRY_ASYNC; } else { *etype = SLI_QENTRY_MQ; rc = sli_cqe_mq(mcqe); } *q_id = -1; } else if (SLI4_PORT_TYPE_FC == sli4->port_type) { rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id); } else { ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n", sli4->port_type); rc = -1; } return rc; } /** * @ingroup sli * @brief Cause chip to enter an unrecoverable error state. * * @par Description * Cause chip to enter an unrecoverable error state. This is * used when detecting unexpected FW behavior so FW can be * hwted from the driver as soon as error is detected. * * @param sli4 SLI context. * @param dump Generate dump as part of reset. * * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip). 
 */
int32_t
sli_raise_ue(sli4_t *sli4, uint8_t dump)
{
/* Magic 'dump' value requesting a functional device dump (FDD). */
#define FDD 2
    if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
        switch(sli_get_asic_type(sli4)) {
        case SLI4_ASIC_TYPE_BE3: {
            sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff);
            sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0);
            break;
        }
        case SLI4_ASIC_TYPE_SKYHAWK: {
            uint32_t value;
            value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG);
            ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24)));
            break;
        }
        default:
            ocs_log_test(sli4->os, "invalid asic type %d\n",
                    sli_get_asic_type(sli4));
            return -1;
        }
    } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) {
        if (dump == FDD) {
            sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL,
                    SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP);
        } else {
            uint32_t value = SLI4_PHYDEV_CONTROL_FRST;
            if (dump == 1) {
                value |= SLI4_PHYDEV_CONTROL_DD;
            }
            sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value);
        }
    } else {
        ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
        return -1;
    }
    return 0;
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to to check if a dump is present.
 *
 * @param sli4 SLI context.
 *
 * @return Returns 1 if the chip is ready, or 0 if the chip is not ready, 2 if fdp is present.
 */
int32_t
sli_dump_is_ready(sli4_t *sli4)
{
    int32_t rc = 0;
    uint32_t port_val;
    uint32_t bmbx_val;
    uint32_t uerr_lo;
    uint32_t uerr_hi;
    uint32_t uerr_mask_lo;
    uint32_t uerr_mask_hi;

    if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
        /* for iftype=0, dump ready when UE is encountered */
        uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
        uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
        uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
        uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
        if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) {
            rc = 1;
        }

    } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) {
        /*
         * Ensure that the port is ready AND the mailbox is
         * ready before signaling that the dump is ready to go.
         */
        port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
        bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX);

        if ((bmbx_val & SLI4_BMBX_RDY) &&
            SLI4_PORT_STATUS_READY(port_val)) {
            if(SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) {
                rc = 1;
            }else if( SLI4_PORT_STATUS_FDP_PRESENT(port_val)) {
                rc = 2;
            }
        }
    } else {
        ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
        return -1;
    }
    return rc;
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and no dump is present.
 * - 1 if call completed and dump is present.
 * - -1 if call failed (unsupported chip).
 */
int32_t
sli_dump_is_present(sli4_t *sli4)
{
    uint32_t val;
    uint32_t ready;

    if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) {
        ocs_log_test(sli4->os, "Function only supported for I/F type 2");
        return -1;
    }

    /* If the chip is not ready, then there cannot be a dump */
    ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US);
    if (!ready) {
        return 0;
    }

    val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
    if (UINT32_MAX == val) {
        ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
        return -1;
    } else {
        return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0);
    }
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and reset is not required.
 * - 1 if call completed and reset is required.
 * - -1 if call failed.
 */
int32_t
sli_reset_required(sli4_t *sli4)
{
    uint32_t val;

    if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
        ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
        return 0;
    }

    val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
    if (UINT32_MAX == val) {
        ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
        return -1;
    } else {
        return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0);
    }
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if
 * the port status indicates that a FW error has occurred.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and no FW error occurred.
 * - > 0 which indicates that a FW error has occurred.
 * - -1 if call failed.
 */
int32_t
sli_fw_error_status(sli4_t *sli4)
{
    uint32_t sliport_semaphore;
    int32_t rc = 0;

    sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
    if (UINT32_MAX == sliport_semaphore) {
        ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n");
        return 1;
    }
    rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0);

    if (rc == 0) {
        if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
            (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) {
            uint32_t uerr_mask_lo, uerr_mask_hi;
            uint32_t uerr_status_lo, uerr_status_hi;

            uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
            uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
            uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
            uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
            if ((uerr_mask_lo & uerr_status_lo) != 0 ||
                (uerr_mask_hi & uerr_status_hi) != 0) {
                rc = 1;
            }
        } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type)) {
            uint32_t sliport_status;

            sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
            rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0);
        }
    }
    return rc;
}

/**
 * @ingroup sli
 * @brief Determine if the chip FW is in a ready state
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and FW is not ready.
 * - 1 if call completed correctly and FW is ready.
 * - -1 if call failed.
 */
int32_t
sli_fw_ready(sli4_t *sli4)
{
    uint32_t val;
    int32_t rc = -1;

    /*
     * Is firmware ready for operation? Check needed depends on IF_TYPE
     */
    if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
        SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) {
        val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
        rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY ==
            SLI4_PORT_SEMAPHORE_PORT(val)) &&
            (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0);
    } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) {
        val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
        rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0);
    }
    return rc;
}

/**
 * @ingroup sli
 * @brief Determine if the link can be configured
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if link is not configurable.
 * - 1 if link is configurable.
 */
int32_t
sli_link_is_configurable(sli4_t *sli)
{
    int32_t rc = 0;
    /*
     * Link config works on: Skyhawk and Lancer
     * Link config does not work on: LancerG6
     */

    switch (sli_get_asic_type(sli)) {
    case SLI4_ASIC_TYPE_SKYHAWK:
    case SLI4_ASIC_TYPE_LANCER:
    case SLI4_ASIC_TYPE_CORSAIR:
        rc = 1;
        break;
    case SLI4_ASIC_TYPE_LANCERG6:
    case SLI4_ASIC_TYPE_BE3:
    default:
        rc = 0;
        break;
    }

    return rc;
}

/* vim: set noexpandtab textwidth=120: */

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_WQ_CREATE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp The ULP to bind
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size,
            ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp)
{
    sli4_req_fcoe_wq_create_t   *wq = NULL;
    uint32_t    sli_config_off = 0;
    uint32_t    p;
    uintptr_t   addr;

    if (SLI4_PORT_TYPE_FC == sli4->port_type) {
        uint32_t payload_size;

        /* Payload length must accommodate both request and response */
        payload_size = max(sizeof(sli4_req_fcoe_wq_create_t),
                sizeof(sli4_res_common_create_queue_t));

        sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
                NULL);
    }
    wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off);

    wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
    wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
    wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) -
                    sizeof(sli4_req_hdr_t);
    /* valid values for number of pages: 1-4 (sec 4.5.1) */
    wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
    if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) {
        /* Invalid page count: write nothing and report zero bytes. */
        return 0;
    }

    wq->cq_id = cq_id;

    if (sli4->config.dual_ulp_capable) {
        wq->dua = 1;
        wq->bqu = 1;
        wq->ulp = ulp;
    }

    for (p = 0, addr = qmem->phys;
            p < wq->num_pages;
            p++, addr += SLI_PAGE_SIZE) {
        wq->page_physical_address[p].low  = ocs_addr32_lo(addr);
        wq->page_physical_address[p].high = ocs_addr32_hi(addr);
    }

    return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_WQ_CREATE_V1 command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ignored This parameter carries the ULP for WQ (ignored for V1)
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size,
			  ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored)
{
	sli4_req_fcoe_wq_create_v1_t	*wq = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	p;
	uintptr_t	addr;
	uint32_t	page_size = 0;
	uint32_t	page_bytes = 0;
	uint32_t	n_wqe = 0;

	/* On FC ports the request must be wrapped in a SLI_CONFIG header */
	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t),
				   sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off);

	wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
	wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) -
				 sizeof(sli4_req_hdr_t);
	wq->hdr.version = 1;

	/* total number of WQ entries backed by the queue memory */
	n_wqe = qmem->size / sli4->config.wqe_size;

	/* This heuristic to determine the page size is simplistic
	 * but could be made more sophisticated
	 *
	 * NOTE(review): page_size here is the V1 command's page-size code, in
	 * units of SLI_PAGE_SIZE; the 524288 -> 10 entry breaks the otherwise
	 * power-of-two progression (1, 2, 4, 8, ...) — confirm against the
	 * SLI-4 WQ_CREATE_V1 specification.
	 */
	switch (qmem->size) {
	case 4096:
	case 8192:
	case 16384:
	case 32768:
		page_size = 1;
		break;
	case 65536:
		page_size = 2;
		break;
	case 131072:
		page_size = 4;
		break;
	case 262144:
		page_size = 8;
		break;
	case 524288:
		page_size = 10;
		break;
	default:
		/* unsupported queue-memory size */
		return 0;
	}
	page_bytes = page_size * SLI_PAGE_SIZE;

	/* valid values for number of pages: 1-8 */
	wq->num_pages = sli_page_count(qmem->size, page_bytes);
	if (!wq->num_pages ||
	    (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) {
		return 0;
	}

	wq->cq_id = cq_id;

	wq->page_size = page_size;

	if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) {
		wq->wqe_size = SLI4_WQE_EXT_SIZE;
	} else {
		wq->wqe_size = SLI4_WQE_SIZE;
	}

	wq->wqe_count = n_wqe;

	for (p = 0, addr = qmem->phys;
	     p < wq->num_pages;
	     p++, addr += page_bytes) {
		wq->page_physical_address[p].low  = ocs_addr32_lo(addr);
		wq->page_physical_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_WQ_DESTROY
command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param wq_id WQ_ID. * * @return Returns the number of bytes written. */ int32_t sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id) { sli4_req_fcoe_wq_destroy_t *wq = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t), sizeof(sli4_res_hdr_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off); wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY; wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) - sizeof(sli4_req_hdr_t); wq->wq_id = wq_id; return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t)); } /** * @ingroup sli_fc * @brief Write an FCOE_POST_SGL_PAGES command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param xri starting XRI * @param xri_count XRI * @param page0 First SGL memory page. * @param page1 Second SGL memory page (optional). * @param dma DMA buffer for non-embedded mailbox command (options) * * if non-embedded mbx command is used, dma buffer must be at least (32 + xri_count*16) in length * * @return Returns the number of bytes written. 
 */
int32_t
sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size,
			    uint16_t xri, uint32_t xri_count,
			    ocs_dma_t *page0[], ocs_dma_t *page1[],
			    ocs_dma_t *dma)
{
	sli4_req_fcoe_post_sgl_pages_t	*post = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	i;

	/* On FC ports the request must be wrapped in a SLI_CONFIG header */
	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t),
				   sizeof(sli4_res_hdr_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    dma);
	}

	/* Non-embedded form: the request payload lives in the caller's DMA
	 * buffer instead of inside the mailbox command itself. */
	if (dma) {
		post = dma->virt;
		ocs_memset(post, 0, dma->size);
	} else {
		post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf +
							  sli_config_off);
	}

	post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES;
	post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	/* payload size calculation
	 *   4 = xri_start + xri_count
	 *   xri_count = # of XRI's registered
	 *   sizeof(uint64_t) = physical address size
	 *   2 = # of physical addresses per page set
	 */
	post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2));

	post->xri_start = xri;
	post->xri_count = xri_count;

	for (i = 0; i < xri_count; i++) {
		post->page_set[i].page0_low  = ocs_addr32_lo(page0[i]->phys);
		post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys);
	}

	/* page1 is optional; page1 entries stay zero when absent */
	if (page1) {
		for (i = 0; i < xri_count; i++) {
			post->page_set[i].page1_low  = ocs_addr32_lo(page1[i]->phys);
			post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys);
		}
	}

	return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_RQ_CREATE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp This parameter carries the ULP for the RQ
 * @param buffer_size Buffer size pointed to by each RQE.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size,
		       ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
		       uint16_t buffer_size)
{
	sli4_req_fcoe_rq_create_t	*rq = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	p;
	uintptr_t	addr;

	/* On FC ports the request must be wrapped in a SLI_CONFIG header */
	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_rq_create_t),
				   sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off);

	rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
	rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) -
				 sizeof(sli4_req_hdr_t);
	/* valid values for number of pages: 1-8 (sec 4.5.6) */
	rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
	if (!rq->num_pages ||
	    (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) {
		ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages);
		return 0;
	}

	/*
	 * RQE count is the log base 2 of the total number of entries
	 */
	rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE);

	if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) ||
	    (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) {
		ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
			    buffer_size,
			    SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE,
			    SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE);
		return -1;
	}
	rq->buffer_size = buffer_size;

	rq->cq_id = cq_id;

	if (sli4->config.dual_ulp_capable) {
		rq->dua = 1;
		rq->bqu = 1;
		rq->ulp = ulp;
	}

	for (p = 0, addr = qmem->phys;
	     p < rq->num_pages;
	     p++, addr += SLI_PAGE_SIZE) {
		rq->page_physical_address[p].low  = ocs_addr32_lo(addr);
		rq->page_physical_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_RQ_CREATE_V1 command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp This parameter carries the ULP for RQ (ignored for V1)
 * @param buffer_size Buffer size pointed to by each RQE.
 *
 * @note This creates a Version 1 message (hdr.version is set to 1 below).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size,
			  ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
			  uint16_t buffer_size)
{
	sli4_req_fcoe_rq_create_v1_t	*rq = NULL;
	uint32_t	sli_config_off = 0;
	uint32_t	p;
	uintptr_t	addr;

	/* On FC ports the request must be wrapped in a SLI_CONFIG header */
	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t),
				   sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off);

	rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
	rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) -
				 sizeof(sli4_req_hdr_t);
	rq->hdr.version = 1;

	/* Disable "no buffer warnings" to avoid Lancer bug */
	rq->dnb = TRUE;

	/* valid values for number of pages: 1-8 (sec 4.5.6) */
	rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
	if (!rq->num_pages ||
	    (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) {
		ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n",
			     rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES);
		return 0;
	}

	/*
	 * RQE count is the total number of entries (note not lg2(# entries))
	 */
	rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE;

	rq->rqe_size = SLI4_FCOE_RQE_SIZE_8;

	rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;

	if ((buffer_size < sli4->config.rq_min_buf_size) ||
	    (buffer_size > sli4->config.rq_max_buf_size)) {
		ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
			    buffer_size,
			    sli4->config.rq_min_buf_size,
			    sli4->config.rq_max_buf_size);
		return -1;
	}
	rq->buffer_size = buffer_size;

	rq->cq_id = cq_id;

	for (p = 0, addr = qmem->phys;
	     p < rq->num_pages;
	     p++, addr += SLI_PAGE_SIZE) {
		rq->page_physical_address[p].low  = ocs_addr32_lo(addr);
		rq->page_physical_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_RQ_DESTROY command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param rq_id RQ_ID.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id)
{
	sli4_req_fcoe_rq_destroy_t	*rq = NULL;
	uint32_t	sli_config_off = 0;

	/* On FC ports the request must be wrapped in a SLI_CONFIG header */
	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t),
				   sizeof(sli4_res_hdr_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off);

	rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY;
	rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) -
				 sizeof(sli4_req_hdr_t);

	rq->rq_id = rq_id;

	return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_READ_FCF_TABLE command.
 *
 * @note
 * The response of this command exceeds the size of an embedded
 * command and requires an external buffer with DMA capability to hold the results.
 * The caller should allocate the ocs_dma_t structure / memory.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
 * @param index FCF table index to retrieve.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size,
			    ocs_dma_t *dma, uint16_t index)
{
	sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL;

	if (SLI4_PORT_TYPE_FC != sli4->port_type) {
		ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n");
		return -1;
	}

	/* request is built directly in the caller's DMA buffer (non-embedded) */
	read_fcf = dma->virt;

	ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t));

	read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE;
	read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	/* NOTE(review): request_length here is dma->size minus the request
	 * struct, i.e. the space left for the (larger) response — confirm
	 * this matches the non-embedded SLI_CONFIG length convention. */
	read_fcf->hdr.request_length = dma->size -
				       sizeof(sli4_req_fcoe_read_fcf_table_t);
	read_fcf->fcf_index = index;

	return sli_cmd_sli_config(sli4, buf, size, 0, dma);
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_POST_HDR_TEMPLATES command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
 * @param rpi Starting RPI index for the header templates.
 * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size,
				ocs_dma_t *dma, uint16_t rpi,
				ocs_dma_t *payload_dma)
{
	sli4_req_fcoe_post_hdr_templates_t *template = NULL;
	uint32_t	sli_config_off = 0;
	uintptr_t	phys = 0;
	uint32_t	i = 0;
	uint32_t	page_count;
	uint32_t	payload_size;

	page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);

	payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) +
		       page_count * sizeof(sli4_physical_page_descriptor_t);

	if (page_count > 16) {
		/* We can't fit more than 16 descriptors into an embedded mailbox
		   command, it has to be non-embedded */
		if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) {
			ocs_log_err(sli4->os, "mailbox payload memory allocation fail\n");
			return 0;
		}
		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma);
		template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt;
	} else {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL);
		template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off);
	}

	/* UINT16_MAX means "use the first RPI of the extent" */
	if (UINT16_MAX == rpi) {
		rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0];
	}

	template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES;
	template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) -
				       sizeof(sli4_req_hdr_t);
	template->rpi_offset = rpi;
	template->page_count = page_count;
	phys = dma->phys;
	for (i = 0; i < template->page_count; i++) {
		template->page_descriptor[i].low  = ocs_addr32_lo(phys);
		template->page_descriptor[i].high = ocs_addr32_hi(phys);

		phys += SLI_PAGE_SIZE;
	}

	return(sli_config_off + payload_size);
}

/* Write an FCOE_REDISCOVER_FCF command; index == UINT16_MAX requests a
 * rediscovery of all FCFs rather than a single table entry. */
int32_t
sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index)
{
	sli4_req_fcoe_rediscover_fcf_t *redisc = NULL;
	uint32_t	sli_config_off = 0;

	sli_config_off = sli_cmd_sli_config(sli4, buf, size,
					    sizeof(sli4_req_fcoe_rediscover_fcf_t),
					    NULL);

	redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off);

	redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF;
	redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) -
				     sizeof(sli4_req_hdr_t);

	if (index == UINT16_MAX) {
		redisc->fcf_count = 0;
	} else {
		redisc->fcf_count = 1;
		redisc->fcf_index[0] = index;
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an ABORT_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param type Abort type, such as XRI, abort tag, and request tag.
 * @param send_abts Boolean to cause the hardware to automatically generate an ABTS.
 * @param ids ID of IOs to abort.
 * @param mask Mask applied to the ID values to abort.
 * @param tag Tag value associated with this abort.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type,
	      uint32_t send_abts, uint32_t ids, uint32_t mask, uint16_t tag,
	      uint16_t cq_id)
{
	sli4_abort_wqe_t	*abort = buf;

	ocs_memset(buf, 0, size);

	switch (type) {
	case SLI_ABORT_XRI:
		abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
		/* an XRI tag identifies exactly one exchange; masking it
		 * makes no sense, so warn and clear */
		if (mask) {
			ocs_log_warn(sli4->os, "warning non-zero mask %#x when aborting XRI %#x\n",
				     mask, ids);
			mask = 0;
		}
		break;
	case SLI_ABORT_ABORT_ID:
		abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
		break;
	case SLI_ABORT_REQUEST_ID:
		abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
		break;
	default:
		ocs_log_test(sli4->os, "unsupported type %#x\n", type);
		return -1;
	}

	/* ia (inhibit ABTS) is the inverse of send_abts */
	abort->ia = send_abts ? 0 : 1;

	/* Suppress ABTS retries */
	abort->ir = 1;

	abort->t_mask = mask;
	abort->t_tag  = ids;
	abort->command = SLI4_WQE_ABORT;
	abort->request_tag = tag;
	abort->qosd = TRUE;
	abort->cq_id = cq_id;
	abort->cmd_type = SLI4_CMD_ABORT_WQE;

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write an ELS_REQUEST64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the ELS request.
 * @param req_type ELS request type.
 * @param req_len Length of ELS request in bytes.
 * @param max_rsp_len Max length of ELS response in bytes.
 * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rnode Destination of ELS request (that is, the remote node).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
		      uint8_t req_type, uint32_t req_len, uint32_t max_rsp_len,
		      uint8_t timeout, uint16_t xri, uint16_t tag,
		      uint16_t cq_id, ocs_remote_node_t *rnode)
{
	sli4_els_request64_wqe_t	*els = buf;
	sli4_sge_t	*sge = sgl->virt;
	uint8_t		is_fabric = FALSE;

	ocs_memset(buf, 0, size);

	if (sli4->config.sgl_pre_registered) {
		/* pre-registered SGL: describe the payload with a 64-bit BDE */
		els->xbl = FALSE;

		els->dbde = TRUE;
		els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64;

		els->els_request_payload.buffer_length = req_len;
		els->els_request_payload.u.data.buffer_address_low  = sge[0].buffer_address_low;
		els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high;
	} else {
		/* otherwise point at the SGL itself (BLP) */
		els->xbl = TRUE;

		els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP;

		els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t);
		els->els_request_payload.u.blp.sgl_segment_address_low  = ocs_addr32_lo(sgl->phys);
		els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
	}

	els->els_request_payload_length = req_len;
	els->max_response_payload_length = max_rsp_len;

	els->xri_tag = xri;
	els->timer = timeout;
	els->class = SLI4_ELS_REQUEST64_CLASS_3;

	els->command = SLI4_WQE_ELS_REQUEST64;

	els->request_tag = tag;

	if (rnode->node_group) {
		els->hlm = TRUE;
		els->remote_id = rnode->fc_id & 0x00ffffff;
	}

	els->iod = SLI4_ELS_REQUEST64_DIR_READ;

	els->qosd = TRUE;

	/* figure out the ELS_ID value from the request buffer */
	switch (req_type) {
	case FC_ELS_CMD_LOGO:
		els->els_id = SLI4_ELS_REQUEST64_LOGO;
		if (rnode->attached) {
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
			els->context_tag = rnode->indicator;
		} else {
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
			els->context_tag = rnode->sport->indicator;
		}
		if (FC_ADDR_FABRIC == rnode->fc_id) {
			is_fabric = TRUE;
		}
		break;
	case FC_ELS_CMD_FDISC:
		if (FC_ADDR_FABRIC == rnode->fc_id) {
			is_fabric = TRUE;
		}
		if (0 == rnode->sport->fc_id) {
			els->els_id = SLI4_ELS_REQUEST64_FDISC;
			is_fabric = TRUE;
		} else {
			els->els_id = SLI4_ELS_REQUEST64_OTHER;
		}
		els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
		els->context_tag = rnode->sport->indicator;
		els->sp = TRUE;
		break;
	case FC_ELS_CMD_FLOGI:
		els->els_id = SLI4_ELS_REQUEST64_FLOGIN;
		is_fabric = TRUE;
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
			if (!rnode->sport->domain) {
				ocs_log_test(sli4->os, "invalid domain handle\n");
				return -1;
			}
			/*
			 * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the
			 * FCFI here
			 */
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI;
			els->context_tag = rnode->sport->domain->fcf_indicator;
			els->sp = TRUE;
		} else {
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
			els->context_tag = rnode->sport->indicator;

			/*
			 * Set SP here ... we haven't done a REG_VPI yet
			 * TODO: need to maybe not set this when we have
			 *       completed VFI/VPI registrations ...
			 *
			 * Use the FC_ID of the SPORT if it has been allocated, otherwise
			 * use an S_ID of zero.
			 */
			els->sp = TRUE;
			if (rnode->sport->fc_id != UINT32_MAX) {
				els->sid = rnode->sport->fc_id;
			}
		}
		break;
	case FC_ELS_CMD_PLOGI:
		els->els_id = SLI4_ELS_REQUEST64_PLOGI;
		els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
		els->context_tag = rnode->sport->indicator;
		break;
	case FC_ELS_CMD_SCR:
		els->els_id = SLI4_ELS_REQUEST64_OTHER;
		els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
		els->context_tag = rnode->sport->indicator;
		break;
	default:
		els->els_id = SLI4_ELS_REQUEST64_OTHER;
		if (rnode->attached) {
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
			els->context_tag = rnode->indicator;
		} else {
			els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
			els->context_tag = rnode->sport->indicator;
		}
		break;
	}

	if (is_fabric) {
		els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC;
	} else {
		els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
	}

	els->cq_id = cq_id;

	if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) {
		els->remote_id = rnode->fc_id;
	}
	if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) {
		els->temporary_rpi = rnode->indicator;
	}

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write an FCP_ICMND64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the scatter gather list.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (that is, the remote node).
 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
		    uint16_t xri, uint16_t tag, uint16_t cq_id, uint32_t rpi,
		    ocs_remote_node_t *rnode, uint8_t timeout)
{
	sli4_fcp_icmnd64_wqe_t *icmnd = buf;
	sli4_sge_t	*sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		icmnd->xbl = FALSE;

		icmnd->dbde = TRUE;
		icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		icmnd->bde.buffer_length = sge[0].buffer_length;
		icmnd->bde.u.data.buffer_address_low  = sge[0].buffer_address_low;
		icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
	} else {
		icmnd->xbl = TRUE;

		icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP;

		icmnd->bde.buffer_length = sgl->size;
		icmnd->bde.u.blp.sgl_segment_address_low  = ocs_addr32_lo(sgl->phys);
		icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
	}

	/* sge[0] is the FCP_CMND buffer, sge[1] the FCP_RSP buffer */
	icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
	icmnd->xri_tag = xri;
	icmnd->context_tag = rpi;
	icmnd->timer = timeout;

	icmnd->pu = 2;	/* WQE word 4 contains read transfer length */
	icmnd->class = SLI4_ELS_REQUEST64_CLASS_3;
	icmnd->command = SLI4_WQE_FCP_ICMND64;
	icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;

	icmnd->abort_tag = xri;

	icmnd->request_tag = tag;
	icmnd->len_loc = 3;
	if (rnode->node_group) {
		icmnd->hlm = TRUE;
		icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff;
	}
	if (((ocs_node_t *)rnode->node)->fcp2device) {
		/* FCP-2 (tape) device: enable exchange error recovery */
		icmnd->erp = TRUE;
	}
	icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE;
	icmnd->cq_id = cq_id;

	return  0;
}

/**
 * @ingroup sli_fc
 * @brief Write an FCP_IREAD64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the scatter gather list.
 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
 * @param xfer_len Data transfer length.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (i.e. remote node).
 * @param dif T10 DIF operation, or 0 to disable.
 * @param bs T10 DIF block size, or 0 if DIF is disabled.
 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
		    uint32_t first_data_sge, uint32_t xfer_len, uint16_t xri,
		    uint16_t tag, uint16_t cq_id, uint32_t rpi,
		    ocs_remote_node_t *rnode, uint8_t dif, uint8_t bs,
		    uint8_t timeout)
{
	sli4_fcp_iread64_wqe_t *iread = buf;
	sli4_sge_t	*sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		iread->xbl = FALSE;

		iread->dbde = TRUE;
		iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		iread->bde.buffer_length = sge[0].buffer_length;
		iread->bde.u.data.buffer_address_low  = sge[0].buffer_address_low;
		iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
	} else {
		iread->xbl = TRUE;

		iread->bde.bde_type = SLI4_BDE_TYPE_BLP;

		iread->bde.buffer_length = sgl->size;
		iread->bde.u.blp.sgl_segment_address_low  = ocs_addr32_lo(sgl->phys);
		iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);

		/* fill out fcp_cmnd buffer len and change resp buffer to be of type
		 * "skip" (note: response will still be written to sge[1] if necessary) */
		iread->fcp_cmd_buffer_length = sge[0].buffer_length;
		sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
	}

	/* sge[0] is the FCP_CMND buffer, sge[1] the FCP_RSP buffer */
	iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
	iread->total_transfer_length = xfer_len;

	iread->xri_tag = xri;
	iread->context_tag = rpi;

	iread->timer = timeout;

	iread->pu = 2;	/* WQE word 4 contains read transfer length */
	iread->class = SLI4_ELS_REQUEST64_CLASS_3;
	iread->command = SLI4_WQE_FCP_IREAD64;
	iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
	iread->dif = dif;
	iread->bs  = bs;

	iread->abort_tag = xri;

	iread->request_tag = tag;
	iread->len_loc = 3;
	if (rnode->node_group) {
		iread->hlm = TRUE;
		iread->remote_n_port_id = rnode->fc_id & 0x00ffffff;
	}
	if (((ocs_node_t *)rnode->node)->fcp2device) {
		/* FCP-2 (tape) device: enable exchange error recovery */
		iread->erp = TRUE;
	}
	iread->iod = 1;
	iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE;
	iread->cq_id = cq_id;

	if (sli4->config.perf_hint) {
		iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
		iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
		iread->first_data_bde.u.data.buffer_address_low  = sge[first_data_sge].buffer_address_low;
		iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
	}

	return  0;
}

/**
 * @ingroup sli_fc
 * @brief Write an FCP_IWRITE64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the scatter gather list.
 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
 * @param xfer_len Data transfer length.
 * @param first_burst The number of first burst bytes
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (i.e. remote node)
 * @param dif T10 DIF operation, or 0 to disable
 * @param bs T10 DIF block size, or 0 if DIF is disabled
 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
		     uint32_t first_data_sge, uint32_t xfer_len,
		     uint32_t first_burst, uint16_t xri, uint16_t tag,
		     uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode,
		     uint8_t dif, uint8_t bs, uint8_t timeout)
{
	sli4_fcp_iwrite64_wqe_t *iwrite = buf;
	sli4_sge_t	*sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		iwrite->xbl = FALSE;

		iwrite->dbde = TRUE;
		iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		iwrite->bde.buffer_length = sge[0].buffer_length;
		iwrite->bde.u.data.buffer_address_low  = sge[0].buffer_address_low;
		iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
	} else {
		iwrite->xbl = TRUE;

		iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP;

		iwrite->bde.buffer_length = sgl->size;
		iwrite->bde.u.blp.sgl_segment_address_low  = ocs_addr32_lo(sgl->phys);
		iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);

		/* fill out fcp_cmnd buffer len and change resp buffer to be of type
		 * "skip" (note: response will still be written to sge[1] if necessary) */
		iwrite->fcp_cmd_buffer_length = sge[0].buffer_length;
		sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
	}

	/* sge[0] is the FCP_CMND buffer, sge[1] the FCP_RSP buffer */
	iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
	iwrite->total_transfer_length = xfer_len;
	/* first burst may not exceed the total transfer */
	iwrite->initial_transfer_length = MIN(xfer_len, first_burst);

	iwrite->xri_tag = xri;
	iwrite->context_tag = rpi;

	iwrite->timer = timeout;

	iwrite->pu = 2;	/* WQE word 4 contains read transfer length */
	iwrite->class = SLI4_ELS_REQUEST64_CLASS_3;
	iwrite->command = SLI4_WQE_FCP_IWRITE64;
	iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
	iwrite->dif = dif;
	iwrite->bs  = bs;

	iwrite->abort_tag = xri;

	iwrite->request_tag = tag;
	iwrite->len_loc = 3;
	if (rnode->node_group) {
		iwrite->hlm = TRUE;
		iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff;
	}
	if (((ocs_node_t *)rnode->node)->fcp2device) {
		/* FCP-2 (tape) device: enable exchange error recovery */
		iwrite->erp = TRUE;
	}
	iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE;
	iwrite->cq_id = cq_id;

	if (sli4->config.perf_hint) {
		iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
		iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
		iwrite->first_data_bde.u.data.buffer_address_low  = sge[first_data_sge].buffer_address_low;
		iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
	}

	return  0;
}

/**
 * @ingroup sli_fc
 * @brief Write an FCP_TRECEIVE64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the Scatter-Gather List.
 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
 * @param relative_off Relative offset of the IO (if any).
 * @param xfer_len Data transfer length.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param xid OX_ID for the exchange.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (i.e. remote node).
 * @param flags Optional attributes, including:
 *  - ACTIVE - IO is already active.
 *  - AUTO RSP - Automatically generate a good FCP_RSP.
 * @param dif T10 DIF operation, or 0 to disable.
 * @param bs T10 DIF block size, or 0 if DIF is disabled.
 * @param csctl value of csctl field.
 * @param app_id value for VM application header.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
		       uint32_t first_data_sge, uint32_t relative_off,
		       uint32_t xfer_len, uint16_t xri, uint16_t tag,
		       uint16_t cq_id, uint16_t xid, uint32_t rpi,
		       ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif,
		       uint8_t bs, uint8_t csctl, uint32_t app_id)
{
	sli4_fcp_treceive64_wqe_t *trecv = buf;
	sli4_fcp_128byte_wqe_t *trecv_128 = buf;
	sli4_sge_t	*sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		trecv->xbl = FALSE;

		trecv->dbde = TRUE;
		trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		trecv->bde.buffer_length = sge[0].buffer_length;
		trecv->bde.u.data.buffer_address_low  = sge[0].buffer_address_low;
		trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high;

		trecv->payload_offset_length = sge[0].buffer_length;
	} else {
		trecv->xbl = TRUE;

		/* if data is a single physical address, use a BDE */
		if (!dif && (xfer_len <= sge[2].buffer_length)) {
			trecv->dbde = TRUE;
			trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

			trecv->bde.buffer_length = sge[2].buffer_length;
			trecv->bde.u.data.buffer_address_low  = sge[2].buffer_address_low;
			trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
		} else {
			trecv->bde.bde_type = SLI4_BDE_TYPE_BLP;
			trecv->bde.buffer_length = sgl->size;
			trecv->bde.u.blp.sgl_segment_address_low  = ocs_addr32_lo(sgl->phys);
			trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
		}
	}

	trecv->relative_offset = relative_off;

	if (flags & SLI4_IO_CONTINUATION) {
		trecv->xc = TRUE;
	}
	trecv->xri_tag = xri;

	trecv->context_tag = rpi;

	trecv->pu = TRUE;	/* WQE uses relative offset */

	if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
		trecv->ar = TRUE;
	}

	trecv->command = SLI4_WQE_FCP_TRECEIVE64;
	trecv->class = SLI4_ELS_REQUEST64_CLASS_3;
	trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
	trecv->dif = dif;
	trecv->bs  = bs;

	trecv->remote_xid = xid;

	trecv->request_tag = tag;

	trecv->iod = 1;

	trecv->len_loc = 0x2;

	if (rnode->node_group) {
		trecv->hlm = TRUE;
		trecv->dword5.dword = rnode->fc_id & 0x00ffffff;
	}

	trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE;

	trecv->cq_id = cq_id;

	trecv->fcp_data_receive_length = xfer_len;

	if (sli4->config.perf_hint) {
		trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
		trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
		trecv->first_data_bde.u.data.buffer_address_low  = sge[first_data_sge].buffer_address_low;
		trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
	}

	/* The upper 7 bits of csctl is the priority */
	if (csctl & SLI4_MASK_CCP) {
		trecv->ccpe = 1;
		trecv->ccp = (csctl & SLI4_MASK_CCP);
	}

	/* VM application header is only available on 128-byte WQEs */
	if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) &&
	    !trecv->eat) {
		trecv->app_id_valid = 1;
		trecv->wqes = 1;
		trecv_128->dw[31] = app_id;
	}
	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the Scatter-Gather List.
 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
 * @param relative_off Relative offset of the IO (if any).
 * @param xfer_len Data transfer length.
 * @param xri XRI for this exchange.
 * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround)
 * @param tag IO tag value.
 * @param xid OX_ID for the exchange.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (i.e. remote node).
 * @param flags Optional attributes, including:
 *  - ACTIVE - IO is already active.
 *  - AUTO RSP - Automatically generate a good FCP_RSP.
 * @param dif T10 DIF operation, or 0 to disable.
 * @param bs T10 DIF block size, or 0 if DIF is disabled.
 * @param csctl value of csctl field.
 * @param app_id value for VM application header.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ int32_t sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) { int32_t rc; rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag, cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id); if (rc == 0) { sli4_fcp_treceive64_wqe_t *trecv = buf; trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; trecv->dword5.sec_xri_tag = sec_xri; } return rc; } /** * @ingroup sli_fc * @brief Write an FCP_TRSP64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the Scatter-Gather List. * @param rsp_len Response data length. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param xid OX_ID for the exchange. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). * @param flags Optional attributes, including: * - ACTIVE - IO is already active * - AUTO RSP - Automatically generate a good FCP_RSP. * @param csctl value of csctl field. * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0) * @param app_id value for VM application header. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id) { sli4_fcp_trsp64_wqe_t *trsp = buf; sli4_fcp_128byte_wqe_t *trsp_128 = buf; ocs_memset(buf, 0, size); if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { trsp->ag = TRUE; /* * The SLI-4 documentation states that the BDE is ignored when * using auto-good response, but, at least for IF_TYPE 0 devices, * this does not appear to be true. */ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { trsp->bde.buffer_length = 12; /* byte size of RSP */ } } else { sli4_sge_t *sge = sgl->virt; if (sli4->config.sgl_pre_registered || port_owned) { trsp->dbde = TRUE; } else { trsp->xbl = TRUE; } trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64; trsp->bde.buffer_length = sge[0].buffer_length; trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low; trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high; trsp->fcp_response_length = rsp_len; } if (flags & SLI4_IO_CONTINUATION) { trsp->xc = TRUE; } if (rnode->node_group) { trsp->hlm = TRUE; trsp->dword5 = rnode->fc_id & 0x00ffffff; } trsp->xri_tag = xri; trsp->rpi = rpi; trsp->command = SLI4_WQE_FCP_TRSP64; trsp->class = SLI4_ELS_REQUEST64_CLASS_3; trsp->remote_xid = xid; trsp->request_tag = tag; trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1); trsp->len_loc = 0x1; trsp->cq_id = cq_id; trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE; /* The upper 7 bits of csctl is the priority */ if (csctl & SLI4_MASK_CCP) { trsp->ccpe = 1; trsp->ccp = (csctl & SLI4_MASK_CCP); } if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) { trsp->app_id_valid = 1; trsp->wqes = 1; trsp_128->dw[31] = app_id; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_TSEND64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. 
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the scatter gather list.
 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
 * @param relative_off Relative offset of the IO (if any).
 * @param xfer_len Data transfer length.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param xid OX_ID for the exchange.
 * @param rpi remote node indicator (RPI)
 * @param rnode Destination request (i.e. remote node).
 * @param flags Optional attributes, including:
 *  - ACTIVE - IO is already active.
 *  - AUTO RSP - Automatically generate a good FCP_RSP.
 * @param dif T10 DIF operation, or 0 to disable.
 * @param bs T10 DIF block size, or 0 if DIF is disabled.
 * @param csctl value of csctl field.
 * @param app_id value for VM application header.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
		    uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag,
		    uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
		    uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
{
	sli4_fcp_tsend64_wqe_t *tsend = buf;
	sli4_fcp_128byte_wqe_t *tsend_128 = buf;
	sli4_sge_t *sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		tsend->xbl = FALSE;

		tsend->dbde = TRUE;
		tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		/* TSEND64_WQE specifies first two SGE are skipped
		 * (i.e. 3rd is valid) */
		tsend->bde.buffer_length = sge[2].buffer_length;
		tsend->bde.u.data.buffer_address_low  = sge[2].buffer_address_low;
		tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
	} else {
		tsend->xbl = TRUE;

		/* if data is a single physical address, use a BDE */
		if (!dif && (xfer_len <= sge[2].buffer_length)) {
			tsend->dbde = TRUE;
			tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
			/* TSEND64_WQE specifies first two SGE are skipped
			 * (i.e. 3rd is valid) */
			tsend->bde.buffer_length = sge[2].buffer_length;
			tsend->bde.u.data.buffer_address_low  = sge[2].buffer_address_low;
			tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
		} else {
			/* multiple segments: point a BLP at the whole SGL */
			tsend->bde.bde_type = SLI4_BDE_TYPE_BLP;
			tsend->bde.buffer_length = sgl->size;
			tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
			tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
		}
	}

	tsend->relative_offset = relative_off;

	if (flags & SLI4_IO_CONTINUATION) {
		tsend->xc = TRUE;
	}
	tsend->xri_tag = xri;

	tsend->rpi = rpi;

	tsend->pu = TRUE;	/* WQE uses relative offset */

	if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
		tsend->ar = TRUE;
	}

	tsend->command = SLI4_WQE_FCP_TSEND64;
	tsend->class = SLI4_ELS_REQUEST64_CLASS_3;
	tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
	tsend->dif = dif;
	tsend->bs = bs;

	tsend->remote_xid = xid;
	tsend->request_tag = tag;
	tsend->len_loc = 0x2;

	if (rnode->node_group) {
		tsend->hlm = TRUE;
		tsend->dword5 = rnode->fc_id & 0x00ffffff;
	}

	tsend->cq_id = cq_id;
	tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE;
	tsend->fcp_data_transmit_length = xfer_len;

	if (sli4->config.perf_hint) {
		/* give the port a hint to the first data buffer */
		tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
		tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
		tsend->first_data_bde.u.data.buffer_address_low  = sge[first_data_sge].buffer_address_low;
		tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
	}

	/* The upper 7 bits of csctl is the priority */
	if (csctl & SLI4_MASK_CCP) {
		tsend->ccpe = 1;
		tsend->ccp = (csctl & SLI4_MASK_CCP);
	}

	if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) {
		/* extended (128-byte) WQE carries the VM application id in dw[31] */
		tsend->app_id_valid = 1;
		tsend->wqes = 1;
		tsend_128->dw[31] = app_id;
	}
	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write a GEN_REQUEST64 work queue entry.
 *
 * @note This WQE is only used to send FC-CT commands.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sgl DMA memory for the request.
 * @param req_len Length of request.
 * @param max_rsp_len Max length of response.
 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rnode Destination of request (that is, the remote node).
 * @param r_ctl R_CTL value for sequence.
 * @param type TYPE value for sequence.
 * @param df_ctl DF_CTL value for sequence.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t req_len,
		      uint32_t max_rsp_len, uint8_t timeout, uint16_t xri, uint16_t tag,
		      uint16_t cq_id, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type,
		      uint8_t df_ctl)
{
	sli4_gen_request64_wqe_t *gen = buf;
	sli4_sge_t *sge = NULL;

	ocs_memset(buf, 0, size);

	if (!sgl || !sgl->virt) {
		ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -1;
	}
	sge = sgl->virt;

	if (sli4->config.sgl_pre_registered) {
		gen->xbl = FALSE;

		gen->dbde = TRUE;
		gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64;

		gen->bde.buffer_length = req_len;
		gen->bde.u.data.buffer_address_low  = sge[0].buffer_address_low;
		gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
	} else {
		gen->xbl = TRUE;

		gen->bde.bde_type = SLI4_BDE_TYPE_BLP;

		/* BLP length covers a two-entry SGL (request + response) */
		gen->bde.buffer_length = 2 * sizeof(sli4_sge_t);
		gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
		gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
	}

	gen->request_payload_length = req_len;
	gen->max_response_payload_length = max_rsp_len;

	gen->df_ctl = df_ctl;
	gen->type = type;
	gen->r_ctl = r_ctl;

	gen->xri_tag = xri;

	gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
	gen->context_tag = rnode->indicator;

	gen->class = SLI4_ELS_REQUEST64_CLASS_3;

	gen->command = SLI4_WQE_GEN_REQUEST64;

	gen->timer = timeout;

	gen->request_tag = tag;

	gen->iod = SLI4_ELS_REQUEST64_DIR_READ;

	gen->qosd = TRUE;

	if (rnode->node_group) {
		gen->hlm = TRUE;
		gen->remote_n_port_id = rnode->fc_id & 0x00ffffff;
	}

	gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE;

	gen->cq_id = cq_id;

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write a SEND_FRAME work queue entry
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param sof Start of frame value
 * @param eof End of frame value
 * @param hdr Pointer to FC header data
 * @param payload DMA memory for the payload.
 * @param req_len Length of payload.
 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
 * @param xri XRI for this exchange.
 * @param req_tag IO tag value.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ int32_t sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr, ocs_dma_t *payload, uint32_t req_len, uint8_t timeout, uint16_t xri, uint16_t req_tag) { sli4_send_frame_wqe_t *sf = buf; ocs_memset(buf, 0, size); sf->dbde = TRUE; sf->bde.buffer_length = req_len; sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); /* Copy FC header */ sf->fc_header_0_1[0] = hdr[0]; sf->fc_header_0_1[1] = hdr[1]; sf->fc_header_2_5[0] = hdr[2]; sf->fc_header_2_5[1] = hdr[3]; sf->fc_header_2_5[2] = hdr[4]; sf->fc_header_2_5[3] = hdr[5]; sf->frame_length = req_len; sf->xri_tag = xri; sf->pu = 0; sf->context_tag = 0; sf->ct = 0; sf->command = SLI4_WQE_SEND_FRAME; sf->class = SLI4_ELS_REQUEST64_CLASS_3; sf->timer = timeout; sf->request_tag = req_tag; sf->eof = eof; sf->sof = sof; sf->qosd = 0; sf->lenloc = 1; sf->xc = 0; sf->xbl = 1; sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE; sf->cq_id = 0xffff; return 0; } /** * @ingroup sli_fc * @brief Write a XMIT_SEQUENCE64 work queue entry. * * This WQE is used to send FC-CT response frames. * * @note This API implements a restricted use for this WQE, a TODO: would * include passing in sequence initiative, and full SGL's * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param payload DMA memory for the request. * @param payload_len Length of request. * @param timeout Time, in seconds, before an IO times out. Zero means infinite. * @param ox_id originator exchange ID * @param xri XRI for this exchange. * @param tag IO tag value. * @param rnode Destination of request (that is, the remote node). * @param r_ctl R_CTL value for sequence. * @param type TYPE value for sequence. * @param df_ctl DF_CTL value for sequence. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, uint32_t payload_len, uint8_t timeout, uint16_t ox_id, uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type, uint8_t df_ctl) { sli4_xmit_sequence64_wqe_t *xmit = buf; ocs_memset(buf, 0, size); if ((payload == NULL) || (payload->virt == NULL)) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", payload, payload ? payload->virt : NULL); return -1; } if (sli4->config.sgl_pre_registered) { xmit->dbde = TRUE; } else { xmit->xbl = TRUE; } xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64; xmit->bde.buffer_length = payload_len; xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); xmit->sequence_payload_len = payload_len; xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; xmit->relative_offset = 0; xmit->si = 0; /* sequence initiative - this matches what is seen from * FC switches in response to FCGS commands */ xmit->ft = 0; /* force transmit */ xmit->xo = 0; /* exchange responder */ xmit->ls = 1; /* last in seqence */ xmit->df_ctl = df_ctl; xmit->type = type; xmit->r_ctl = r_ctl; xmit->xri_tag = xri; xmit->context_tag = rnode->indicator; xmit->dif = 0; xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; xmit->bs = 0; xmit->command = SLI4_WQE_XMIT_SEQUENCE64; xmit->class = SLI4_ELS_REQUEST64_CLASS_3; xmit->pu = 0; xmit->timer = timeout; xmit->abort_tag = 0; xmit->request_tag = tag; xmit->remote_xid = ox_id; xmit->iod = SLI4_ELS_REQUEST64_DIR_READ; if (rnode->node_group) { xmit->hlm = TRUE; xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; } xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE; xmit->len_loc = 2; xmit->cq_id = 0xFFFF; return 0; } /** * @ingroup sli_fc * @brief Write a REQUEUE_XRI_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param xri XRI for this exchange. 
* @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id) { sli4_requeue_xri_wqe_t *requeue = buf; ocs_memset(buf, 0, size); requeue->command = SLI4_WQE_REQUEUE_XRI; requeue->xri_tag = xri; requeue->request_tag = tag; requeue->xc = 1; requeue->qosd = 1; requeue->cq_id = cq_id; requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE; return 0; } int32_t sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type, uint8_t df_ctl) { sli4_xmit_bcast64_wqe_t *bcast = buf; /* Command requires a temporary RPI (i.e. unused remote node) */ if (rnode->attached) { ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator); return -1; } ocs_memset(buf, 0, size); bcast->dbde = TRUE; bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64; bcast->sequence_payload.buffer_length = payload_len; bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); bcast->sequence_payload_length = payload_len; bcast->df_ctl = df_ctl; bcast->type = type; bcast->r_ctl = r_ctl; bcast->xri_tag = xri; bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; bcast->context_tag = rnode->sport->indicator; bcast->class = SLI4_ELS_REQUEST64_CLASS_3; bcast->command = SLI4_WQE_XMIT_BCAST64; bcast->timer = timeout; bcast->request_tag = tag; bcast->temporary_rpi = rnode->indicator; bcast->len_loc = 0x1; bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE; bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE; bcast->cq_id = cq_id; return 0; } /** * @ingroup sli_fc * @brief Write an XMIT_BLS_RSP64_WQE work queue entry. * * @param sli4 SLI context. 
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param payload Contents of the BLS payload to be sent.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rnode Destination of request (that is, the remote node).
 * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload,
		       uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode,
		       uint32_t s_id)
{
	sli4_xmit_bls_rsp_wqe_t *bls = buf;

	/*
	 * Callers can either specify RPI or S_ID, but not both
	 */
	if (rnode->attached && (s_id != UINT32_MAX)) {
		ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n",
			     rnode->indicator);
		return -1;
	}

	ocs_memset(buf, 0, size);

	if (SLI_BLS_ACC == payload->type) {
		/* BA_ACC: pack seq id fields into payload word 0 */
		bls->payload_word0 = (payload->u.acc.seq_id_last << 16) |
			(payload->u.acc.seq_id_validity << 24);
		bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
		bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
	} else if (SLI_BLS_RJT == payload->type) {
		bls->payload_word0 = *((uint32_t *)&payload->u.rjt);
		bls->ar = TRUE;
	} else {
		ocs_log_test(sli4->os, "bad BLS type %#x\n", payload->type);
		return -1;
	}

	bls->ox_id = payload->ox_id;
	bls->rx_id = payload->rx_id;

	if (rnode->attached) {
		bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
		bls->context_tag = rnode->indicator;
	} else {
		/* unattached: respond in VPI context with an explicit S_ID */
		bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
		bls->context_tag = rnode->sport->indicator;

		if (UINT32_MAX != s_id) {
			bls->local_n_port_id = s_id & 0x00ffffff;
		} else {
			bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff;
		}
		bls->remote_id = rnode->fc_id & 0x00ffffff;

		bls->temporary_rpi = rnode->indicator;
	}

	bls->xri_tag = xri;

	bls->class = SLI4_ELS_REQUEST64_CLASS_3;

	bls->command = SLI4_WQE_XMIT_BLS_RSP;

	bls->request_tag = tag;

	bls->qosd = TRUE;

	if (rnode->node_group) {
		bls->hlm = TRUE;
		bls->remote_id = rnode->fc_id & 0x00ffffff;
	}

	bls->cq_id = cq_id;

	bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE;

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Write a XMIT_ELS_RSP64_WQE work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param rsp DMA memory for the ELS response.
 * @param rsp_len Length of ELS response, in bytes.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param ox_id OX_ID of the exchange containing the request.
 * @param rnode Destination of the ELS response (that is, the remote node).
 * @param flags Optional attributes, including:
 *  - SLI4_IO_CONTINUATION - IO is already active.
 * @param s_id S_ID used for special responses.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp, uint32_t rsp_len,
		       uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t ox_id,
		       ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id)
{
	sli4_xmit_els_rsp64_wqe_t *els = buf;

	ocs_memset(buf, 0, size);

	if (sli4->config.sgl_pre_registered) {
		els->dbde = TRUE;
	} else {
		els->xbl = TRUE;
	}

	els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
	els->els_response_payload.buffer_length = rsp_len;
	els->els_response_payload.u.data.buffer_address_low  = ocs_addr32_lo(rsp->phys);
	els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys);

	els->els_response_payload_length = rsp_len;

	els->xri_tag = xri;
	els->class = SLI4_ELS_REQUEST64_CLASS_3;
	els->command = SLI4_WQE_ELS_RSP64;

	els->request_tag = tag;

	els->ox_id = ox_id;

	els->iod = SLI4_ELS_REQUEST64_DIR_WRITE;

	els->qosd = TRUE;

	if (flags & SLI4_IO_CONTINUATION) {
		els->xc = TRUE;
	}

	if (rnode->attached) {
		els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
		els->context_tag = rnode->indicator;
	} else {
		/* unattached: VPI context, optionally with a special S_ID */
		els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
		els->context_tag = rnode->sport->indicator;
		els->remote_id = rnode->fc_id & 0x00ffffff;
		els->temporary_rpi = rnode->indicator;
		if (UINT32_MAX != s_id) {
			els->sp = TRUE;
			els->s_id = s_id & 0x00ffffff;
		}
	}

	if (rnode->node_group) {
		els->hlm = TRUE;
		els->remote_id = rnode->fc_id & 0x00ffffff;
	}

	els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN;

	els->cq_id = cq_id;

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Process an asynchronous Link State event entry.
 *
 * @par Description
 * Parses Asynchronous Completion Queue Entry (ACQE),
 * creates an abstracted event, and calls registered callback functions.
 *
 * @param sli4 SLI context.
 * @param acqe Pointer to the ACQE.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fc_process_link_state(sli4_t *sli4, void *acqe)
{
	sli4_link_state_t *link_state = acqe;
	sli4_link_event_t event = { 0 };
	int32_t rc = 0;

	if (!sli4->link) {
		/* bail if there is no callback */
		return 0;
	}

	if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) {
		event.topology = SLI_LINK_TOPO_NPORT;
		event.medium = SLI_LINK_MEDIUM_ETHERNET;
	} else {
		/* TODO is this supported for anything other than FCoE? */
		ocs_log_test(sli4->os, "unsupported link type %#x\n",
			     link_state->link_type);
		event.topology = SLI_LINK_TOPO_MAX;
		event.medium = SLI_LINK_MEDIUM_MAX;
		rc = -1;
	}

	switch (link_state->port_link_status) {
	case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN:
	case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN:
		event.status = SLI_LINK_STATUS_DOWN;
		break;
	case SLI4_PORT_LINK_STATUS_PHYSICAL_UP:
	case SLI4_PORT_LINK_STATUS_LOGICAL_UP:
		event.status = SLI_LINK_STATUS_UP;
		break;
	default:
		ocs_log_test(sli4->os, "unsupported link status %#x\n",
			     link_state->port_link_status);
		event.status = SLI_LINK_STATUS_MAX;
		rc = -1;
	}

	/* map the encoded port_speed to a rate in Mb/s */
	switch (link_state->port_speed) {
	case 0:
		event.speed = 0;
		break;
	case 1:
		event.speed = 10;
		break;
	case 2:
		event.speed = 100;
		break;
	case 3:
		event.speed = 1000;
		break;
	case 4:
		event.speed = 10000;
		break;
	case 5:
		event.speed = 20000;
		break;
	case 6:
		event.speed = 25000;
		break;
	case 7:
		event.speed = 40000;
		break;
	case 8:
		event.speed = 100000;
		break;
	default:
		ocs_log_test(sli4->os, "unsupported port_speed %#x\n",
			     link_state->port_speed);
		rc = -1;
	}

	sli4->link(sli4->link_arg, (void *)&event);

	return rc;
}

/**
 * @ingroup sli_fc
 * @brief Process an asynchronous Link Attention event entry.
 *
 * @par Description
 * Parses Asynchronous Completion Queue Entry (ACQE),
 * creates an abstracted event, and calls the registered callback functions.
 *
 * @param sli4 SLI context.
 * @param acqe Pointer to the ACQE.
 *
 * @todo XXX all events return LINK_UP.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ int32_t sli_fc_process_link_attention(sli4_t *sli4, void *acqe) { sli4_link_attention_t *link_attn = acqe; sli4_link_event_t event = { 0 }; ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x " "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x " "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type, link_attn->topology, link_attn->port_speed, link_attn->port_fault, link_attn->shared_link_status, link_attn->logical_link_speed, link_attn->event_tag); if (!sli4->link) { return 0; } event.medium = SLI_LINK_MEDIUM_FC; switch (link_attn->attn_type) { case SLI4_LINK_ATTN_TYPE_LINK_UP: event.status = SLI_LINK_STATUS_UP; break; case SLI4_LINK_ATTN_TYPE_LINK_DOWN: event.status = SLI_LINK_STATUS_DOWN; break; case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA: ocs_log_debug(sli4->os, "attn_type: no hard alpa\n"); event.status = SLI_LINK_STATUS_NO_ALPA; break; default: ocs_log_test(sli4->os, "attn_type: unknown\n"); break; } switch (link_attn->event_type) { case SLI4_FC_EVENT_LINK_ATTENTION: break; case SLI4_FC_EVENT_SHARED_LINK_ATTENTION: ocs_log_debug(sli4->os, "event_type: FC shared link event \n"); break; default: ocs_log_test(sli4->os, "event_type: unknown\n"); break; } switch (link_attn->topology) { case SLI4_LINK_ATTN_P2P: event.topology = SLI_LINK_TOPO_NPORT; break; case SLI4_LINK_ATTN_FC_AL: event.topology = SLI_LINK_TOPO_LOOP; break; case SLI4_LINK_ATTN_INTERNAL_LOOPBACK: ocs_log_debug(sli4->os, "topology Internal loopback\n"); event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL; break; case SLI4_LINK_ATTN_SERDES_LOOPBACK: ocs_log_debug(sli4->os, "topology serdes loopback\n"); event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL; break; default: ocs_log_test(sli4->os, "topology: unknown\n"); break; } event.speed = link_attn->port_speed * 1000; sli4->link(sli4->link_arg, (void *)&event); return 0; } /** * @ingroup sli_fc * @brief Parse an FC/FCoE work queue CQ entry. * * @param sli4 SLI context. * @param cq CQ to process. 
 * @param cqe Pointer to the CQ entry.
 * @param etype CQ event type.
 * @param r_id Resource ID associated with this completion message (such as the IO tag).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
		 uint16_t *r_id)
{
	uint8_t code = cqe[SLI4_CQE_CODE_OFFSET];
	int32_t rc = -1;

	switch (code) {
	case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
	{
		sli4_fc_wcqe_t *wcqe = (void *)cqe;

		*etype = SLI_QENTRY_WQ;
		*r_id = wcqe->request_tag;
		rc = wcqe->status;

		/* Flag errors except for FCP_RSP_FAILURE */
		if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) {
			ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n",
				     wcqe->status, wcqe->hw_status, wcqe->request_tag,
				     wcqe->wqe_specific_1, wcqe->wqe_specific_2, wcqe->xb);
			ocs_log_test(sli4->os, " %08X %08X %08X %08X\n",
				     ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1],
				     ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]);
		}

		/* TODO: need to pass additional status back out of here as well
		 * as status (could overload rc as status/addlstatus are only 8 bits each)
		 */
		break;
	}
	case SLI4_CQE_CODE_RQ_ASYNC:
	{
		sli4_fc_async_rcqe_t *rcqe = (void *)cqe;

		*etype = SLI_QENTRY_RQ;
		*r_id = rcqe->rq_id;
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_RQ_ASYNC_V1:
	{
		sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe;

		*etype = SLI_QENTRY_RQ;
		*r_id = rcqe->rq_id;
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
	{
		sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;

		*etype = SLI_QENTRY_OPT_WRITE_CMD;
		*r_id = optcqe->rq_id;
		rc = optcqe->status;
		break;
	}
	case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
	{
		sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe;

		*etype = SLI_QENTRY_OPT_WRITE_DATA;
		*r_id = dcqe->xri;
		rc = dcqe->status;

		/* Flag errors */
		if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
			ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
				     dcqe->status, dcqe->hw_status, dcqe->xri,
				     dcqe->total_data_placed, ((uint32_t*) cqe)[3], dcqe->xb);
		}
		break;
	}
	case SLI4_CQE_CODE_RQ_COALESCING:
	{
		sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;

		*etype = SLI_QENTRY_RQ;
		*r_id = rcqe->rq_id;
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_XRI_ABORTED:
	{
		sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe;

		*etype = SLI_QENTRY_XABT;
		*r_id = xa->xri;
		rc = 0;
		break;
	}
	case SLI4_CQE_CODE_RELEASE_WQE: {
		sli4_fc_wqec_t *wqec = (void*) cqe;

		*etype = SLI_QENTRY_WQ_RELEASE;
		*r_id = wqec->wq_id;
		rc = 0;
		break;
	}
	default:
		ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code);
		*etype = SLI_QENTRY_MAX;
		*r_id = UINT16_MAX;
	}

	return rc;
}

/**
 * @ingroup sli_fc
 * @brief Return the ELS/CT response length.
 *
 * @param sli4 SLI context.
 * @param cqe Pointer to the CQ entry.
 *
 * @return Returns the length, in bytes.
 */
uint32_t
sli_fc_response_length(sli4_t *sli4, uint8_t *cqe)
{
	sli4_fc_wcqe_t *wcqe = (void *)cqe;

	/* wqe_specific_1 carries the byte count for this completion */
	return wcqe->wqe_specific_1;
}

/**
 * @ingroup sli_fc
 * @brief Return the FCP IO length.
 *
 * @param sli4 SLI context.
 * @param cqe Pointer to the CQ entry.
 *
 * @return Returns the length, in bytes.
 */
uint32_t
sli_fc_io_length(sli4_t *sli4, uint8_t *cqe)
{
	sli4_fc_wcqe_t *wcqe = (void *)cqe;

	/* wqe_specific_1 carries the byte count for this completion */
	return wcqe->wqe_specific_1;
}

/**
 * @ingroup sli_fc
 * @brief Retrieve the D_ID from the completion.
 *
 * @param sli4 SLI context.
 * @param cqe Pointer to the CQ entry.
 * @param d_id Pointer where the D_ID is written.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/ int32_t sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id) { sli4_fc_wcqe_t *wcqe = (void *)cqe; *d_id = 0; if (wcqe->status) { return -1; } else { *d_id = wcqe->wqe_specific_2 & 0x00ffffff; return 0; } } uint32_t sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe) { sli4_fc_wcqe_t *wcqe = (void *)cqe; uint32_t mask; switch (wcqe->status) { case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: mask = UINT32_MAX; break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: case SLI4_FC_WCQE_STATUS_CMD_REJECT: mask = 0xff; break; case SLI4_FC_WCQE_STATUS_NPORT_RJT: case SLI4_FC_WCQE_STATUS_FABRIC_RJT: case SLI4_FC_WCQE_STATUS_NPORT_BSY: case SLI4_FC_WCQE_STATUS_FABRIC_BSY: case SLI4_FC_WCQE_STATUS_LS_RJT: mask = UINT32_MAX; break; case SLI4_FC_WCQE_STATUS_DI_ERROR: mask = UINT32_MAX; break; default: mask = 0; } return wcqe->wqe_specific_2 & mask; } /** * @ingroup sli_fc * @brief Retrieve the RQ index from the completion. * * @param sli4 SLI context. * @param cqe Pointer to the CQ entry. * @param rq_id Pointer where the rq_id is written. * @param index Pointer where the index is written. * * @return Returns 0 on success, or a non-zero value on failure. 
 */
int32_t
sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index)
{
	sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
	sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe;
	int32_t rc = -1;
	uint8_t code = 0;

	*rq_id = 0;
	*index = UINT32_MAX;

	code = cqe[SLI4_CQE_CODE_OFFSET];

	/* each RCQE flavor has a different layout; dispatch on the CQE code */
	if (code == SLI4_CQE_CODE_RQ_ASYNC) {
		*rq_id = rcqe->rq_id;
		if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
			*index = rcqe->rq_element_index;
			rc = 0;
		} else {
			*index = rcqe->rq_element_index;
			rc = rcqe->status;
			ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
				     rcqe->status, sli_fc_get_status_string(rcqe->status),
				     rcqe->rq_id, rcqe->rq_element_index,
				     rcqe->payload_data_placement_length, rcqe->sof_byte,
				     rcqe->eof_byte, rcqe->header_data_placement_length);
		}
	} else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
		*rq_id = rcqe_v1->rq_id;
		if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) {
			*index = rcqe_v1->rq_element_index;
			rc = 0;
		} else {
			*index = rcqe_v1->rq_element_index;
			rc = rcqe_v1->status;
			ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
				     rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status),
				     rcqe_v1->rq_id, rcqe_v1->rq_element_index,
				     rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte,
				     rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length);
		}
	} else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
		sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;

		*rq_id = optcqe->rq_id;
		if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) {
			*index = optcqe->rq_element_index;
			rc = 0;
		} else {
			*index = optcqe->rq_element_index;
			rc = optcqe->status;
			ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n",
				     optcqe->status, sli_fc_get_status_string(optcqe->status),
				     optcqe->rq_id, optcqe->rq_element_index,
				     optcqe->payload_data_placement_length,
				     optcqe->header_data_placement_length, optcqe->oox,
				     optcqe->agxr, optcqe->xri, optcqe->rpi);
		}
	} else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
		sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;

		*rq_id = rcqe->rq_id;
		if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) {
			*index = rcqe->rq_element_index;
			rc = 0;
		} else {
			*index = UINT32_MAX;
			rc = rcqe->status;
			ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n",
				     rcqe->status, sli_fc_get_status_string(rcqe->status),
				     rcqe->rq_id, rcqe->rq_element_index, rcqe->rq_id,
				     rcqe->sequence_reporting_placement_length);
		}
	} else {
		/* unknown code: report it using the base RCQE layout */
		*index = UINT32_MAX;
		rc = rcqe->status;
		ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
			      rcqe->status, rcqe->rq_id, rcqe->rq_element_index,
			      rcqe->payload_data_placement_length, rcqe->sof_byte,
			      rcqe->eof_byte, rcqe->header_data_placement_length);
	}

	return rc;
}

/**
 * @ingroup sli_fc
 * @brief Process an asynchronous FCoE event entry.
 *
 * @par Description
 * Parses Asynchronous Completion Queue Entry (ACQE),
 * creates an abstracted event, and calls the registered callback functions.
 *
 * @param sli4 SLI context.
 * @param acqe Pointer to the ACQE.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
*/
int32_t
sli_fc_process_fcoe(sli4_t *sli4, void *acqe)
{
	sli4_fcoe_fip_t	*fcoe = acqe;
	sli4_fip_event_t event = { 0 };
	/* Mask applied to event_information; zeroed when no index applies. */
	uint32_t	mask = UINT32_MAX;

	ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n",
			fcoe->event_type,
			fcoe->fcf_count,
			fcoe->event_tag);

	/* No registered FIP callback: nothing to do. */
	if (!sli4->fip) {
		return 0;
	}

	event.type = fcoe->event_type;
	event.index = UINT32_MAX;

	switch (fcoe->event_type) {
	case SLI4_FCOE_FIP_FCF_DISCOVERED:
		ocs_log_debug(sli4->os, "FCF Discovered index=%d\n",
				fcoe->event_information);
		break;
	case SLI4_FCOE_FIP_FCF_TABLE_FULL:
		ocs_log_debug(sli4->os, "FCF Table Full\n");
		/* Table-full events carry no index. */
		mask = 0;
		break;
	case SLI4_FCOE_FIP_FCF_DEAD:
		ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n",
				fcoe->event_information);
		break;
	case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
		/* Only the low 16 bits (VPI) are meaningful here. */
		mask = UINT16_MAX;
		ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n",
				fcoe->event_information & mask);
		break;
	case SLI4_FCOE_FIP_FCF_MODIFIED:
		ocs_log_debug(sli4->os, "FCF Modified\n");
		break;
	default:
		ocs_log_test(sli4->os, "bad FCoE type %#x", fcoe->event_type);
		mask = 0;
	}

	/* Non-zero mask means this event type reports an index. */
	if (mask != 0) {
		event.index = fcoe->event_information & mask;
	}

	/* Hand the abstracted event to the registered callback. */
	sli4->fip(sli4->fip_arg, &event);

	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Allocate a receive queue.
 *
 * @par Description
 * Allocates DMA memory and configures the requested queue type.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object for the header.
 * @param n_entries Number of entries to allocate.
 * @param buffer_size buffer size for the queue.
 * @param cq Associated CQ.
 * @param ulp The ULP to bind
 * @param is_hdr Used to validate the rq_id and set the type of queue
 *
 * @return Returns 0 on success, or -1 on failure.
 */
int32_t
sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q,
		uint32_t n_entries, uint32_t buffer_size,
		sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr)
{
	/* Selected per interface type: v0 or v1 of the RQ_CREATE command. */
	int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);

	if ((sli4 == NULL) || (q == NULL)) {
		void *os = sli4 != NULL ? sli4->os : NULL;

		ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q);
		return -1;
	}

	if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
				n_entries, SLI_PAGE_SIZE)) {
		return -1;
	}

	/* BE3/Skyhawk PFs use the v0 command; everything else uses v1. */
	if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
		rq_create = sli_cmd_fcoe_rq_create;
	} else {
		rq_create = sli_cmd_fcoe_rq_create_v1;
	}

	/*
	 * NOTE(review): rq_create() returning non-zero appears to mean the
	 * mailbox command was successfully built (sli_cmd_* convention),
	 * which is why the success path is inside this if — confirm against
	 * the sli_cmd_fcoe_rq_create* implementations.
	 */
	if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma,
		      cq->id, ulp, buffer_size)) {
		if (__sli_create_queue(sli4, q)) {
			ocs_dma_free(sli4->os, &q->dma);
			return -1;
		}
		/* Header RQ IDs must be even, data RQ IDs odd. */
		if (is_hdr && q->id & 1) {
			ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id);
			ocs_dma_free(sli4->os, &q->dma);
			return -1;
		} else if (!is_hdr && (q->id & 1) == 0) {
			ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id);
			ocs_dma_free(sli4->os, &q->dma);
			return -1;
		}
	} else {
		return -1;
	}
	q->u.flag.is_hdr = is_hdr;
	/* BE3/Skyhawk supports RQ completion batching. */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
		q->u.flag.rq_batch = TRUE;
	}
	return 0;
}

/**
 * @ingroup sli_fc
 * @brief Allocate a receive queue set.
 *
 * @param sli4 SLI context.
 * @param num_rq_pairs to create
 * @param qs Pointers to the queue objects for both header and data.
 *	Length of this arrays should be 2 * num_rq_pairs
 * @param base_cq_id. Assumes base_cq_id : (base_cq_id + num_rq_pairs) cqs as allotted.
 * @param n_entries number of entries in each RQ queue.
 * @param header_buffer_size
 * @param payload_buffer_size
 * @param ulp The ULP to bind
 *
 * @return Returns 0 on success, or -1 on failure.
*/
int32_t
sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs,
		    sli4_queue_t *qs[], uint32_t base_cq_id,
		    uint32_t n_entries, uint32_t header_buffer_size,
		    uint32_t payload_buffer_size,  uint16_t ulp)
{
	uint32_t i, p, offset = 0;
	uint32_t payload_size, total_page_count = 0;
	uintptr_t addr;
	ocs_dma_t dma;
	sli4_res_common_create_queue_set_t *rsp = NULL;
	sli4_req_fcoe_rq_create_v2_t    *req = NULL;

	/*
	 * Zero the DMA descriptor up front: the error path below frees it
	 * only when dma.size is non-zero, so it must not contain stack
	 * garbage if we bail out before ocs_dma_alloc() runs.
	 */
	ocs_memset(&dma, 0, sizeof(dma));

	/* Initialize every header/data queue in the set. */
	for (i = 0; i < (num_rq_pairs * 2); i++) {
		if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
					n_entries, SLI_PAGE_SIZE)) {
			goto error;
		}
	}

	total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2;

	/* Payload length must accommodate both request and response */
	payload_size = max((sizeof(sli4_req_fcoe_rq_create_v1_t) + (8 * total_page_count)),
			 sizeof(sli4_res_common_create_queue_set_t));

	if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
		ocs_log_err(sli4->os, "DMA allocation failed\n");
		goto error;
	}
	ocs_memset(dma.virt, 0, payload_size);

	if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
			payload_size, &dma) == -1) {
		goto error;
	}
	req = (sli4_req_fcoe_rq_create_v2_t *)((uint8_t *)dma.virt);

	/* Fill Header fields */
	req->hdr.opcode    = SLI4_OPC_FCOE_RQ_CREATE;
	req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	req->hdr.version   = 2;
	req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) -
				  sizeof(sli4_req_hdr_t) + (8 * total_page_count);

	/* Fill Payload fields */
	req->dnb           = TRUE;
	req->num_pages     = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
	req->rqe_count     = qs[0]->dma.size / SLI4_FCOE_RQE_SIZE;
	req->rqe_size      = SLI4_FCOE_RQE_SIZE_8;
	req->page_size     = SLI4_FCOE_RQ_PAGE_SIZE_4096;
	req->rq_count      = num_rq_pairs * 2;
	req->base_cq_id    = base_cq_id;
	req->hdr_buffer_size     = header_buffer_size;
	req->payload_buffer_size = payload_buffer_size;

	/* List every physical page backing every queue in the set. */
	for (i = 0; i < (num_rq_pairs * 2); i++) {
		for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages;
		     p++, addr += SLI_PAGE_SIZE) {
			req->page_physical_address[offset].low  = ocs_addr32_lo(addr);
			req->page_physical_address[offset].high = ocs_addr32_hi(addr);
			offset++;
		}
	}

	if (sli_bmbx_command(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n");
		goto error;
	}

	rsp = (void *)((uint8_t *)dma.virt);
	if (rsp->hdr.status) {
		ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n",
			rsp->hdr.status, rsp->hdr.additional_status);
		goto error;
	} else {
		/* Queue IDs are consecutive from q_id; even IDs are header RQs. */
		for (i = 0; i < (num_rq_pairs * 2); i++) {
			qs[i]->id = i + rsp->q_id;
			if ((qs[i]->id & 1) == 0) {
				qs[i]->u.flag.is_hdr = TRUE;
			} else {
				qs[i]->u.flag.is_hdr = FALSE;
			}
			qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
			qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
		}
	}

	ocs_dma_free(sli4->os, &dma);

	return 0;

error:
	/* Release whatever queue DMA regions were successfully allocated. */
	for (i = 0; i < (num_rq_pairs * 2); i++) {
		if (qs[i]->dma.size) {
			ocs_dma_free(sli4->os, &qs[i]->dma);
		}
	}

	if (dma.size) {
		ocs_dma_free(sli4->os, &dma);
	}

	return -1;
}

/**
 * @ingroup sli_fc
 * @brief Get the RPI resource requirements.
 *
 * @param sli4 SLI context.
 * @param n_rpi Number of RPIs desired.
 *
 * @return Returns the number of bytes needed. This value may be zero.
*/ uint32_t sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi) { uint32_t bytes = 0; /* Check if header templates needed */ if (sli4->config.hdr_template_req) { /* round up to a page */ bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE); } return bytes; } /** * @ingroup sli_fc * @brief Return a text string corresponding to a CQE status value * * @param status Status value * * @return Returns corresponding string, otherwise "unknown" */ const char * sli_fc_get_status_string(uint32_t status) { static struct { uint32_t code; const char *label; } lookup[] = { {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"}, {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"}, {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"}, {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"}, {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"}, {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"}, {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"}, {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"}, {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"}, {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"}, {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"}, {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"}, {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"}, {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"}, {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"}, {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"}, }; uint32_t i; for (i = 0; i < ARRAY_SIZE(lookup); i++) { if (status == lookup[i].code) { return lookup[i].label; } } return "unknown"; }