diff --git a/sys/dev/ocs_fc/ocs_hw.c b/sys/dev/ocs_fc/ocs_hw.c index 8fcf31b21514..65fc01f62846 100644 --- a/sys/dev/ocs_fc/ocs_hw.c +++ b/sys/dev/ocs_fc/ocs_hw.c @@ -1,12524 +1,12686 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Defines and implements the Hardware Abstraction Layer (HW). * All interaction with the hardware is performed through the HW, which abstracts * the details of the underlying SLI-4 implementation. 
*/ /** * @defgroup devInitShutdown Device Initialization and Shutdown * @defgroup domain Domain Functions * @defgroup port Port Functions * @defgroup node Remote Node Functions * @defgroup io IO Functions * @defgroup interrupt Interrupt handling * @defgroup os OS Required Functions */ #include "ocs.h" #include "ocs_os.h" #include "ocs_hw.h" #include "ocs_hw_queues.h" #define OCS_HW_MQ_DEPTH 128 #define OCS_HW_READ_FCF_SIZE 4096 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256 #define OCS_HW_WQ_TIMER_PERIOD_MS 500 /* values used for setting the auto xfer rdy parameters */ #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */ #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0 #define OCS_HW_REQUE_XRI_REGTAG 65534 /* max command and response buffer lengths -- arbitrary at the moment */ #define OCS_HW_DMTF_CLP_CMD_MAX 256 #define OCS_HW_DMTF_CLP_RSP_MAX 256 /* HW global data */ ocs_hw_global_t hw_global; static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t); static void ocs_hw_adjust_wqs(ocs_hw_t *hw); static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw); static int32_t ocs_hw_cb_link(void *, void *); static int32_t ocs_hw_cb_fip(void *, void *); static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t); static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *); static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *); static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *); static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *); static int32_t ocs_hw_flush(ocs_hw_t *); static int32_t ocs_hw_command_cancel(ocs_hw_t *); static int32_t ocs_hw_io_cancel(ocs_hw_t *); static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io); static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *); static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *); static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg); static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len); typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg); static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg); static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg); static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *); static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e 
ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *); static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg); static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license); static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw); static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw); static void ocs_hw_io_free_internal(void *arg); static void ocs_hw_io_free_port_owned(void *arg); static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf); static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint); static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status); static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t); static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw); static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable); /* HW domain database operations */ static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *); static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *); /* Port state machine */ static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); /* Domain state machine */ static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *); static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data); static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *); static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); /* BZ 161832 */ static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw); /* WQE timeouts */ static void target_wqe_timer_cb(void *arg); static void shutdown_target_wqe_timer(ocs_hw_t *hw); static inline void ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io) { if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) { /* * Active WQE list currently only used for * target WQE timeouts. */ ocs_lock(&hw->io_lock); ocs_list_add_tail(&hw->io_timed_wqe, io); io->submit_ticks = ocs_get_os_ticks(); ocs_unlock(&hw->io_lock); } } static inline void ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io) { if (hw->config.emulate_tgt_wqe_timeout) { /* * If target wqe timeouts are enabled, * remove from active wqe list. 
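* (Entries on the io_timed_wqe list carry submit_ticks, recorded in * ocs_hw_add_io_timed_wqe(), so the periodic target WQE timer, * target_wqe_timer_cb, which runs every OCS_HW_WQ_TIMER_PERIOD_MS ms, * can compare them against the current OS tick count to detect * expired WQEs.)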
*/ ocs_lock(&hw->io_lock); if (ocs_list_on_list(&io->wqe_link)) { ocs_list_remove(&hw->io_timed_wqe, io); } ocs_unlock(&hw->io_lock); } } static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type) { switch (io_type) { case OCS_HW_IO_INITIATOR_READ: case OCS_HW_IO_INITIATOR_WRITE: case OCS_HW_IO_INITIATOR_NODATA: case OCS_HW_FC_CT: case OCS_HW_ELS_REQ: return 1; default: return 0; } } static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb) { /* if exchange not active, nothing to abort */ if (!xb) { return FALSE; } if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) { switch (ext) { /* exceptions where abort is not needed */ case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */ return FALSE; default: break; } } return TRUE; } /** * @brief Determine the number of chutes on the device. * * @par Description * Some devices require queue resources allocated per protocol processor * (chute). This function returns the number of chutes on this device. * * @param hw Hardware context allocated by the caller. * * @return Returns the number of chutes on the device for protocol processing. */ static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw) { uint32_t num_chutes = 1; if (sli_get_is_dual_ulp_capable(&hw->sli) && sli_get_is_ulp_enabled(&hw->sli, 0) && sli_get_is_ulp_enabled(&hw->sli, 1)) { num_chutes = 2; } return num_chutes; } static ocs_hw_rtn_e ocs_hw_link_event_init(ocs_hw_t *hw) { ocs_hw_assert(hw); hw->link.status = SLI_LINK_STATUS_MAX; hw->link.topology = SLI_LINK_TOPO_NONE; hw->link.medium = SLI_LINK_MEDIUM_MAX; hw->link.speed = 0; hw->link.loop_map = NULL; hw->link.fc_id = UINT32_MAX; return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief If this is physical port 0, then read the max dump size. * * @par Description * Queries the FW for the maximum dump size. * * @param hw Hardware context allocated by the caller. * * @return Returns 0 on success, or a non-zero value on failure. */ static ocs_hw_rtn_e ocs_hw_read_max_dump_size(ocs_hw_t *hw) { uint8_t buf[SLI4_BMBX_SIZE]; uint8_t bus, dev, func; int rc; /* lancer only */ if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) && (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) { ocs_log_debug(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } /* * Make sure the FW is new enough to support this command. If the FW * is too old, the FW will UE. */ if (hw->workaround.disable_dump_loc) { ocs_log_test(hw->os, "FW version is too old for this feature\n"); return OCS_HW_RTN_ERROR; } /* attempt to determine the dump size for function 0 only. */ ocs_get_bus_dev_func(hw->os, &bus, &dev, &func); if (func == 0) { if (sli_cmd_common_set_dump_location(&hw->sli, buf, SLI4_BMBX_SIZE, 1, 0, NULL, 0)) { sli4_res_common_set_dump_location_t *rsp = (sli4_res_common_set_dump_location_t *) (buf + offsetof(sli4_cmd_sli_config_t, payload.embed)); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "set dump location command failed\n"); return rc; } else { hw->dump_size = rsp->buffer_length; ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length); } } } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Set up the Hardware Abstraction Layer module. * * @par Description * Calls set up to configure the hardware. * * @param hw Hardware context allocated by the caller. * @param os Device abstraction.
* @param port_type Protocol type of port, such as FC and NIC. * * @todo Why is port_type a parameter? * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type) { uint32_t i; char prop_buf[32]; if (hw == NULL) { ocs_log_err(os, "bad parameter(s) hw=%p\n", hw); return OCS_HW_RTN_ERROR; } if (hw->hw_setup_called) { /* Setup run-time workarounds. * Call for each setup, to allow for hw_war_version */ ocs_hw_workaround_setup(hw); return OCS_HW_RTN_SUCCESS; } /* * ocs_hw_init() relies on NULL pointers indicating that a structure * needs allocation. If a structure is non-NULL, ocs_hw_init() won't * free/realloc that memory */ ocs_memset(hw, 0, sizeof(ocs_hw_t)); hw->hw_setup_called = TRUE; hw->os = os; ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os)); ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link); ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link); hw->cmd_head_count = 0; ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os)); ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os)); ocs_atomic_init(&hw->io_alloc_failed_count, 0); hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; hw->config.dif_seed = 0; hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT; hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT; hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT; hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT; if (sli_setup(&hw->sli, hw->os, port_type)) { ocs_log_err(hw->os, "SLI setup failed\n"); return OCS_HW_RTN_ERROR; } ocs_memset(hw->domains, 0, sizeof(hw->domains)); ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi)); ocs_hw_link_event_init(hw); sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw); sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw); /* * Set all the queue sizes to the maximum allowed. These values may * be changed later by the adjust and workaround functions. */ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) { hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i); } /* * The RQ assignment for RQ pair mode. */ hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD; hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI); if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) { hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0); } /* by default, enable initiator-only auto-ABTS emulation */ hw->config.i_only_aab = TRUE; /* Setup run-time workarounds */ ocs_hw_workaround_setup(hw); /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { hw->first_domain_idx = -1; } /* Must be done after the workaround setup */ if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) { (void)ocs_hw_read_max_dump_size(hw); } /* calculate the number of WQs required. */ ocs_hw_adjust_wqs(hw); /* Set the default dif mode */ if (! sli_is_dif_inline_capable(&hw->sli)) { ocs_log_test(hw->os, "not inline capable, setting mode to separate\n"); hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE; } /* Workaround: BZ 161832 */ if (hw->workaround.use_dif_sec_xri) { ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link); } /* * Figure out the starting and max ULP to spread the WQs across the * ULPs.
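* In effect: both ULPs enabled -> (ulp_start, ulp_max) = (0, 1); * only ULP 0 enabled -> (0, 0); otherwise -> (1, 1).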
*/ if (sli_get_is_dual_ulp_capable(&hw->sli)) { if (sli_get_is_ulp_enabled(&hw->sli, 0) && sli_get_is_ulp_enabled(&hw->sli, 1)) { hw->ulp_start = 0; hw->ulp_max = 1; } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) { hw->ulp_start = 0; hw->ulp_max = 0; } else { hw->ulp_start = 1; hw->ulp_max = 1; } } else { if (sli_get_is_ulp_enabled(&hw->sli, 0)) { hw->ulp_start = 0; hw->ulp_max = 0; } else { hw->ulp_start = 1; hw->ulp_max = 1; } } ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n", hw->ulp_start, hw->ulp_max); hw->config.queue_topology = hw_global.queue_topology_string; hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology); hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ]; hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ]; hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ]; hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ]; hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ]; /* Verify qtop configuration against driver supported configuration */ if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) { ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n", OCE_HW_MAX_NUM_MRQ_PAIRS); return OCS_HW_RTN_ERROR; } if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) { ocs_log_crit(hw->os, "Max supported EQs = %d\n", OCS_HW_MAX_NUM_EQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) { ocs_log_crit(hw->os, "Max supported CQs = %d\n", OCS_HW_MAX_NUM_CQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) { ocs_log_crit(hw->os, "Max supported WQs = %d\n", OCS_HW_MAX_NUM_WQ); return OCS_HW_RTN_ERROR; } if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) { ocs_log_crit(hw->os, "Max supported MQs = %d\n", OCS_HW_MAX_NUM_MQ); return OCS_HW_RTN_ERROR; } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Allocate memory structures to prepare for the device operation. * * @par Description * Allocates memory structures needed by the device and prepares the device * for operation. * @n @n @b Note: This function may be called more than once (for example, at * initialization and then after a reset), but the size of the internal resources * may not be changed without tearing down the HW (ocs_hw_teardown()). * * @param hw Hardware context allocated by the caller. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_init(ocs_hw_t *hw) { ocs_hw_rtn_e rc; uint32_t i = 0; uint8_t buf[SLI4_BMBX_SIZE]; uint32_t max_rpi; int rem_count; int written_size = 0; uint32_t count; char prop_buf[32]; uint32_t ramdisc_blocksize = 512; uint32_t q_count = 0; /* * Make sure the command lists are empty. If this is start-of-day, * they'll be empty since they were just initialized in ocs_hw_setup. * If we've just gone through a reset, the command and command pending * lists should have been cleaned up as part of the reset (ocs_hw_reset()). */ ocs_lock(&hw->cmd_lock); if (!ocs_list_empty(&hw->cmd_head)) { ocs_log_test(hw->os, "command found on cmd list\n"); ocs_unlock(&hw->cmd_lock); return OCS_HW_RTN_ERROR; } if (!ocs_list_empty(&hw->cmd_pending)) { ocs_log_test(hw->os, "command found on pending list\n"); ocs_unlock(&hw->cmd_lock); return OCS_HW_RTN_ERROR; } ocs_unlock(&hw->cmd_lock); /* Free RQ buffers if previously allocated */ ocs_hw_rx_free(hw); /* * The IO queues must be initialized here for the reset case. The * ocs_hw_init_io() function will re-add the IOs to the free list. * The cmd_head list should be OK since we free all entries in * ocs_hw_command_cancel(), which is called from ocs_hw_reset().
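* (ocs_hw_reset() also resets each SLI queue and, via ocs_hw_reqtag_reset(), * the WQ request tag pool before this function runs again.)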
*/ /* If we are in this function due to a reset, there may be stale items * on lists that need to be removed. Clean them up. */ rem_count=0; if (ocs_list_valid(&hw->io_wait_free)) { while ((!ocs_list_empty(&hw->io_wait_free))) { rem_count++; ocs_list_remove_head(&hw->io_wait_free); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count); } } rem_count=0; if (ocs_list_valid(&hw->io_inuse)) { while ((!ocs_list_empty(&hw->io_inuse))) { rem_count++; ocs_list_remove_head(&hw->io_inuse); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count); } } rem_count=0; if (ocs_list_valid(&hw->io_free)) { while ((!ocs_list_empty(&hw->io_free))) { rem_count++; ocs_list_remove_head(&hw->io_free); } if (rem_count > 0) { ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count); } } if (ocs_list_valid(&hw->io_port_owned)) { while ((!ocs_list_empty(&hw->io_port_owned))) { ocs_list_remove_head(&hw->io_port_owned); } } ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link); ocs_list_init(&hw->io_free, ocs_hw_io_t, link); ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link); ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link); ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link); ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link); /* If MRQ is not required, make sure we don't request the feature. */ if (hw->config.n_rq == 1) { hw->sli.config.features.flag.mrqp = FALSE; } if (sli_init(&hw->sli)) { ocs_log_err(hw->os, "SLI failed to initialize\n"); return OCS_HW_RTN_ERROR; } /* * Enable the auto xfer rdy feature if requested. */ hw->auto_xfer_rdy_enabled = FALSE; if (sli_get_auto_xfer_rdy_capable(&hw->sli) && hw->config.auto_xfer_rdy_size > 0) { if (hw->config.esoc){ if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) { ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0); } written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize); } else { written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size); } if (written_size) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "config auto xfer rdy failed\n"); return rc; } } hw->auto_xfer_rdy_enabled = TRUE; if (hw->config.auto_xfer_rdy_t10_enable) { rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n"); return rc; } } } if(hw->sliport_healthcheck) { rc = ocs_hw_config_sli_port_health_check(hw, 0, 1); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Enabling Sliport Health check failed \n"); return rc; } } /* * Set FDT transfer hint, only works on Lancer */ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) { /* * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
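* The return status of the call below is therefore ignored; on such * firmware the transfer hint is simply left unset.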
*/ ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT); } /* * Verify that we have not exceeded any queue sizes */ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ), OCS_HW_MAX_NUM_EQ); if (hw->config.n_eq > q_count) { ocs_log_err(hw->os, "requested %d EQ but %d allowed\n", hw->config.n_eq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ), OCS_HW_MAX_NUM_CQ); if (hw->config.n_cq > q_count) { ocs_log_err(hw->os, "requested %d CQ but %d allowed\n", hw->config.n_cq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ), OCS_HW_MAX_NUM_MQ); if (hw->config.n_mq > q_count) { ocs_log_err(hw->os, "requested %d MQ but %d allowed\n", hw->config.n_mq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ), OCS_HW_MAX_NUM_RQ); if (hw->config.n_rq > q_count) { ocs_log_err(hw->os, "requested %d RQ but %d allowed\n", hw->config.n_rq, q_count); return OCS_HW_RTN_ERROR; } q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ), OCS_HW_MAX_NUM_WQ); if (hw->config.n_wq > q_count) { ocs_log_err(hw->os, "requested %d WQ but %d allowed\n", hw->config.n_wq, q_count); return OCS_HW_RTN_ERROR; } /* zero the hashes */ ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash)); ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n", OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE); ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash)); ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n", OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE); ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash)); ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n", OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE); rc = ocs_hw_init_queues(hw, hw->qtop); if (rc != OCS_HW_RTN_SUCCESS) { return rc; } max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi); if (i) { ocs_dma_t payload_memory; rc = OCS_HW_RTN_ERROR; if (hw->rnode_mem.size) { ocs_dma_free(hw->os, &hw->rnode_mem); } if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) { ocs_log_err(hw->os, "remote node memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } payload_memory.size = 0; if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->rnode_mem, UINT16_MAX, &payload_memory)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (payload_memory.size != 0) { /* The command was non-embedded - need to free the dma buffer */ ocs_dma_free(hw->os, &payload_memory); } } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "header template registration failed\n"); return rc; } } /* Allocate and post RQ buffers */ rc = ocs_hw_rx_allocate(hw); if (rc) { ocs_log_err(hw->os, "rx_allocate failed\n"); return rc; } /* Populate hw->seq_free_list */ if (hw->seq_pool == NULL) { uint32_t count = 0; uint32_t i; /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */ for (i = 0; i < hw->hw_rq_count; i++) { count += hw->hw_rq[i]->entry_count; } hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count); if (hw->seq_pool == NULL) { ocs_log_err(hw->os, "malloc seq_pool failed\n"); return OCS_HW_RTN_NO_MEMORY; } } if(ocs_hw_rx_post(hw)) { ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n"); } /* Allocate rpi_ref if not previously allocated */ if (hw->rpi_ref == NULL) { hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref), OCS_M_ZERO | OCS_M_NOWAIT); if (hw->rpi_ref == NULL) { ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i); return OCS_HW_RTN_NO_MEMORY; } } for (i = 0; i < 
max_rpi; i ++) { ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0); ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0); } ocs_memset(hw->domains, 0, sizeof(hw->domains)); /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { hw->first_domain_idx = -1; } ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi)); /* Register a FCFI to allow unsolicited frames to be routed to the driver */ if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { if (hw->hw_mrq_count) { ocs_log_debug(hw->os, "using REG_FCFI MRQ\n"); rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n"); return rc; } rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n"); return rc; } } else { sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; ocs_log_debug(hw->os, "using REG_FCFI standard\n"); /* Set the filter match/mask values from hw's filter_def values */ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { rq_cfg[i].rq_id = 0xffff; rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); } /* * Update the rq_id's of the FCF configuration (don't update more than the number * of rq_cfg elements) */ for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) { hw_rq_t *rq = hw->hw_rq[i]; uint32_t j; for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) { uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1; if (mask & (1U << j)) { rq_cfg[j].rq_id = rq->hdr->id; ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n", j, hw->config.filter_def[j], i, rq->hdr->id); } } } rc = OCS_HW_RTN_ERROR; if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "FCFI registration failed\n"); return rc; } hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi; } } /* * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits, * thus the pool allocation size of 64k) */ rc = ocs_hw_reqtag_init(hw); if (rc) { ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc); return rc; } rc = ocs_hw_setup_io(hw); if (rc) { ocs_log_err(hw->os, "IO allocation failure\n"); return rc; } rc = ocs_hw_init_io(hw); if (rc) { ocs_log_err(hw->os, "IO initialization failure\n"); return rc; } ocs_queue_history_init(hw->os, &hw->q_hist); /* get hw link config; polling, so callback will be called immediately */ hw->linkcfg = OCS_HW_LINKCFG_NA; ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw); /* if lancer ethernet, ethernet ports need to be enabled */ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) { if (ocs_hw_set_eth_license(hw, hw->eth_license)) { /* log warning but continue */ ocs_log_err(hw->os, "Failed to set ethernet license\n"); } } /* Set the DIF seed - only for lancer right now */ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) && ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Failed to set DIF seed value\n"); return rc; } /* Set the DIF mode - skyhawk only */ if (SLI4_IF_TYPE_BE3_SKH_PF == 
sli_get_if_type(&hw->sli) && sli_get_dif_capable(&hw->sli)) { rc = ocs_hw_set_dif_mode(hw); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "Failed to set DIF mode value\n"); return rc; } } /* * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries */ for (i = 0; i < hw->eq_count; i++) { sli_queue_arm(&hw->sli, &hw->eq[i], TRUE); } /* * Initialize RQ hash */ for (i = 0; i < hw->rq_count; i++) { ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i); } /* * Initialize WQ hash */ for (i = 0; i < hw->wq_count; i++) { ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i); } /* * Arming the CQ allows (e.g.) MQ completions to write CQ entries */ for (i = 0; i < hw->cq_count; i++) { ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i); sli_queue_arm(&hw->sli, &hw->cq[i], TRUE); } /* record the fact that the queues are functional */ hw->state = OCS_HW_STATE_ACTIVE; /* Note: Must be after the IOs are set up and the state is active */ if (ocs_hw_rqpair_init(hw)) { ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n"); } /* finally kick off periodic timer to check for timed out target WQEs */ if (hw->config.emulate_tgt_wqe_timeout) { ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); } /* * Allocate HW IOs for send frame. Allocate one for each Class 1 WQ, or if there * are none of those, allocate one for WQ[0] */ if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) { for (i = 0; i < count; i++) { hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]); wq->send_frame_io = ocs_hw_io_alloc(hw); if (wq->send_frame_io == NULL) { ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n"); } } } else { hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw); if (hw->hw_wq[0]->send_frame_io == NULL) { ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n"); } } /* Initialize send frame sequence id */ ocs_atomic_init(&hw->send_frame_seq_id, 0); /* Initialize watchdog timer if enabled by user */ hw->expiration_logged = 0; if(hw->watchdog_timeout) { if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) { ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n"); }else if(!ocs_hw_config_watchdog_timer(hw)) { ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout); } } if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) { ocs_log_err(hw->os, "domain node memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) { ocs_log_err(hw->os, "domain fcf memory allocation fail\n"); return OCS_HW_RTN_NO_MEMORY; } if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map, SLI4_MIN_LOOP_MAP_BYTES, 4)) { ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size); } return OCS_HW_RTN_SUCCESS; } /** * @brief Configure Multi-RQ * * @param hw Hardware context allocated by the caller. * @param mode 1 to set MRQ filters and 0 to set FCFI index * @param vlanid valid in mode 0 * @param fcf_index valid in mode 0 * * @return Returns 0 on success, or a non-zero value on failure.
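* * @par Example * With MRQ enabled, ocs_hw_init() calls this function twice: first with * mode SLI4_CMD_REG_FCFI_SET_FCFI_MODE to register the FCFI, then with * mode SLI4_CMD_REG_FCFI_SET_MRQ_MODE to program the RQ filters built * from hw->config.filter_def.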
*/ static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index) { uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0; hw_rq_t *rq; sli4_cmd_reg_fcfi_mrq_t *rsp = NULL; uint32_t i, j; sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG]; int32_t rc; if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) { goto issue_cmd; } /* Set the filter match/mask values from hw's filter_def values */ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { rq_filter[i].rq_id = 0xffff; rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); } /* Accumulate counts for each filter type used, build the rq_filter[] list */ for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) { if (rq->filter_mask & (1U << j)) { if (rq_filter[j].rq_id != 0xffff) { /* Already used. Bail out if it's not the RQ set case */ if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) { ocs_log_err(hw->os, "Wrong queue topology.\n"); return OCS_HW_RTN_ERROR; } continue; } if (rq->is_mrq) { rq_filter[j].rq_id = rq->base_mrq_id; mrq_bitmask |= (1U << j); } else { rq_filter[j].rq_id = rq->hdr->id; } } } } issue_cmd: /* Invoke REG_FCFI_MRQ */ rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, /* buf */ SLI4_BMBX_SIZE, /* size */ mode, /* mode */ fcf_index, /* fcf_index */ vlanid, /* vlan_id */ hw->config.rq_selection_policy, /* RQ selection policy*/ mrq_bitmask, /* MRQ bitmask */ hw->hw_mrq_count, /* num_mrqs */ rq_filter); /* RQ filter */ if (rc == 0) { ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc); return OCS_HW_RTN_ERROR; } rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf; if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) { ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n", rsp->hdr.command, rsp->hdr.status); return OCS_HW_RTN_ERROR; } if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) { hw->fcf_indicator = rsp->fcfi; } return 0; } /** * @brief Callback function for getting linkcfg during HW initialization. * * @param status Status of the linkcfg get operation. * @param value Link configuration enum to which the link configuration is set. * @param arg Callback argument (ocs_hw_t *). * * @return None.
*/ ocs_hw_rtn_e ocs_hw_teardown(ocs_hw_t *hw) { uint32_t i = 0; uint32_t iters = 10;/*XXX*/ uint32_t max_rpi; uint32_t destroy_queues; uint32_t free_memory; if (!hw) { ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw); return OCS_HW_RTN_ERROR; } destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE); free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED); /* shutdown target wqe timer */ shutdown_target_wqe_timer(hw); /* Cancel watchdog timer if enabled */ if(hw->watchdog_timeout) { hw->watchdog_timeout = 0; ocs_hw_config_watchdog_timer(hw); } /* Cancel Sliport Healthcheck */ if(hw->sliport_healthcheck) { hw->sliport_healthcheck = 0; ocs_hw_config_sli_port_health_check(hw, 0, 0); } if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) { hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS; ocs_hw_flush(hw); /* If there are outstanding commands, wait for them to complete */ while (!ocs_list_empty(&hw->cmd_head) && iters) { ocs_udelay(10000); ocs_hw_flush(hw); iters--; } if (ocs_list_empty(&hw->cmd_head)) { ocs_log_debug(hw->os, "All commands completed on MQ queue\n"); } else { ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n"); } /* Cancel any remaining commands */ ocs_hw_command_cancel(hw); } else { hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS; } ocs_lock_free(&hw->cmd_lock); /* Free unregistered RPI if workaround is in force */ if (hw->workaround.use_unregistered_rpi) { sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid); } max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); if (hw->rpi_ref) { for (i = 0; i < max_rpi; i++) { if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) { ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n", i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count)); } } ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref)); hw->rpi_ref = NULL; } ocs_dma_free(hw->os, &hw->rnode_mem); if (hw->io) { for (i = 0; i < hw->config.n_io; i++) { if (hw->io[i] && (hw->io[i]->sgl != NULL) && (hw->io[i]->sgl->virt != NULL)) { if(hw->io[i]->is_port_owned) { ocs_lock_free(&hw->io[i]->axr_lock); } ocs_dma_free(hw->os, hw->io[i]->sgl); } ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t)); hw->io[i] = NULL; } ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size); hw->wqe_buffs = NULL; ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *)); hw->io = NULL; } ocs_dma_free(hw->os, &hw->xfer_rdy); ocs_dma_free(hw->os, &hw->dump_sges); ocs_dma_free(hw->os, &hw->loop_map); ocs_lock_free(&hw->io_lock); ocs_lock_free(&hw->io_abort_lock); for (i = 0; i < hw->wq_count; i++) { sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory); } for (i = 0; i < hw->rq_count; i++) { sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory); } for (i = 0; i < hw->mq_count; i++) { sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory); } for (i = 0; i < hw->cq_count; i++) { sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory); } for (i = 0; i < hw->eq_count; i++) { sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory); } ocs_hw_qtop_free(hw->qtop); /* Free rq buffers */ ocs_hw_rx_free(hw); hw_queue_teardown(hw); ocs_hw_rqpair_teardown(hw); if (sli_teardown(&hw->sli)) { ocs_log_err(hw->os, "SLI teardown failed\n"); } ocs_queue_history_free(&hw->q_hist); /* record the fact that the queues are non-functional */ hw->state = OCS_HW_STATE_UNINITIALIZED; /* free sequence free pool */ ocs_array_free(hw->seq_pool); hw->seq_pool = NULL; /* free hw_wq_callback pool */ ocs_pool_free(hw->wq_reqtag_pool); 
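/* Free the domain and FCF DMA buffers that were allocated in ocs_hw_init() */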
ocs_dma_free(hw->os, &hw->domain_dmem); ocs_dma_free(hw->os, &hw->fcf_dmem); /* Mark HW setup as not having been called */ hw->hw_setup_called = FALSE; return OCS_HW_RTN_SUCCESS; } ocs_hw_rtn_e ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset) { uint32_t i; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint32_t iters; ocs_hw_state_e prev_state = hw->state; if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_test(hw->os, "HW state %d is not active\n", hw->state); } hw->state = OCS_HW_STATE_RESET_IN_PROGRESS; /* shutdown target wqe timer */ shutdown_target_wqe_timer(hw); ocs_hw_flush(hw); /* * If a mailbox command requiring a DMA is outstanding (e.g., SFP/DDM), * then the FW will UE when the reset is issued. So attempt to complete * all mailbox commands. */ iters = 10; while (!ocs_list_empty(&hw->cmd_head) && iters) { ocs_udelay(10000); ocs_hw_flush(hw); iters--; } if (ocs_list_empty(&hw->cmd_head)) { ocs_log_debug(hw->os, "All commands completed on MQ queue\n"); } else { ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n"); } /* Reset the chip */ switch(reset) { case OCS_HW_RESET_FUNCTION: ocs_log_debug(hw->os, "issuing function level reset\n"); if (sli_reset(&hw->sli)) { ocs_log_err(hw->os, "sli_reset failed\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_RESET_FIRMWARE: ocs_log_debug(hw->os, "issuing firmware reset\n"); if (sli_fw_reset(&hw->sli)) { ocs_log_err(hw->os, "sli_soft_reset failed\n"); rc = OCS_HW_RTN_ERROR; } /* * Because the FW reset leaves the FW in a non-running state, * follow that with a regular reset. */ ocs_log_debug(hw->os, "issuing function level reset\n"); if (sli_reset(&hw->sli)) { ocs_log_err(hw->os, "sli_reset failed\n"); rc = OCS_HW_RTN_ERROR; } break; default: ocs_log_test(hw->os, "unknown reset type - no reset performed\n"); hw->state = prev_state; return OCS_HW_RTN_ERROR; } /* Not safe to walk command/io lists unless they've been initialized */ if (prev_state != OCS_HW_STATE_UNINITIALIZED) { ocs_hw_command_cancel(hw); /* Clean up the inuse list, the free list and the wait free list */ ocs_hw_io_cancel(hw); ocs_memset(hw->domains, 0, sizeof(hw->domains)); ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi)); ocs_hw_link_event_init(hw); ocs_lock(&hw->io_lock); /* The io lists should be empty, but remove any that didn't get cleaned up.
*/ while (!ocs_list_empty(&hw->io_timed_wqe)) { ocs_list_remove_head(&hw->io_timed_wqe); } /* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */ while (!ocs_list_empty(&hw->io_free)) { ocs_list_remove_head(&hw->io_free); } while (!ocs_list_empty(&hw->io_wait_free)) { ocs_list_remove_head(&hw->io_wait_free); } /* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */ ocs_hw_reqtag_reset(hw); ocs_unlock(&hw->io_lock); } if (prev_state != OCS_HW_STATE_UNINITIALIZED) { for (i = 0; i < hw->wq_count; i++) { sli_queue_reset(&hw->sli, &hw->wq[i]); } for (i = 0; i < hw->rq_count; i++) { sli_queue_reset(&hw->sli, &hw->rq[i]); } for (i = 0; i < hw->hw_rq_count; i++) { hw_rq_t *rq = hw->hw_rq[i]; if (rq->rq_tracker != NULL) { uint32_t j; for (j = 0; j < rq->entry_count; j++) { rq->rq_tracker[j] = NULL; } } } for (i = 0; i < hw->mq_count; i++) { sli_queue_reset(&hw->sli, &hw->mq[i]); } for (i = 0; i < hw->cq_count; i++) { sli_queue_reset(&hw->sli, &hw->cq[i]); } for (i = 0; i < hw->eq_count; i++) { sli_queue_reset(&hw->sli, &hw->eq[i]); } /* Free rq buffers */ ocs_hw_rx_free(hw); /* Teardown the HW queue topology */ hw_queue_teardown(hw); } else { /* Free rq buffers */ ocs_hw_rx_free(hw); } /* * Re-apply the run-time workarounds after clearing the SLI config * fields in sli_reset. */ ocs_hw_workaround_setup(hw); hw->state = OCS_HW_STATE_QUEUES_ALLOCATED; return rc; } int32_t ocs_hw_get_num_eq(ocs_hw_t *hw) { return hw->eq_count; } static int32_t ocs_hw_get_fw_timed_out(ocs_hw_t *hw) { /* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf * No further explanation is given in the document. * */ return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 && sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10); } ocs_hw_rtn_e ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; int32_t tmp; if (!value) { return OCS_HW_RTN_ERROR; } *value = 0; switch (prop) { case OCS_HW_N_IO: *value = hw->config.n_io; break; case OCS_HW_N_SGL: *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED); break; case OCS_HW_MAX_IO: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI); break; case OCS_HW_MAX_NODES: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); break; case OCS_HW_MAX_RQ_ENTRIES: *value = hw->num_qentries[SLI_QTYPE_RQ]; break; case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: *value = hw->config.rq_default_buffer_size; break; case OCS_HW_AUTO_XFER_RDY_CAPABLE: *value = sli_get_auto_xfer_rdy_capable(&hw->sli); break; case OCS_HW_AUTO_XFER_RDY_XRI_CNT: *value = hw->config.auto_xfer_rdy_xri_cnt; break; case OCS_HW_AUTO_XFER_RDY_SIZE: *value = hw->config.auto_xfer_rdy_size; break; case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: switch (hw->config.auto_xfer_rdy_blk_size_chip) { case 0: *value = 512; break; case 1: *value = 1024; break; case 2: *value = 2048; break; case 3: *value = 4096; break; case 4: *value = 520; break; default: *value = 0; rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: *value = hw->config.auto_xfer_rdy_t10_enable; break; case OCS_HW_AUTO_XFER_RDY_P_TYPE: *value = hw->config.auto_xfer_rdy_p_type; break; case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: *value = hw->config.auto_xfer_rdy_ref_tag_is_lba; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: *value = hw->config.auto_xfer_rdy_app_tag_valid; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: *value = hw->config.auto_xfer_rdy_app_tag_value; break; case OCS_HW_MAX_SGE: *value = 
sli_get_max_sge(&hw->sli); break; case OCS_HW_MAX_SGL: *value = sli_get_max_sgl(&hw->sli); break; case OCS_HW_TOPOLOGY: /* * Infer link.status based on link.speed. * Report OCS_HW_TOPOLOGY_NONE if the link is down. */ if (hw->link.speed == 0) { *value = OCS_HW_TOPOLOGY_NONE; break; } switch (hw->link.topology) { case SLI_LINK_TOPO_NPORT: *value = OCS_HW_TOPOLOGY_NPORT; break; case SLI_LINK_TOPO_LOOP: *value = OCS_HW_TOPOLOGY_LOOP; break; case SLI_LINK_TOPO_NONE: *value = OCS_HW_TOPOLOGY_NONE; break; default: ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology); rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_CONFIG_TOPOLOGY: *value = hw->config.topology; break; case OCS_HW_LINK_SPEED: *value = hw->link.speed; break; case OCS_HW_LINK_CONFIG_SPEED: switch (hw->config.speed) { case FC_LINK_SPEED_10G: *value = 10000; break; case FC_LINK_SPEED_AUTO_16_8_4: *value = 0; break; case FC_LINK_SPEED_2G: *value = 2000; break; case FC_LINK_SPEED_4G: *value = 4000; break; case FC_LINK_SPEED_8G: *value = 8000; break; case FC_LINK_SPEED_16G: *value = 16000; break; case FC_LINK_SPEED_32G: *value = 32000; break; default: ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed); rc = OCS_HW_RTN_ERROR; break; } break; case OCS_HW_IF_TYPE: *value = sli_get_if_type(&hw->sli); break; case OCS_HW_SLI_REV: *value = sli_get_sli_rev(&hw->sli); break; case OCS_HW_SLI_FAMILY: *value = sli_get_sli_family(&hw->sli); break; case OCS_HW_DIF_CAPABLE: *value = sli_get_dif_capable(&hw->sli); break; case OCS_HW_DIF_SEED: *value = hw->config.dif_seed; break; case OCS_HW_DIF_MODE: *value = hw->config.dif_mode; break; case OCS_HW_DIF_MULTI_SEPARATE: /* Lancer supports multiple DIF separates */ if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) { *value = TRUE; } else { *value = FALSE; } break; case OCS_HW_DUMP_MAX_SIZE: *value = hw->dump_size; break; case OCS_HW_DUMP_READY: *value = sli_dump_is_ready(&hw->sli); break; case OCS_HW_DUMP_PRESENT: *value = sli_dump_is_present(&hw->sli); break; case OCS_HW_RESET_REQUIRED: tmp = sli_reset_required(&hw->sli); if(tmp < 0) { rc = OCS_HW_RTN_ERROR; } else { *value = tmp; } break; case OCS_HW_FW_ERROR: *value = sli_fw_error_status(&hw->sli); break; case OCS_HW_FW_READY: *value = sli_fw_ready(&hw->sli); break; case OCS_HW_FW_TIMED_OUT: *value = ocs_hw_get_fw_timed_out(hw); break; case OCS_HW_HIGH_LOGIN_MODE: *value = sli_get_hlm_capable(&hw->sli); break; case OCS_HW_PREREGISTER_SGL: *value = sli_get_sgl_preregister_required(&hw->sli); break; case OCS_HW_HW_REV1: *value = sli_get_hw_revision(&hw->sli, 0); break; case OCS_HW_HW_REV2: *value = sli_get_hw_revision(&hw->sli, 1); break; case OCS_HW_HW_REV3: *value = sli_get_hw_revision(&hw->sli, 2); break; case OCS_HW_LINKCFG: *value = hw->linkcfg; break; case OCS_HW_ETH_LICENSE: *value = hw->eth_license; break; case OCS_HW_LINK_MODULE_TYPE: *value = sli_get_link_module_type(&hw->sli); break; case OCS_HW_NUM_CHUTES: *value = ocs_hw_get_num_chutes(hw); break; case OCS_HW_DISABLE_AR_TGT_DIF: *value = hw->workaround.disable_ar_tgt_dif; break; case OCS_HW_EMULATE_I_ONLY_AAB: *value = hw->config.i_only_aab; break; case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: *value = hw->config.emulate_tgt_wqe_timeout; break; case OCS_HW_VPD_LEN: *value = sli_get_vpd_len(&hw->sli); break; case OCS_HW_SGL_CHAINING_CAPABLE: *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported; break; case OCS_HW_SGL_CHAINING_ALLOWED: /* * SGL Chaining is allowed in the following cases: * 1. Lancer with host SGL Lists * 2. 
Skyhawk with pre-registered SGL Lists */ *value = FALSE; if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && !sli_get_sgl_preregister(&hw->sli) && SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { *value = TRUE; } if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && sli_get_sgl_preregister(&hw->sli) && ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) { *value = TRUE; } break; case OCS_HW_SGL_CHAINING_HOST_ALLOCATED: /* Only lancer supports host allocated SGL Chaining buffers. */ *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) && (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli))); break; case OCS_HW_SEND_FRAME_CAPABLE: if (hw->workaround.ignore_send_frame) { *value = 0; } else { /* Only lancer is capable */ *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH; } break; case OCS_HW_RQ_SELECTION_POLICY: *value = hw->config.rq_selection_policy; break; case OCS_HW_RR_QUANTA: *value = hw->config.rr_quanta; break; case OCS_HW_MAX_VPORTS: *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI); break; default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; } return rc; } void * ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop) { void *rc = NULL; switch (prop) { case OCS_HW_WWN_NODE: rc = sli_get_wwn_node(&hw->sli); break; case OCS_HW_WWN_PORT: rc = sli_get_wwn_port(&hw->sli); break; case OCS_HW_VPD: /* make sure VPD length is non-zero */ if (sli_get_vpd_len(&hw->sli)) { rc = sli_get_vpd(&hw->sli); } break; case OCS_HW_FW_REV: rc = sli_get_fw_name(&hw->sli, 0); break; case OCS_HW_FW_REV2: rc = sli_get_fw_name(&hw->sli, 1); break; case OCS_HW_IPL: rc = sli_get_ipl_name(&hw->sli); break; case OCS_HW_PORTNUM: rc = sli_get_portnum(&hw->sli); break; case OCS_HW_BIOS_VERSION_STRING: rc = sli_get_bios_version_string(&hw->sli); break; default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); } return rc; } ocs_hw_rtn_e ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; switch (prop) { case OCS_HW_N_IO: if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) || value == 0) { ocs_log_test(hw->os, "IO value out of range %d vs %d\n", value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI)); rc = OCS_HW_RTN_ERROR; } else { hw->config.n_io = value; } break; case OCS_HW_N_SGL: value += SLI4_SGE_MAX_RESERVED; if (value > sli_get_max_sgl(&hw->sli)) { ocs_log_test(hw->os, "SGL value out of range %d vs %d\n", value, sli_get_max_sgl(&hw->sli)); rc = OCS_HW_RTN_ERROR; } else { hw->config.n_sgl = value; } break; case OCS_HW_TOPOLOGY: if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) && (value != OCS_HW_TOPOLOGY_AUTO)) { ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n", value, sli_get_medium(&hw->sli)); rc = OCS_HW_RTN_ERROR; break; } switch (value) { case OCS_HW_TOPOLOGY_AUTO: if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC); } else { sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE); } break; case OCS_HW_TOPOLOGY_NPORT: sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA); break; case OCS_HW_TOPOLOGY_LOOP: sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL); break; default: ocs_log_test(hw->os, "unsupported topology %#x\n", value); rc = OCS_HW_RTN_ERROR; } hw->config.topology = value; break; case OCS_HW_LINK_SPEED: if (sli_get_medium(&hw->sli) != 
SLI_LINK_MEDIUM_FC) { switch (value) { case 0: /* Auto-speed negotiation */ case 10000: /* FCoE speed */ hw->config.speed = FC_LINK_SPEED_10G; break; default: ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n", value, sli_get_medium(&hw->sli)); rc = OCS_HW_RTN_ERROR; } break; } switch (value) { case 0: /* Auto-speed negotiation */ hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; break; case 2000: /* FC speeds */ hw->config.speed = FC_LINK_SPEED_2G; break; case 4000: hw->config.speed = FC_LINK_SPEED_4G; break; case 8000: hw->config.speed = FC_LINK_SPEED_8G; break; case 16000: hw->config.speed = FC_LINK_SPEED_16G; break; case 32000: hw->config.speed = FC_LINK_SPEED_32G; break; default: ocs_log_test(hw->os, "unsupported speed %d\n", value); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_DIF_SEED: /* Set the DIF seed - only for lancer right now */ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "DIF seed not supported for this device\n"); rc = OCS_HW_RTN_ERROR; } else { hw->config.dif_seed = value; } break; case OCS_HW_DIF_MODE: switch (value) { case OCS_HW_DIF_MODE_INLINE: /* * Make sure we support inline DIF. * * Note: Having both bits clear means that we have old * FW that doesn't set the bits. */ if (sli_is_dif_inline_capable(&hw->sli)) { hw->config.dif_mode = value; } else { ocs_log_test(hw->os, "chip does not support DIF inline\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_DIF_MODE_SEPARATE: /* Make sure we support DIF separates. */ if (sli_is_dif_separate_capable(&hw->sli)) { hw->config.dif_mode = value; } else { ocs_log_test(hw->os, "chip does not support DIF separate\n"); rc = OCS_HW_RTN_ERROR; } } break; case OCS_HW_RQ_PROCESS_LIMIT: { hw_rq_t *rq; uint32_t i; /* For each hw_rq object, set its parent CQ limit value */ for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; hw->cq[rq->cq->instance].proc_limit = value; } break; } case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: hw->config.rq_default_buffer_size = value; break; case OCS_HW_AUTO_XFER_RDY_XRI_CNT: hw->config.auto_xfer_rdy_xri_cnt = value; break; case OCS_HW_AUTO_XFER_RDY_SIZE: hw->config.auto_xfer_rdy_size = value; break; case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: switch (value) { case 512: hw->config.auto_xfer_rdy_blk_size_chip = 0; break; case 1024: hw->config.auto_xfer_rdy_blk_size_chip = 1; break; case 2048: hw->config.auto_xfer_rdy_blk_size_chip = 2; break; case 4096: hw->config.auto_xfer_rdy_blk_size_chip = 3; break; case 520: hw->config.auto_xfer_rdy_blk_size_chip = 4; break; default: ocs_log_err(hw->os, "Invalid block size %d\n", value); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: hw->config.auto_xfer_rdy_t10_enable = value; break; case OCS_HW_AUTO_XFER_RDY_P_TYPE: hw->config.auto_xfer_rdy_p_type = value; break; case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: hw->config.auto_xfer_rdy_ref_tag_is_lba = value; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: hw->config.auto_xfer_rdy_app_tag_valid = value; break; case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: hw->config.auto_xfer_rdy_app_tag_value = value; break; case OCS_ESOC: hw->config.esoc = value; break; case OCS_HW_HIGH_LOGIN_MODE: rc = sli_set_hlm(&hw->sli, value); break; case OCS_HW_PREREGISTER_SGL: rc = sli_set_sgl_preregister(&hw->sli, value); break; case OCS_HW_ETH_LICENSE: hw->eth_license = value; break; case OCS_HW_EMULATE_I_ONLY_AAB: hw->config.i_only_aab = value; break; case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: hw->config.emulate_tgt_wqe_timeout = value; break; case OCS_HW_BOUNCE: hw->config.bounce = value; break; case 
OCS_HW_RQ_SELECTION_POLICY: hw->config.rq_selection_policy = value; break; case OCS_HW_RR_QUANTA: hw->config.rr_quanta = value; break; default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; } return rc; } ocs_hw_rtn_e ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; switch (prop) { case OCS_HW_WAR_VERSION: hw->hw_war_version = value; break; case OCS_HW_FILTER_DEF: { char *p = value; uint32_t idx = 0; for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) { hw->config.filter_def[idx] = 0; } for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) { hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0); p = ocs_strchr(p, ','); if (p != NULL) { p++; } } break; } default: ocs_log_test(hw->os, "unsupported property %#x\n", prop); rc = OCS_HW_RTN_ERROR; break; } return rc; } /** * @ingroup interrupt * @brief Check for the events associated with the interrupt vector. * * @param hw Hardware context. * @param vector Zero-based interrupt vector number. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector) { int32_t rc = 0; if (!hw) { ocs_log_err(NULL, "HW context NULL?!?\n"); return -1; } if (vector > hw->eq_count) { ocs_log_err(hw->os, "vector %d. max %d\n", vector, hw->eq_count); return -1; } /* * The caller should disable interrupts if they wish to prevent us * from processing during a shutdown. The following states are defined: * OCS_HW_STATE_UNINITIALIZED - No queues allocated * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, * queues are cleared. * OCS_HW_STATE_ACTIVE - Chip and queues are operational * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox * completions. */ if (hw->state != OCS_HW_STATE_UNINITIALIZED) { rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]); /* Re-arm queue if there are no entries */ if (rc != 0) { sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE); } } return rc; } void ocs_hw_unsol_process_bounce(void *arg) { ocs_hw_sequence_t *seq = arg; ocs_hw_t *hw = seq->hw; ocs_hw_assert(hw != NULL); ocs_hw_assert(hw->callback.unsolicited != NULL); hw->callback.unsolicited(hw->args.unsolicited, seq); } int32_t ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec) { hw_eq_t *eq; int32_t rc = 0; CPUTRACE(""); /* * The caller should disable interrupts if they wish to prevent us * from processing during a shutdown. The following states are defined: * OCS_HW_STATE_UNINITIALIZED - No queues allocated * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, * queues are cleared. * OCS_HW_STATE_ACTIVE - Chip and queues are operational * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox * completions. */ if (hw->state == OCS_HW_STATE_UNINITIALIZED) { return 0; } /* Get pointer to hw_eq_t */ eq = hw->hw_eq[vector]; OCS_STAT(eq->use_count++); rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec); return rc; } /** * @ingroup interrupt * @brief Process events associated with an EQ. * * @par Description * Loop termination: * @n @n Without a mechanism to terminate the completion processing loop, it * is possible under some workload conditions for the loop to never terminate * (or at least take longer than the OS is happy to have an interrupt handler * or kernel thread context hold a CPU without yielding). 
* @n @n The approach taken here is to periodically check how much time * we have been in this * processing loop, and if we exceed a predetermined time (multiple seconds), the * loop is terminated, and ocs_hw_process() returns. * * @param hw Hardware context. * @param eq Pointer to HW EQ object. * @param max_isr_time_msec Maximum time in msec to stay in this function. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec) { uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 }; uint32_t done = FALSE; uint32_t tcheck_count; time_t tstart; time_t telapsed; tcheck_count = OCS_HW_TIMECHECK_ITERATIONS; tstart = ocs_msectime(); CPUTRACE(""); while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) { uint16_t cq_id = 0; int32_t rc; rc = sli_eq_parse(&hw->sli, eqe, &cq_id); if (unlikely(rc)) { if (rc > 0) { uint32_t i; /* * Received a sentinel EQE indicating the EQ is full. * Process all CQs */ for (i = 0; i < hw->cq_count; i++) { ocs_hw_cq_process(hw, hw->hw_cq[i]); } continue; } else { return rc; } } else { int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id); if (likely(index >= 0)) { ocs_hw_cq_process(hw, hw->hw_cq[index]); } else { ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id); } } if (eq->queue->n_posted > (eq->queue->posted_limit)) { sli_queue_arm(&hw->sli, eq->queue, FALSE); } if (tcheck_count && (--tcheck_count == 0)) { tcheck_count = OCS_HW_TIMECHECK_ITERATIONS; telapsed = ocs_msectime() - tstart; if (telapsed >= max_isr_time_msec) { done = TRUE; } } } sli_queue_eq_arm(&hw->sli, eq->queue, TRUE); return 0; } /** * @brief Submit queued (pending) mbx commands. * * @par Description * Submit queued mailbox commands. * --- Assumes that hw->cmd_lock is held --- * * @param hw Hardware context. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t ocs_hw_cmd_submit_pending(ocs_hw_t *hw) { ocs_command_ctx_t *ctx; int32_t rc = 0; /* Assumes lock held */ /* Only submit MQE if there's room */ while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) { ctx = ocs_list_remove_head(&hw->cmd_pending); if (ctx == NULL) { break; } ocs_list_add_tail(&hw->cmd_head, ctx); hw->cmd_head_count++; if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) { ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc); rc = -1; break; } } return rc; } /** * @ingroup io * @brief Issue a SLI command. * * @par Description * Send a mailbox command to the hardware, and either wait for a completion * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT). * * @param hw Hardware context. * @param cmd Buffer containing a formatted command and results. * @param opts Command options: * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion. * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback. * @param cb Function callback used for asynchronous mode. May be NULL. * @n Prototype is (*cb)(void *arg, uint8_t *cmd). * @n @n @b Note: If the * callback function pointer is NULL, the results of the command are silently * discarded, allowing this pointer to exist solely on the stack. * @param arg Argument passed to an asynchronous callback. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* * If the chip is in an error state (UE'd) then reject this mailbox * command. 
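 * The (error1 == 0x2, error2 == 0x10) register signature tested below
 * identifies a heartbeat/watchdog expiration; it is logged only once,
 * via the expiration_logged flag, while any error state is reported as
 * a generic chip error that requires a reset.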
*/ if (sli_fw_error_status(&hw->sli) > 0) { uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1); uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2); if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) { hw->expiration_logged = 1; ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n", hw->watchdog_timeout); } ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS), err1, err2); return OCS_HW_RTN_ERROR; } if (OCS_CMD_POLL == opts) { ocs_lock(&hw->cmd_lock); if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) { /* * Can't issue Boot-strap mailbox command with other * mail-queue commands pending as this interaction is * undefined */ rc = OCS_HW_RTN_ERROR; } else { void *bmbx = hw->sli.bmbx.virt; ocs_memset(bmbx, 0, SLI4_BMBX_SIZE); ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE); if (sli_bmbx_command(&hw->sli) == 0) { rc = OCS_HW_RTN_SUCCESS; ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE); } } ocs_unlock(&hw->cmd_lock); } else if (OCS_CMD_NOWAIT == opts) { ocs_command_ctx_t *ctx = NULL; ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT); if (!ctx) { ocs_log_err(hw->os, "can't allocate command context\n"); return OCS_HW_RTN_NO_RESOURCES; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state); ocs_free(hw->os, ctx, sizeof(*ctx)); return OCS_HW_RTN_ERROR; } if (cb) { ctx->cb = cb; ctx->arg = arg; } ctx->buf = cmd; ctx->ctx = hw; ocs_lock(&hw->cmd_lock); /* Add to pending list */ ocs_list_add_tail(&hw->cmd_pending, ctx); /* Submit as much of the pending list as we can */ if (ocs_hw_cmd_submit_pending(hw) == 0) { rc = OCS_HW_RTN_SUCCESS; } ocs_unlock(&hw->cmd_lock); } return rc; } /** * @ingroup devInitShutdown * @brief Register a callback for the given event. * * @param hw Hardware context. * @param which Event of interest. * @param func Function to call when the event occurs. * @param arg Argument passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg) { if (!hw || !func || (which >= OCS_HW_CB_MAX)) { ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n", hw, which, func); return OCS_HW_RTN_ERROR; } switch (which) { case OCS_HW_CB_DOMAIN: hw->callback.domain = func; hw->args.domain = arg; break; case OCS_HW_CB_PORT: hw->callback.port = func; hw->args.port = arg; break; case OCS_HW_CB_UNSOLICITED: hw->callback.unsolicited = func; hw->args.unsolicited = arg; break; case OCS_HW_CB_REMOTE_NODE: hw->callback.rnode = func; hw->args.rnode = arg; break; case OCS_HW_CB_BOUNCE: hw->callback.bounce = func; hw->args.bounce = arg; break; default: ocs_log_test(hw->os, "unknown callback %#x\n", which); return OCS_HW_RTN_ERROR; } return OCS_HW_RTN_SUCCESS; } /** * @ingroup port * @brief Allocate a port object. * * @par Description * This function allocates a VPI object for the port and stores it in the * indicator field of the port object. * * @param hw Hardware context. * @param sport SLI port object used to connect to the domain. * @param domain Domain object associated with this port (may be NULL). * @param wwpn Port's WWPN in big-endian order, or NULL to use default. * * @return Returns 0 on success, or a non-zero value on failure. 
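 * @n @n A minimal usage sketch (hedged: @c my_sport, @c my_domain, and
 * @c wwpn_be are hypothetical caller-owned objects, with @c wwpn_be
 * holding the WWPN in big-endian byte order):
 * @code
 * if (ocs_hw_port_alloc(hw, my_sport, my_domain, wwpn_be) != OCS_HW_RTN_SUCCESS) {
 *         // VPI allocation failed or the port state machine could not start.
 * }
 * @endcode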
*/ ocs_hw_rtn_e ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain, uint8_t *wwpn) { uint8_t *cmd = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint32_t index; sport->indicator = UINT32_MAX; sport->hw = hw; sport->ctx.app = sport; sport->sm_free_req_pending = 0; /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } if (wwpn) { ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn)); } if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) { ocs_log_err(hw->os, "FCOE_VPI allocation failure\n"); return OCS_HW_RTN_ERROR; } if (domain != NULL) { ocs_sm_function_t next = NULL; cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!cmd) { ocs_log_err(hw->os, "command memory allocation failed\n"); rc = OCS_HW_RTN_NO_MEMORY; goto ocs_hw_port_alloc_out; } /* If the WWPN is NULL, fetch the default WWPN and WWNN before * initializing the VPI */ if (!wwpn) { next = __ocs_hw_port_alloc_read_sparm64; } else { next = __ocs_hw_port_alloc_init_vpi; } ocs_sm_transition(&sport->ctx, next, cmd); } else if (!wwpn) { /* This is the convention for the HW, not SLI */ ocs_log_test(hw->os, "need WWN for physical port\n"); rc = OCS_HW_RTN_ERROR; } else { /* domain NULL and wwpn non-NULL */ ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL); } ocs_hw_port_alloc_out: if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); } return rc; } /** * @ingroup port * @brief Attach a physical/virtual SLI port to a domain. * * @par Description * This function registers a previously-allocated VPI with the * device. * * @param hw Hardware context. * @param sport Pointer to the SLI port object. * @param fc_id Fibre Channel ID to associate with this port. * * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure. */ ocs_hw_rtn_e ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id) { uint8_t *buf = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (!hw || !sport) { ocs_log_err(hw ? hw->os : NULL, "bad parameter(s) hw=%p sport=%p\n", hw, sport); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } sport->fc_id = fc_id; ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf); return rc; } /** * @brief Called when the port control command completes. * * @par Description * We only need to free the mailbox command buffer. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * * @return Returns 0. */ static int32_t ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @ingroup port * @brief Control a port (initialize, shutdown, or set link configuration). 
* * @par Description * This function controls a port depending on the @c ctrl parameter: * - @b OCS_HW_PORT_INIT - * Issues the CONFIG_LINK and INIT_LINK commands for the specified port. * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up. * . * - @b OCS_HW_PORT_SHUTDOWN - * Issues the DOWN_LINK command for the specified port. * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down. * . * - @b OCS_HW_PORT_SET_LINK_CONFIG - * Sets the link configuration. * * @param hw Hardware context. * @param ctrl Specifies the operation: * - OCS_HW_PORT_INIT * - OCS_HW_PORT_SHUTDOWN * - OCS_HW_PORT_SET_LINK_CONFIG * * @param value Operation-specific value. * - OCS_HW_PORT_INIT - Selective reset AL_PA * - OCS_HW_PORT_SHUTDOWN - N/A * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value. * * @param cb Callback function; depends on the operation: * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events * are handled by the OCS_HW_CB_DOMAIN callbacks). * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after the linkcfg mailbox command * completes. * * @param arg Argument passed to the callback; depends on the operation: * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events * are handled by the OCS_HW_CB_DOMAIN callbacks). * - OCS_HW_PORT_SET_LINK_CONFIG - Passed to the callback after the linkcfg * mailbox command completes. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; switch (ctrl) { case OCS_HW_PORT_INIT: { uint8_t *init_link; uint32_t speed = 0; uint8_t reset_alpa = 0; if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) { uint8_t *cfg_link; cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (cfg_link == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT, ocs_hw_cb_port_control, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "CONFIG_LINK failed\n"); break; } speed = hw->config.speed; reset_alpa = (uint8_t)(value & 0xff); } else { speed = FC_LINK_SPEED_10G; } /* * Bring link up, unless FW version is not supported */ if (hw->workaround.fw_version_too_low) { if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) { ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n", OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli, 0)); } else { ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0)); } return OCS_HW_RTN_ERROR; } rc = OCS_HW_RTN_ERROR; /* Allocate a new buffer for the init_link command */ init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (init_link == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) { rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT, ocs_hw_cb_port_control, NULL); } /* Free buffer on error, since no callback is coming */ if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, init_link, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "INIT_LINK failed\n"); } break; } case OCS_HW_PORT_SHUTDOWN: { uint8_t *down_link; down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (down_link == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT, ocs_hw_cb_port_control, NULL); } /* Free buffer on error, since no callback is coming */ if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, down_link, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "DOWN_LINK failed\n"); } break; } case OCS_HW_PORT_SET_LINK_CONFIG: rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg); break; default: ocs_log_test(hw->os, "unhandled control %#x\n", ctrl); break; } return rc; } /** * @ingroup port * @brief Free port resources. * * @par Description * Issue the UNREG_VPI command to free the assigned VPI context. * * @param hw Hardware context. * @param sport SLI port object used to connect to the domain. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (!hw || !sport) { ocs_log_err(hw ? hw->os : NULL, "bad parameter(s) hw=%p sport=%p\n", hw, sport); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL); return rc; } /** * @ingroup domain * @brief Allocate a fabric domain object. * * @par Description * This function starts a series of commands needed to connect to the domain, including * - REG_FCFI * - INIT_VFI * - READ_SPARMS * . * @b Note: Not all SLI interface types use all of the above commands. * @n @n Upon successful allocation, the HW generates an OCS_HW_DOMAIN_ALLOC_OK * event. On failure, it generates an OCS_HW_DOMAIN_ALLOC_FAIL event. * * @param hw Hardware context. * @param domain Pointer to the domain object. * @param fcf FCF index. * @param vlan VLAN ID. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan) { uint8_t *cmd = NULL; uint32_t index; if (!hw || !domain || !domain->sport) { ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n", hw, domain, domain ? domain->sport : NULL); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding.
*/ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!cmd) { ocs_log_err(hw->os, "command memory allocation failed\n"); return OCS_HW_RTN_NO_MEMORY; } domain->dma = hw->domain_dmem; domain->hw = hw; domain->sm.app = domain; domain->fcf = fcf; domain->fcf_indicator = UINT32_MAX; domain->vlan_id = vlan; domain->indicator = UINT32_MAX; if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) { ocs_log_err(hw->os, "FCOE_VFI allocation failure\n"); ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); return OCS_HW_RTN_ERROR; } ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd); return OCS_HW_RTN_SUCCESS; } /** * @ingroup domain * @brief Attach a SLI port to a domain. * * @param hw Hardware context. * @param domain Pointer to the domain object. * @param fc_id Fibre Channel ID to associate with this port. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id) { uint8_t *buf = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (!hw || !domain) { ocs_log_err(hw ? hw->os : NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } domain->sport->fc_id = fc_id; ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf); return rc; } /** * @ingroup domain * @brief Free a fabric domain object. * * @par Description * Free both the driver and SLI port resources associated with the domain. * * @param hw Hardware context. * @param domain Pointer to the domain object. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (!hw || !domain) { ocs_log_err(hw ? hw->os : NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL); return rc; } /** * @ingroup domain * @brief Free a fabric domain object. * * @par Description * Free the driver resources associated with the domain. The difference between * this call and ocs_hw_domain_free() is that this call assumes resources no longer * exist on the SLI port, due to a reset or after some error conditions. * * @param hw Hardware context. * @param domain Pointer to the domain object. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain) { if (!hw || !domain) { ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); return OCS_HW_RTN_SUCCESS; } /** * @ingroup node * @brief Allocate a remote node object. * * @param hw Hardware context. * @param rnode Allocated remote node object to initialize. 
* @param fc_addr FC address of the remote node. * @param sport SLI port used to connect to remote node. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr, ocs_sli_port_t *sport) { /* Check for invalid indicator */ if (UINT32_MAX != rnode->indicator) { ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n", fc_addr, rnode->indicator); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } /* NULL SLI port indicates an unallocated remote node */ rnode->sport = NULL; if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) { ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", fc_addr); return OCS_HW_RTN_ERROR; } rnode->fc_id = fc_addr; rnode->sport = sport; return OCS_HW_RTN_SUCCESS; } /** * @ingroup node * @brief Update a remote node object with the remote port's service parameters. * * @param hw Hardware context. * @param rnode Allocated remote node object to initialize. * @param sparms DMA buffer containing the remote port's service parameters. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; uint8_t *buf = NULL; uint32_t count = 0; if (!hw || !rnode || !sparms) { ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n", hw, rnode, sparms); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } /* * If the attach count is non-zero, this RPI has already been registered. * Otherwise, register the RPI */ if (rnode->index == UINT32_MAX) { ocs_log_err(NULL, "bad parameter rnode->index invalid\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); return OCS_HW_RTN_ERROR; } count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1); if (count) { /* * Can't attach multiple FC_ID's to a node unless High Login * Mode is enabled */ if (sli_get_hlm(&hw->sli) == FALSE) { ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n", sli_get_hlm(&hw->sli), count); rc = OCS_HW_RTN_SUCCESS; } else { rnode->node_group = TRUE; rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached); rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS; } } else { rnode->node_group = FALSE; ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt); if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id, rnode->indicator, rnode->sport->indicator, sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_attach, rnode); } } if (count || rc) { if (rc < OCS_HW_RTN_SUCCESS) { ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI"); } ocs_free(hw->os, buf, SLI4_BMBX_SIZE); } return rc; } /** * @ingroup node * @brief Free a remote node resource. * * @param hw Hardware context. 
* @param rnode Remote node object to free. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (!hw || !rnode) { ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", hw, rnode); return OCS_HW_RTN_ERROR; } if (rnode->sport) { if (!rnode->attached) { if (rnode->indicator != UINT32_MAX) { if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n", rnode->indicator, rnode->fc_id); rc = OCS_HW_RTN_ERROR; } else { rnode->node_group = FALSE; rnode->indicator = UINT32_MAX; rnode->index = UINT32_MAX; rnode->free_group = FALSE; } } } else { ocs_log_err(hw->os, "Error: rnode is still attached\n"); rc = OCS_HW_RTN_ERROR; } } return rc; } /** * @ingroup node * @brief Free a remote node object. * * @param hw Hardware context. * @param rnode Remote node object to free. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode) { uint8_t *buf = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC; uint32_t index = UINT32_MAX; if (!hw || !rnode) { ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", hw, rnode); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. */ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } index = rnode->index; if (rnode->sport) { uint32_t count = 0; uint32_t fc_id; if (!rnode->attached) { return OCS_HW_RTN_SUCCESS_SYNC; } buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1); if (count <= 1) { /* There are no other references to this RPI * so unregister it and free the resource. */ fc_id = UINT32_MAX; rnode->node_group = FALSE; rnode->free_group = TRUE; } else { if (sli_get_hlm(&hw->sli) == FALSE) { ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n", count); } fc_id = rnode->fc_id & 0x00ffffff; } rc = OCS_HW_RTN_ERROR; if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator, SLI_RSRC_FCOE_RPI, fc_id)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "UNREG_RPI failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); rc = OCS_HW_RTN_ERROR; } } return rc; } /** * @ingroup node * @brief Free all remote node objects. * * @param hw Hardware context. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_node_free_all(ocs_hw_t *hw) { uint8_t *buf = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; if (!hw) { ocs_log_err(NULL, "bad parameter hw=%p\n", hw); return OCS_HW_RTN_ERROR; } /* * Check if the chip is in an error state (UE'd) before proceeding. 
*/ if (sli_fw_error_status(&hw->sli) > 0) { ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); return OCS_HW_RTN_ERROR; } buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff, SLI_RSRC_FCOE_FCFI, UINT32_MAX)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "UNREG_RPI failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); rc = OCS_HW_RTN_ERROR; } return rc; } ocs_hw_rtn_e ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) { if (!hw || !ngroup) { ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", hw, ngroup); return OCS_HW_RTN_ERROR; } if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator, &ngroup->index)) { ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", ngroup->indicator); return OCS_HW_RTN_ERROR; } return OCS_HW_RTN_SUCCESS; } ocs_hw_rtn_e ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode) { if (!hw || !ngroup || !rnode) { ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n", hw, ngroup, rnode); return OCS_HW_RTN_ERROR; } if (rnode->attached) { ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n", rnode->indicator, rnode->fc_id); return OCS_HW_RTN_ERROR; } if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", rnode->indicator); return OCS_HW_RTN_ERROR; } rnode->indicator = ngroup->indicator; rnode->index = ngroup->index; return OCS_HW_RTN_SUCCESS; } ocs_hw_rtn_e ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) { int ref; if (!hw || !ngroup) { ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", hw, ngroup); return OCS_HW_RTN_ERROR; } ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count); if (ref) { /* Hmmm, the reference count is non-zero */ ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n", ref, ngroup->indicator); if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) { ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", ngroup->indicator); return OCS_HW_RTN_ERROR; } ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0); } ngroup->indicator = UINT32_MAX; ngroup->index = UINT32_MAX; return OCS_HW_RTN_SUCCESS; } /** * @brief Initialize IO fields on each free call. * * @n @b Note: This is done on each free call (as opposed to each * alloc call) because port-owned XRIs are not * allocated with ocs_hw_io_alloc() but are freed with this * function. * * @param io Pointer to HW IO. */ static inline void ocs_hw_init_free_io(ocs_hw_io_t *io) { /* * Set io->done to NULL, to avoid any callbacks, should * a completion be received for one of these IOs */ io->done = NULL; io->abort_done = NULL; io->status_saved = 0; io->abort_in_progress = FALSE; io->port_owned_abort_count = 0; io->rnode = NULL; io->type = 0xFFFF; io->wq = NULL; io->ul_io = NULL; io->tgt_wqe_timeout = 0; } /** * @ingroup io * @brief Lockless allocate a HW IO object. * * @par Description * Assumes that hw->io_lock is held. Besides ocs_hw_io_alloc(), this function * is called directly only when the use_dif_sec_xri workaround is in use. * * @param hw Hardware context. * * @return Returns a pointer to an object on success, or NULL on failure.
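 * @n @n The expected call pattern, mirrored by ocs_hw_io_alloc() below:
 * @code
 * ocs_hw_io_t *io;
 *
 * ocs_lock(&hw->io_lock);
 * io = _ocs_hw_io_alloc(hw);
 * ocs_unlock(&hw->io_lock);
 * @endcode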
*/ static inline ocs_hw_io_t * _ocs_hw_io_alloc(ocs_hw_t *hw) { ocs_hw_io_t *io = NULL; if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { ocs_list_add_tail(&hw->io_inuse, io); io->state = OCS_HW_IO_STATE_INUSE; io->quarantine = FALSE; io->quarantine_first_phase = TRUE; io->abort_reqtag = UINT32_MAX; ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io); } else { ocs_atomic_add_return(&hw->io_alloc_failed_count, 1); } return io; } /** * @ingroup io * @brief Allocate a HW IO object. * * @par Description * @n @b Note: This function applies to non-port owned XRIs * only. * * @param hw Hardware context. * * @return Returns a pointer to an object on success, or NULL on failure. */ ocs_hw_io_t * ocs_hw_io_alloc(ocs_hw_t *hw) { ocs_hw_io_t *io = NULL; ocs_lock(&hw->io_lock); io = _ocs_hw_io_alloc(hw); ocs_unlock(&hw->io_lock); return io; } /** * @ingroup io * @brief Allocate/Activate a port owned HW IO object. * * @par Description * This function is called by the transport layer when an XRI is * allocated by the SLI-Port. This will "activate" the HW IO * associated with the XRI received from the SLI-Port to mirror * the state of the XRI. * @n @n @b Note: This function applies to port owned XRIs only. * * @param hw Hardware context. * @param io Pointer HW IO to activate/allocate. * * @return Returns a pointer to an object on success, or NULL on failure. */ ocs_hw_io_t * ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) { if (ocs_ref_read_count(&io->ref) > 0) { ocs_log_err(hw->os, "Bad parameter: refcount > 0\n"); return NULL; } if (io->wq != NULL) { ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator); return NULL; } ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io); io->xbusy = TRUE; return io; } /** * @ingroup io * @brief When an IO is freed, depending on the exchange busy flag, and other * workarounds, move it to the correct list. * * @par Description * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed * from the busy or wait_free list. * * @param hw Hardware context. * @param io Pointer to the IO object to move. */ static void ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io) { if (io->xbusy) { /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */ ocs_list_add_tail(&hw->io_wait_free, io); io->state = OCS_HW_IO_STATE_WAIT_FREE; } else { /* IO not busy, add to free list */ ocs_list_add_tail(&hw->io_free, io); io->state = OCS_HW_IO_STATE_FREE; } /* BZ 161832 workaround */ if (hw->workaround.use_dif_sec_xri) { ocs_hw_check_sec_hio_list(hw); } } /** * @ingroup io * @brief Free a HW IO object. Perform cleanup common to * port and host-owned IOs. * * @param hw Hardware context. * @param io Pointer to the HW IO object. */ static inline void ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io) { /* initialize IO fields */ ocs_hw_init_free_io(io); /* Restore default SGL */ ocs_hw_io_restore_sgl(hw, io); } /** * @ingroup io * @brief Free a HW IO object associated with a port-owned XRI. * * @param arg Pointer to the HW IO object. */ static void ocs_hw_io_free_port_owned(void *arg) { ocs_hw_io_t *io = (ocs_hw_io_t *)arg; ocs_hw_t *hw = io->hw; /* * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs * waiting for buffers. 
*/ if (io->auto_xfer_rdy_dnrx) { ocs_lock(&hw->io_lock); /* take a reference count because we still own the IO until the buffer is posted */ ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io); ocs_list_add_tail(&hw->io_port_dnrx, io); ocs_unlock(&hw->io_lock); } /* perform common cleanup */ ocs_hw_io_free_common(hw, io); } /** * @ingroup io * @brief Free a previously-allocated HW IO object. Called when * IO refcount goes to zero (host-owned IOs only). * * @param arg Pointer to the HW IO object. */ static void ocs_hw_io_free_internal(void *arg) { ocs_hw_io_t *io = (ocs_hw_io_t *)arg; ocs_hw_t *hw = io->hw; /* perform common cleanup */ ocs_hw_io_free_common(hw, io); ocs_lock(&hw->io_lock); /* remove from in-use list */ ocs_list_remove(&hw->io_inuse, io); ocs_hw_io_free_move_correct_list(hw, io); ocs_unlock(&hw->io_lock); } /** * @ingroup io * @brief Free a previously-allocated HW IO object. * * @par Description * @n @b Note: This function applies to port and host owned XRIs. * * @param hw Hardware context. * @param io Pointer to the HW IO object. * * @return Returns a non-zero value if HW IO was freed, 0 if references * on the IO still exist, or a negative value if an error occurred. */ int32_t ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io) { /* just put refcount */ if (ocs_ref_read_count(&io->ref) <= 0) { ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n", io->indicator, io->reqtag); return -1; } return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */ } /** * @ingroup io * @brief Check if a given HW IO is in use * * @par Description * This function returns TRUE if the given HW IO has been * allocated and is in use, and FALSE otherwise. It applies to * port and host owned XRIs. * * @param hw Hardware context. * @param io Pointer to the HW IO object. * * @return TRUE if an IO is in use, or FALSE otherwise. */ uint8_t ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io) { return (ocs_ref_read_count(&io->ref) > 0); } /** * @brief Write a HW IO to a work queue. * * @par Description * A HW IO is written to a work queue. * * @param wq Pointer to work queue. * @param wqe Pointer to WQ entry. * * @n @b Note: Assumes the SLI-4 queue lock is held. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) { int32_t rc; int32_t queue_rc; /* Every so often, set the wqec bit to generate consumed completions */ if (wq->wqec_count) { wq->wqec_count--; } if (wq->wqec_count == 0) { sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf; genwqe->wqec = 1; wq->wqec_count = wq->wqec_set_count; } /* Decrement WQ free count */ wq->free_count--; queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf); if (queue_rc < 0) { rc = -1; } else { rc = 0; ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc); } return rc; } /** * @brief Write a HW IO to a work queue. * * @par Description * A HW IO is written to a work queue. * * @param wq Pointer to work queue. * @param wqe Pointer to WQE entry. * * @n @b Note: Takes the SLI-4 queue lock. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) { int32_t rc = 0; sli_queue_lock(wq->queue); if ( !
ocs_list_empty(&wq->pending_list)) { ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { rc = _hw_wq_write(wq, wqe); if (rc < 0) { break; } if (wqe->abort_wqe_submit_needed) { wqe->abort_wqe_submit_needed = 0; sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT ); ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } } else { if (wq->free_count > 0) { rc = _hw_wq_write(wq, wqe); } else { ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } sli_queue_unlock(wq->queue); return rc; } /** * @brief Update free count and submit any pending HW IOs * * @par Description * The WQ free count is updated, and any pending HW IOs are submitted that * will fit in the queue. * * @param wq Pointer to work queue. * @param update_free_count Value added to the WQ's free count. * * @return None. */ static void hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count) { ocs_hw_wqe_t *wqe; sli_queue_lock(wq->queue); /* Update free count with value passed in */ wq->free_count += update_free_count; while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { _hw_wq_write(wq, wqe); if (wqe->abort_wqe_submit_needed) { wqe->abort_wqe_submit_needed = 0; sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT); ocs_list_add_tail(&wq->pending_list, wqe); OCS_STAT(wq->wq_pending_count++;) } } sli_queue_unlock(wq->queue); } /** * @brief Check for IOs waiting on the BZ 161832 workaround * * @par Description * Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary HW IO, * then try to allocate one and dispatch the IO. * * @n @b Note: hw->io_lock MUST be taken when called.
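 * @n @n For example, ocs_hw_io_free_move_correct_list() above makes this
 * call, with hw->io_lock already held, whenever the BZ 161832
 * (use_dif_sec_xri) workaround is active:
 * @code
 * if (hw->workaround.use_dif_sec_xri) {
 *         ocs_hw_check_sec_hio_list(hw);
 * }
 * @endcode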
* * @param hw Pointer to the HW object. * * @return None. */ static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw) { ocs_hw_io_t *io; ocs_hw_io_t *sec_io; int rc = 0; while (!ocs_list_empty(&hw->sec_hio_wait_list)) { uint16_t flags; sec_io = _ocs_hw_io_alloc(hw); if (sec_io == NULL) { break; } io = ocs_list_remove_head(&hw->sec_hio_wait_list); ocs_list_add_tail(&hw->io_inuse, io); io->state = OCS_HW_IO_STATE_INUSE; io->sec_hio = sec_io; /* mark secondary XRI for second and subsequent data phase as quarantine */ if (io->xbusy) { sec_io->quarantine = TRUE; } flags = io->sec_iparam.fcp_tgt.flags; if (io->xbusy) { flags |= SLI4_IO_CONTINUATION; } else { flags &= ~SLI4_IO_CONTINUATION; } io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout; /* Complete (continue) TRECV IO */ if (io->xbusy) { if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator, io->reqtag, SLI4_CQ_DEFAULT, io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, flags, io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { ocs_log_test(hw->os, "TRECEIVE WQE error\n"); break; } } else { if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, flags, io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { ocs_log_test(hw->os, "TRECEIVE WQE error\n"); break; } } if (io->wq == NULL) { io->wq = ocs_hw_queue_next_wq(hw, io); ocs_hw_assert(io->wq != NULL); } io->xbusy = TRUE; /* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread. */ ocs_hw_add_io_timed_wqe(hw, io); rc = hw_wq_write(io->wq, &io->wqe); if (rc >= 0) { /* non-negative return is success */ rc = 0; } else { /* failed to write wqe, remove from active wqe list */ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); io->xbusy = FALSE; ocs_hw_remove_io_timed_wqe(hw, io); } } } /** * @ingroup io * @brief Send a Single Request/Response Sequence (SRRS). * * @par Description * This routine supports communication sequences consisting of a single * request and single response between two endpoints. Examples include: * - Sending an ELS request. * - Sending an ELS response - To send an ELS response, the caller must provide * the OX_ID from the received request. * - Sending an FC Common Transport (FC-CT) request - To send an FC-CT request, * the caller must provide the R_CTL, TYPE, and DF_CTL * values to place in the FC frame header. * . * @n @b Note: The caller is expected to provide both send and receive * buffers for requests. In the case of sending a response, no receive buffer * is necessary and the caller may pass in a NULL pointer. * * @param hw Hardware context. * @param type Type of sequence (ELS request/response, FC-CT). * @param io Previously-allocated HW IO object. * @param send DMA memory holding data to send (for example, ELS request, BLS response). * @param len Length, in bytes, of data to send. * @param receive Optional DMA memory to hold a response. * @param rnode Destination of data (that is, a remote node). * @param iparam IO parameters (ELS response and FC-CT).
* @param cb Function call upon completion of sending the data (may be NULL). * @param arg Argument to pass to IO completion function. * * @return Returns 0 on success, or a non-zero on failure. */ ocs_hw_rtn_e ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, ocs_dma_t *send, uint32_t len, ocs_dma_t *receive, ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam, ocs_hw_srrs_cb_t cb, void *arg) { sli4_sge_t *sge = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint16_t local_flags = 0; if (!hw || !io || !rnode || !iparam) { ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n", hw, io, send, receive, rnode, iparam); return OCS_HW_RTN_ERROR; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state); return OCS_HW_RTN_ERROR; } if (ocs_hw_is_xri_port_owned(hw, io->indicator)) { /* We must set the XC bit for port owned XRIs */ local_flags |= SLI4_IO_CONTINUATION; } io->rnode = rnode; io->type = type; io->done = cb; io->arg = arg; sge = io->sgl->virt; /* clear both SGE */ ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); if (send) { sge[0].buffer_address_high = ocs_addr32_hi(send->phys); sge[0].buffer_address_low = ocs_addr32_lo(send->phys); sge[0].sge_type = SLI4_SGE_TYPE_DATA; sge[0].buffer_length = len; } if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) { sge[1].buffer_address_high = ocs_addr32_hi(receive->phys); sge[1].buffer_address_low = ocs_addr32_lo(receive->phys); sge[1].sge_type = SLI4_SGE_TYPE_DATA; sge[1].buffer_length = receive->size; sge[1].last = TRUE; } else { sge[0].last = TRUE; } switch (type) { case OCS_HW_ELS_REQ: if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, *((uint8_t *)(send->virt)), /* req_type */ len, receive->size, iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) { ocs_log_err(hw->os, "REQ WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_ELS_RSP: if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->els.ox_id, rnode, local_flags, UINT32_MAX)) { ocs_log_err(hw->os, "RSP WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_ELS_RSP_SID: if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->els_sid.ox_id, rnode, local_flags, iparam->els_sid.s_id)) { ocs_log_err(hw->os, "RSP (SID) WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_FC_CT: if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, receive->size, iparam->fc_ct.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl, iparam->fc_ct.type, iparam->fc_ct.df_ctl)) { ocs_log_err(hw->os, "GEN WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_FC_CT_RSP: if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator, io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl, iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) { ocs_log_err(hw->os, "XMIT SEQ WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_BLS_ACC: case OCS_HW_BLS_RJT: { sli_bls_payload_t bls; if (OCS_HW_BLS_ACC == type) { bls.type = SLI_BLS_ACC; ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc)); } else { bls.type = SLI_BLS_RJT; ocs_memcpy(&bls.u.rjt, 
iparam->bls.payload, sizeof(bls.u.rjt)); } bls.ox_id = iparam->bls.ox_id; bls.rx_id = iparam->bls.rx_id; if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode, UINT32_MAX)) { ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; } case OCS_HW_BLS_ACC_SID: { sli_bls_payload_t bls; bls.type = SLI_BLS_ACC; ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc)); bls.ox_id = iparam->bls_sid.ox_id; bls.rx_id = iparam->bls_sid.rx_id; if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->bls_sid.s_id)) { ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n"); rc = OCS_HW_RTN_ERROR; } break; } case OCS_HW_BCAST: if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, iparam->bcast.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) { ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; default: ocs_log_err(hw->os, "bad SRRS type %#x\n", type); rc = OCS_HW_RTN_ERROR; } if (OCS_HW_RTN_SUCCESS == rc) { if (io->wq == NULL) { io->wq = ocs_hw_queue_next_wq(hw, io); ocs_hw_assert(io->wq != NULL); } io->xbusy = TRUE; /* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread. */ OCS_STAT(io->wq->use_count++); ocs_hw_add_io_timed_wqe(hw, io); rc = hw_wq_write(io->wq, &io->wqe); if (rc >= 0) { /* non-negative return is success */ rc = 0; } else { /* failed to write wqe, remove from active wqe list */ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); io->xbusy = FALSE; ocs_hw_remove_io_timed_wqe(hw, io); } } return rc; } /** * @ingroup io * @brief Send a read, write, or response IO. * * @par Description * This routine supports sending a higher-level IO (for example, FCP) between two endpoints * as a target or initiator. Examples include: * - Sending read data and good response (target). * - Sending a response (target with no data or after receiving write data). * . * This routine assumes all IOs use the SGL associated with the HW IO. Prior to * calling this routine, the data should be loaded using ocs_hw_io_add_sge(). * * @param hw Hardware context. * @param type Type of IO (target read, target response, and so on). * @param io Previously-allocated HW IO object. * @param len Length, in bytes, of data to send. * @param iparam IO parameters. * @param rnode Destination of data (that is, a remote node). * @param cb Function call upon completion of sending data (may be NULL). * @param arg Argument to pass to IO completion function. * * @return Returns 0 on success, or a non-zero value on failure. * * @todo * - Support specifying a relative offset. * - Use a WQ other than 0.
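 * @n @n A hedged sketch of a target read submission (the OX_ID, transfer
 * length, and completion callback here are illustrative caller-supplied
 * values, not fixed by this API):
 * @code
 * ocs_hw_io_param_t iparam = { 0 };
 *
 * iparam.fcp_tgt.ox_id = ox_id;     // exchange ID from the received command
 * iparam.fcp_tgt.offset = 0;        // relative offset of this data phase
 * iparam.fcp_tgt.timeout = 30;      // seconds
 * if (ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, xfer_len,
 *                    &iparam, rnode, my_done_cb, my_cb_arg) != OCS_HW_RTN_SUCCESS) {
 *         // WQE formatting or work queue submission failed.
 * }
 * @endcode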
*/ ocs_hw_rtn_e ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode, void *cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint32_t rpi; uint8_t send_wqe = TRUE; CPUTRACE(""); if (!hw || !io || !rnode || !iparam) { ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n", hw, io, iparam, rnode); return OCS_HW_RTN_ERROR; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); return OCS_HW_RTN_ERROR; } rpi = rnode->indicator; if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) { rpi = hw->workaround.unregistered_rid; ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi); } /* * Save state needed during later stages */ io->rnode = rnode; io->type = type; io->done = cb; io->arg = arg; /* * Format the work queue entry used to send the IO */ switch (type) { case OCS_HW_IO_INITIATOR_READ: /* * If use_dif_quarantine workaround is in effect, and dif_separates then mark the * initiator read IO for quarantine */ if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { io->quarantine = TRUE; } ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, iparam->fcp_ini.rsp); if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode, iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size, iparam->fcp_ini.timeout)) { ocs_log_err(hw->os, "IREAD WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_IO_INITIATOR_WRITE: ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, iparam->fcp_ini.rsp); if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len, iparam->fcp_ini.first_burst, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode, iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size, iparam->fcp_ini.timeout)) { ocs_log_err(hw->os, "IWRITE WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_IO_INITIATOR_NODATA: ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, iparam->fcp_ini.rsp); if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode, iparam->fcp_ini.timeout)) { ocs_log_err(hw->os, "ICMND WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; case OCS_HW_IO_TARGET_WRITE: { uint16_t flags = iparam->fcp_tgt.flags; fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt; /* * Fill in the XFER_RDY for IF_TYPE 0 devices */ *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset); *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len); *((uint32_t *)xfer->rsvd) = 0; if (io->xbusy) { flags |= SLI4_IO_CONTINUATION; } else { flags &= ~SLI4_IO_CONTINUATION; } io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; /* * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO * then mark the target write IO for quarantine */ if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { io->quarantine = TRUE; } /* * BZ 161832 Workaround: * Check for use_dif_sec_xri workaround. 
Note, even though the first dataphase * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the * potential for deadlock where all XRI's are allocated as primaries to IOs that * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first * data phase, it is marked for quarantine. */ if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { /* * If we have allocated a chained SGL for skyhawk, then * we can re-use this for the sec_hio. */ if (io->ovfl_io != NULL) { io->sec_hio = io->ovfl_io; io->sec_hio->quarantine = TRUE; } else { io->sec_hio = ocs_hw_io_alloc(hw); } if (io->sec_hio == NULL) { /* Failed to allocate, so save full request context and put * this IO on the wait list */ io->sec_iparam = *iparam; io->sec_len = len; ocs_lock(&hw->io_lock); ocs_list_remove(&hw->io_inuse, io); ocs_list_add_tail(&hw->sec_hio_wait_list, io); io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO; hw->sec_hio_wait_count++; ocs_unlock(&hw->io_lock); send_wqe = FALSE; /* Done */ break; } /* We quarantine the secondary IO if this is the second or subsequent data phase */ if (io->xbusy) { io->sec_hio->quarantine = TRUE; } } /* * If not the first data phase, and io->sec_hio has been allocated, then issue * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE */ if (io->xbusy && (io->sec_hio != NULL)) { if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id, rpi, rnode, flags, iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { ocs_log_err(hw->os, "TRECEIVE WQE error\n"); rc = OCS_HW_RTN_ERROR; } } else { if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id, rpi, rnode, flags, iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { ocs_log_err(hw->os, "TRECEIVE WQE error\n"); rc = OCS_HW_RTN_ERROR; } } break; } case OCS_HW_IO_TARGET_READ: { uint16_t flags = iparam->fcp_tgt.flags; if (io->xbusy) { flags |= SLI4_IO_CONTINUATION; } else { flags &= ~SLI4_IO_CONTINUATION; } io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id, rpi, rnode, flags, iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { ocs_log_err(hw->os, "TSEND WQE error\n"); rc = OCS_HW_RTN_ERROR; } else if (hw->workaround.retain_tsend_io_length) { io->length = len; } break; } case OCS_HW_IO_TARGET_RSP: { uint16_t flags = iparam->fcp_tgt.flags; if (io->xbusy) { flags |= SLI4_IO_CONTINUATION; } else { flags &= ~SLI4_IO_CONTINUATION; } /* post a new auto xfer ready buffer */ if (hw->auto_xfer_rdy_enabled && io->is_port_owned) { if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) { flags |= SLI4_IO_DNRX; } } io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, len, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id, rpi, rnode, flags, iparam->fcp_tgt.cs_ctl, io->is_port_owned, 
iparam->fcp_tgt.app_id)) { ocs_log_err(hw->os, "TRSP WQE error\n"); rc = OCS_HW_RTN_ERROR; } break; } default: ocs_log_err(hw->os, "unsupported IO type %#x\n", type); rc = OCS_HW_RTN_ERROR; } if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) { if (io->wq == NULL) { io->wq = ocs_hw_queue_next_wq(hw, io); ocs_hw_assert(io->wq != NULL); } io->xbusy = TRUE; /* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread. */ OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++); OCS_STAT(io->wq->use_count++); ocs_hw_add_io_timed_wqe(hw, io); rc = hw_wq_write(io->wq, &io->wqe); if (rc >= 0) { /* non-negative return is success */ rc = 0; } else { /* failed to write wqe, remove from active wqe list */ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); io->xbusy = FALSE; ocs_hw_remove_io_timed_wqe(hw, io); } } return rc; } /** * @brief Send a raw frame * * @par Description * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent. * * @param hw Pointer to HW object. * @param hdr Pointer to a little endian formatted FC header. * @param sof Value to use as the frame SOF. * @param eof Value to use as the frame EOF. * @param payload Pointer to payload DMA buffer. * @param ctx Pointer to caller provided send frame context. * @param callback Callback function. * @param arg Callback function argument. * * @return Returns 0 on success, or a negative error code value on failure. */ ocs_hw_rtn_e ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload, ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) { int32_t rc; ocs_hw_wqe_t *wqe; uint32_t xri; hw_wq_t *wq; wqe = &ctx->wqe; /* populate the callback object */ ctx->hw = hw; /* Fetch and populate request tag */ ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg); if (ctx->wqcb == NULL) { ocs_log_err(hw->os, "can't allocate request tag\n"); return OCS_HW_RTN_NO_RESOURCES; } /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */ wq = ocs_varray_iter_next(hw->wq_class_array[1]); if (wq == NULL) { wq = hw->hw_wq[0]; } /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */ xri = wq->send_frame_io->indicator; /* Build the send frame WQE */ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload, payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index); if (rc) { ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); return OCS_HW_RTN_ERROR; } /* Write to WQ */ rc = hw_wq_write(wq, wqe); if (rc) { ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc); return OCS_HW_RTN_ERROR; } OCS_STAT(wq->use_count++); return OCS_HW_RTN_SUCCESS; } ocs_hw_rtn_e ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count) { if (sli_get_sgl_preregister(&hw->sli)) { ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n"); return OCS_HW_RTN_ERROR; } io->ovfl_sgl = sgl; io->ovfl_sgl_count = sgl_count; io->ovfl_io = NULL; return OCS_HW_RTN_SUCCESS; } static void ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) { /* Restore the default */ io->sgl = &io->def_sgl; io->sgl_count = io->def_sgl_count; /* * For skyhawk, we need to free the IO allocated for the chained * SGL. For all devices, clear the overflow fields on the IO. * * Note: For DIF IOs, we may be using the same XRI for the sec_hio and * the chained SGLs. 
If so, then we clear the ovfl_io field * when the sec_hio is freed. */ if (io->ovfl_io != NULL) { ocs_hw_io_free(hw, io->ovfl_io); io->ovfl_io = NULL; } /* Clear the overflow SGL */ io->ovfl_sgl = NULL; io->ovfl_sgl_count = 0; io->ovfl_lsp = NULL; } /** * @ingroup io * @brief Initialize the scatter gather list entries of an IO. * * @param hw Hardware context. * @param io Previously-allocated HW IO object. * @param type Type of IO (target read, target response, and so on). * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type) { sli4_sge_t *data = NULL; uint32_t i = 0; uint32_t skips = 0; if (!hw || !io) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io); return OCS_HW_RTN_ERROR; } /* Clear / reset the scatter-gather list */ io->sgl = &io->def_sgl; io->sgl_count = io->def_sgl_count; io->first_data_sge = 0; ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); io->n_sge = 0; io->sge_offset = 0; io->type = type; data = io->sgl->virt; /* * Some IO types have underlying hardware requirements on the order * of SGEs. Process all special entries here. */ switch (type) { case OCS_HW_IO_INITIATOR_READ: case OCS_HW_IO_INITIATOR_WRITE: case OCS_HW_IO_INITIATOR_NODATA: /* * No skips, 2 special for initiator I/Os * The addresses and length are written later */ /* setup command pointer */ data->sge_type = SLI4_SGE_TYPE_DATA; data++; /* setup response pointer */ data->sge_type = SLI4_SGE_TYPE_DATA; if (OCS_HW_IO_INITIATOR_NODATA == type) { data->last = TRUE; } data++; io->n_sge = 2; break; case OCS_HW_IO_TARGET_WRITE: #define OCS_TARGET_WRITE_SKIPS 2 skips = OCS_TARGET_WRITE_SKIPS; /* populate host resident XFER_RDY buffer */ data->sge_type = SLI4_SGE_TYPE_DATA; data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys); data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys); data->buffer_length = io->xfer_rdy.size; data++; skips--; io->n_sge = 1; break; case OCS_HW_IO_TARGET_READ: /* * For FCP_TSEND64, the first 2 entries are SKIP SGE's */ #define OCS_TARGET_READ_SKIPS 2 skips = OCS_TARGET_READ_SKIPS; break; case OCS_HW_IO_TARGET_RSP: /* * No skips, etc. for FCP_TRSP64 */ break; default: ocs_log_err(hw->os, "unsupported IO type %#x\n", type); return OCS_HW_RTN_ERROR; } /* * Write skip entries */ for (i = 0; i < skips; i++) { data->sge_type = SLI4_SGE_TYPE_SKIP; data++; } io->n_sge += skips; /* * Set last */ data->last = TRUE; return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Add a T10 PI seed scatter gather list entry. * * @param hw Hardware context. * @param io Previously-allocated HW IO object. * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info) { sli4_sge_t *data = NULL; sli4_diseed_sge_t *dif_seed; /* If no dif_info, or dif_oper is disabled, then just return success */ if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) { return OCS_HW_RTN_SUCCESS; } if (!hw || !io) { ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n", hw, io, dif_info); return OCS_HW_RTN_ERROR; } data = io->sgl->virt; data += io->n_sge; /* If we are doing T10 DIF add the DIF Seed SGE */ ocs_memset(data, 0, sizeof(sli4_diseed_sge_t)); dif_seed = (sli4_diseed_sge_t *)data; dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp; dif_seed->ref_tag_repl = dif_info->ref_tag_repl; dif_seed->app_tag_repl = dif_info->app_tag_repl; dif_seed->repl_app_tag = dif_info->repl_app_tag; if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) { dif_seed->atrt = dif_info->disable_app_ref_ffff; dif_seed->at = dif_info->disable_app_ffff; } dif_seed->sge_type = SLI4_SGE_TYPE_DISEED; /* Workaround for SKH (BZ157233) */ if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) { dif_seed->sge_type = SLI4_SGE_TYPE_SKIP; } dif_seed->app_tag_cmp = dif_info->app_tag_cmp; dif_seed->dif_blk_size = dif_info->blk_size; dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag; dif_seed->check_app_tag = dif_info->check_app_tag; dif_seed->check_ref_tag = dif_info->check_ref_tag; dif_seed->check_crc = dif_info->check_guard; dif_seed->new_ref_tag = dif_info->repl_ref_tag; switch(dif_info->dif_oper) { case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; break; case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; break; case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; break; case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; break; case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; break; case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; break; case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; break; case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; break; case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW: dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; break; default: ocs_log_err(hw->os, "unsupported DIF operation %#x\n", dif_info->dif_oper); return OCS_HW_RTN_ERROR; } /* * Set last, clear previous last */ data->last = TRUE; if (io->n_sge) { data[-1].last = FALSE; } io->n_sge++; return OCS_HW_RTN_SUCCESS; } static ocs_hw_rtn_e ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) { sli4_lsp_sge_t *lsp; /* fail if we're already pointing to the overflow SGL */ if (io->sgl == io->ovfl_sgl) { return OCS_HW_RTN_ERROR; } /* * For skyhawk, we can use another SGL to extend the SGL list. The * Chained entry must not be in the first 4 entries. * * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio. 
*/ if (sli_get_sgl_preregister(&hw->sli) && io->def_sgl_count > 4 && io->ovfl_io == NULL && ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) { io->ovfl_io = ocs_hw_io_alloc(hw); if (io->ovfl_io != NULL) { /* * Note: We can't call ocs_hw_io_register_sgl() here * because it checks that SGLs are not pre-registered * and for skyhawk, preregistered SGLs are required. */ io->ovfl_sgl = &io->ovfl_io->def_sgl; io->ovfl_sgl_count = io->ovfl_io->def_sgl_count; } } /* fail if we don't have an overflow SGL registered */ if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) { return OCS_HW_RTN_ERROR; } /* * Overflow, we need to put a link SGE in the last location of the current SGL, after * copying the last SGE to the overflow SGL */ ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1]; lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1]; ocs_memset(lsp, 0, sizeof(*lsp)); if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { sli_skh_chain_sge_build(&hw->sli, (sli4_sge_t*)lsp, io->ovfl_io->indicator, 0, /* frag_num */ 0); /* offset */ } else { lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys); lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys); lsp->sge_type = SLI4_SGE_TYPE_LSP; lsp->last = 0; io->ovfl_lsp = lsp; io->ovfl_lsp->segment_length = sizeof(sli4_sge_t); } /* Update the current SGL pointer, and n_sgl */ io->sgl = io->ovfl_sgl; io->sgl_count = io->ovfl_sgl_count; io->n_sge = 1; return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Add a scatter gather list entry to an IO. * * @param hw Hardware context. * @param io Previously-allocated HW IO object. * @param addr Physical address. * @param length Length of memory pointed to by @c addr. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length) { sli4_sge_t *data = NULL; if (!hw || !io || !addr || !length) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p addr=%lx length=%u\n", hw, io, addr, length); return OCS_HW_RTN_ERROR; } if ((length != 0) && (io->n_sge + 1) > io->sgl_count) { if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge); return OCS_HW_RTN_ERROR; } } if (length > sli_get_max_sge(&hw->sli)) { ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n", length, sli_get_max_sge(&hw->sli)); return OCS_HW_RTN_ERROR; } data = io->sgl->virt; data += io->n_sge; data->sge_type = SLI4_SGE_TYPE_DATA; data->buffer_address_high = ocs_addr32_hi(addr); data->buffer_address_low = ocs_addr32_lo(addr); data->buffer_length = length; data->data_offset = io->sge_offset; /* * Always assume this is the last entry and mark as such. * If this is not the first entry unset the "last SGE" * indication for the previous entry */ data->last = TRUE; if (io->n_sge) { data[-1].last = FALSE; } /* Set first_data_bde if not previously set */ if (io->first_data_sge == 0) { io->first_data_sge = io->n_sge; } io->sge_offset += length; io->n_sge++; /* Update the linked segment length (only executed after overflow has begun) */ if (io->ovfl_lsp != NULL) { io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t); } return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Add a T10 DIF scatter gather list entry to an IO. * * @param hw Hardware context. * @param io Previously-allocated HW IO object.
* @param addr DIF physical address. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr) { sli4_dif_sge_t *data = NULL; if (!hw || !io || !addr) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p addr=%lx\n", hw, io, addr); return OCS_HW_RTN_ERROR; } if ((io->n_sge + 1) > hw->config.n_sgl) { if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_ERROR) { ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge); return OCS_HW_RTN_ERROR; } } data = io->sgl->virt; data += io->n_sge; data->sge_type = SLI4_SGE_TYPE_DIF; /* Workaround for SKH (BZ157233) */ if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) { data->sge_type = SLI4_SGE_TYPE_SKIP; } data->buffer_address_high = ocs_addr32_hi(addr); data->buffer_address_low = ocs_addr32_lo(addr); /* * Always assume this is the last entry and mark as such. * If this is not the first entry unset the "last SGE" * indication for the previous entry */ data->last = TRUE; if (io->n_sge) { data[-1].last = FALSE; } io->n_sge++; return OCS_HW_RTN_SUCCESS; } /** * @ingroup io * @brief Abort a previously-started IO. * * @param hw Hardware context. * @param io_to_abort The IO to abort. * @param send_abts Boolean to have the hardware automatically * generate an ABTS. * @param cb Function call upon completion of the abort (may be NULL). * @param arg Argument to pass to abort completion function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg) { sli4_abort_type_e atype = SLI_ABORT_MAX; uint32_t id = 0, mask = 0; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; hw_wq_callback_t *wqcb; if (!hw || !io_to_abort) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io_to_abort); return OCS_HW_RTN_ERROR; } if (hw->state != OCS_HW_STATE_ACTIVE) { ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n", hw->state); return OCS_HW_RTN_ERROR; } /* take a reference on IO being aborted */ if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) { /* command no longer active */ ocs_log_test(hw ? hw->os : NULL, "io not active xri=0x%x tag=0x%x\n", io_to_abort->indicator, io_to_abort->reqtag); return OCS_HW_RTN_IO_NOT_ACTIVE; } /* non-port owned XRI checks */ /* Must have a valid WQ reference */ if (io_to_abort->wq == NULL) { ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n", io_to_abort->indicator); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ return OCS_HW_RTN_IO_NOT_ACTIVE; } /* Validation checks complete; now check to see if already being aborted */ ocs_lock(&hw->io_abort_lock); if (io_to_abort->abort_in_progress) { ocs_unlock(&hw->io_abort_lock); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ ocs_log_debug(hw ? hw->os : NULL, "io already being aborted xri=0x%x tag=0x%x\n", io_to_abort->indicator, io_to_abort->reqtag); return OCS_HW_RTN_IO_ABORT_IN_PROGRESS; } /* * This IO is not already being aborted. Set flag so we won't try to * abort it again. After all, we only have one abort_done callback. 
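 * The flag is cleared again below if the ABORT_WQE cannot be queued, so a * later abort attempt can still proceed.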
*/ io_to_abort->abort_in_progress = 1; ocs_unlock(&hw->io_abort_lock); /* * If we got here, the possibilities are: * - host owned xri * - io_to_abort->wq_index != UINT32_MAX * - submit ABORT_WQE to same WQ * - port owned xri: * - rxri: io_to_abort->wq_index == UINT32_MAX * - submit ABORT_WQE to any WQ * - non-rxri * - io_to_abort->index != UINT32_MAX * - submit ABORT_WQE to same WQ * - io_to_abort->index == UINT32_MAX * - submit ABORT_WQE to any WQ */ io_to_abort->abort_done = cb; io_to_abort->abort_arg = arg; atype = SLI_ABORT_XRI; id = io_to_abort->indicator; /* Allocate a request tag for the abort portion of this IO */ wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort); if (wqcb == NULL) { ocs_log_err(hw->os, "can't allocate request tag\n"); return OCS_HW_RTN_NO_RESOURCES; } io_to_abort->abort_reqtag = wqcb->instance_index; /* * If the wqe is on the pending list, then set this wqe to be * aborted when the IO's wqe is removed from the list. */ if (io_to_abort->wq != NULL) { sli_queue_lock(io_to_abort->wq->queue); if (ocs_list_on_list(&io_to_abort->wqe.link)) { io_to_abort->wqe.abort_wqe_submit_needed = 1; io_to_abort->wqe.send_abts = send_abts; io_to_abort->wqe.id = id; io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag; sli_queue_unlock(io_to_abort->wq->queue); return 0; } sli_queue_unlock(io_to_abort->wq->queue); } if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask, io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) { ocs_log_err(hw->os, "ABORT WQE error\n"); io_to_abort->abort_reqtag = UINT32_MAX; ocs_hw_reqtag_free(hw, wqcb); rc = OCS_HW_RTN_ERROR; } if (OCS_HW_RTN_SUCCESS == rc) { if (io_to_abort->wq == NULL) { io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort); ocs_hw_assert(io_to_abort->wq != NULL); } /* ABORT_WQE does not actually utilize an XRI on the Port, * therefore, keep xbusy as-is to track the exchange's state, * not the ABORT_WQE's state */ rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe); if (rc > 0) { /* non-negative return is success */ rc = 0; /* can't abort an abort so skip adding to timed wqe list */ } } if (OCS_HW_RTN_SUCCESS != rc) { ocs_lock(&hw->io_abort_lock); io_to_abort->abort_in_progress = 0; ocs_unlock(&hw->io_abort_lock); ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ } return rc; } /** * @ingroup io * @brief Return the OX_ID/RX_ID of the IO. * * @param hw Hardware context. * @param io HW IO object. * * @return Returns X_ID on success, or -1 on failure. */ int32_t ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io) { if (!hw || !io) { ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n", hw, io); return -1; } return io->indicator; } typedef struct ocs_hw_fw_write_cb_arg { ocs_hw_fw_cb_t cb; void *arg; } ocs_hw_fw_write_cb_arg_t; typedef struct ocs_hw_sfp_cb_arg { ocs_hw_sfp_cb_t cb; void *arg; ocs_dma_t payload; } ocs_hw_sfp_cb_arg_t; typedef struct ocs_hw_temp_cb_arg { ocs_hw_temp_cb_t cb; void *arg; } ocs_hw_temp_cb_arg_t; typedef struct ocs_hw_link_stat_cb_arg { ocs_hw_link_stat_cb_t cb; void *arg; } ocs_hw_link_stat_cb_arg_t; typedef struct ocs_hw_host_stat_cb_arg { ocs_hw_host_stat_cb_t cb; void *arg; } ocs_hw_host_stat_cb_arg_t; typedef struct ocs_hw_dump_get_cb_arg { ocs_hw_dump_get_cb_t cb; void *arg; void *mbox_cmd; } ocs_hw_dump_get_cb_arg_t; typedef struct ocs_hw_dump_clear_cb_arg { ocs_hw_dump_clear_cb_t cb; void *arg; void *mbox_cmd; } ocs_hw_dump_clear_cb_arg_t; /** * @brief Write a portion of a firmware image to the device. 
* * @par Description * Calls the correct firmware write function based on the device type. * * @param hw Hardware context. * @param dma DMA structure containing the firmware image chunk. * @param size Size of the firmware image chunk. * @param offset Offset, in bytes, from the beginning of the firmware image. * @param last True if this is the last chunk of the image. * Causes the image to be committed to flash. * @param cb Pointer to a callback function that is called when the command completes. * The callback function prototype is * void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg) { if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) { return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg); } else { /* firmware_write is not supported for BE3/Skyhawk */ return -1; } } /** * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer). * * @par Description * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a * firmware image chunk, and then sends the command with ocs_hw_command(). On completion, * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox * and to signal the caller that the write has completed. * * @param hw Hardware context. * @param dma DMA structure containing the firmware image chunk. * @param size Size of the firmware image chunk. * @param offset Offset, in bytes, from the beginning of the firmware image. * @param last True if this is the last chunk of the image. Causes the image to be committed to flash. * @param cb Pointer to a callback function that is called when the command completes. * The callback function prototype is * void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; uint8_t *mbxdata; ocs_hw_fw_write_cb_arg_t *cb_arg; int noc = 0; /* No Commit bit - set to 1 for testing */ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last, size, offset, "/prg/", dma)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); } return rc; } /** * @brief Called when the WRITE OBJECT command completes.
* * @par Description * Get the number of bytes actually written out of the response, free the mailbox * that was malloc'd by ocs_hw_firmware_write(), * then call the callback and pass the status and bytes written. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg). * * @return Returns 0. */ static int32_t ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed); ocs_hw_fw_write_cb_arg_t *cb_arg = arg; uint32_t bytes_written; uint16_t mbox_status; uint32_t change_status; bytes_written = wr_obj_rsp->actual_write_length; mbox_status = mbox_rsp->hdr.status; change_status = wr_obj_rsp->change_status; ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_status) { status = mbox_status; } cb_arg->cb(status, bytes_written, change_status, cb_arg->arg); } ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); } return 0; } /** * @brief Called when the READ_TRANSCEIVER_DATA command completes. * * @par Description * Get the number of bytes read out of the response, free the mailbox that was malloc'd * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes read. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is * void cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg). * * @return Returns 0. */ static int32_t ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_sfp_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = NULL; sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL; uint32_t bytes_written; if (cb_arg) { payload = &(cb_arg->payload); if (cb_arg->cb) { mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt; bytes_written = mbox_rsp->hdr.response_length; if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg); } ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @ingroup io * @brief Function to retrieve the SFP information. * * @param hw Hardware context. * @param page The page of SFP data to retrieve (0xa0 or 0xa2). * @param cb Function to call upon completion of retrieving the data (may be NULL). * @param arg Argument to pass to the completion function. * * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
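 * * @par Example * A minimal usage sketch; my_sfp_done is a hypothetical handler matching the * ocs_hw_sfp_cb_t prototype and my_ctx is a caller-owned context pointer: * @code * if (ocs_hw_get_sfp(hw, 0xa0, my_sfp_done, my_ctx) != OCS_HW_RTN_SUCCESS) { * ocs_log_err(hw->os, "get_sfp failed\n"); * } * @endcode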
*/ ocs_hw_rtn_e ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_sfp_cb_arg_t *cb_arg; uint8_t *mbxdata; /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; /* payload holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t), OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } /* Send the HW command */ if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n", rc); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); } return rc; } /** * @brief Function to retrieve the temperature information. * * @param hw Hardware context. * @param cb Function call upon completion of sending the data (may be NULL). * @param arg Argument to pass to IO completion function. * * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. */ ocs_hw_rtn_e ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_temp_cb_arg_t *cb_arg; uint8_t *mbxdata; mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE, SLI4_WKI_TAG_SAT_TEM)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "DUMP_TYPE4 failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); } return rc; } /** * @brief Called when the DUMP command completes. * * @par Description * Get the temperature data out of the response, free the mailbox that was malloc'd * by ocs_hw_get_temperature(), then call the callback and pass the status and data. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is defined by ocs_hw_temp_cb_t. * * @return Returns 0. 
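 * * @par Example * This callback is reached through ocs_hw_get_temperature(); a hedged sketch of * the originating call, where my_temp_done is a hypothetical ocs_hw_temp_cb_t * handler: * @code * if (ocs_hw_get_temperature(hw, my_temp_done, my_ctx) != OCS_HW_RTN_SUCCESS) { * ocs_log_err(hw->os, "get_temperature failed\n"); * } * @endcode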
*/ static int32_t ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe; ocs_hw_temp_cb_arg_t *cb_arg = arg; uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */ uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/ uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */ uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */ uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */ uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */ if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, curr_temp, crit_temp_thrshld, warn_temp_thrshld, norm_temp_thrshld, fan_off_thrshld, fan_on_thrshld, cb_arg->arg); } ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Function to retrieve the link statistics. * * @param hw Hardware context. * @param req_ext_counters If TRUE, then the extended counters will be requested. * @param clear_overflow_flags If TRUE, then overflow flags will be cleared. * @param clear_all_counters If TRUE, the counters will be cleared. * @param cb Function call upon completion of sending the data (may be NULL). * @param arg Argument to pass to IO completion function. * * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. */ ocs_hw_rtn_e ocs_hw_get_link_stats(ocs_hw_t *hw, uint8_t req_ext_counters, uint8_t clear_overflow_flags, uint8_t clear_all_counters, ocs_hw_link_stat_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_link_stat_cb_arg_t *cb_arg; uint8_t *mbxdata; mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE, req_ext_counters, clear_overflow_flags, clear_all_counters)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_LINK_STATS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t)); } return rc; } /** * @brief Called when the READ_LINK_STAT command completes. * * @par Description * Get the counters out of the response, free the mailbox that was malloc'd * by ocs_hw_get_link_stats(), then call the callback and pass the status and data. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is defined by ocs_hw_link_stat_cb_t. * * @return Returns 0. */ static int32_t ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe; ocs_hw_link_stat_cb_arg_t *cb_arg = arg; ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX]; uint32_t num_counters = (mbox_rsp->gec ? 
20 : 13); ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) * OCS_HW_LINK_STAT_MAX); counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of; counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of; counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of; counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of; counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of; counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of; counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of; counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of; counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of; counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of; counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of; counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of; counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of; counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of; counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of; counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of; counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of; counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count; counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count; counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count; counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count; counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count; counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count; counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count; counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count; counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count; counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit; counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit; counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit; counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit; counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count; counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count; counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count; counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count; counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = 
mbox_rsp->received_dropped_no_available_xri_resources_count; if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, num_counters, counts, cb_arg->arg); } ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Function to retrieve the link and host statistics. * * @param hw Hardware context. * @param cc Clear counters; if TRUE, all counters will be cleared. * @param cb Function to call upon completion of receiving the data. * @param arg Argument to pass to the completion callback (a pointer to the FC host statistics structure). * * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. */ ocs_hw_rtn_e ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_host_stat_cb_arg_t *cb_arg; uint8_t *mbxdata; mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; /* Send the HW command to get the host stats */ if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_HOST_STATS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t)); } return rc; } /** * @brief Called when the READ_STATUS command completes. * * @par Description * Get the counters out of the response, free the mailbox that was malloc'd * by ocs_hw_get_host_stats(), then call the callback and pass * the status and data. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is defined by * ocs_hw_host_stat_cb_t. * * @return Returns 0.
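 * * @par Example * This callback is reached through ocs_hw_get_host_stats(); a hedged sketch of * the originating call, where my_host_stat_done is a hypothetical * ocs_hw_host_stat_cb_t handler and cc == TRUE also clears the counters: * @code * if (ocs_hw_get_host_stats(hw, TRUE, my_host_stat_done, my_ctx) != OCS_HW_RTN_SUCCESS) { * ocs_log_err(hw->os, "get_host_stats failed\n"); * } * @endcode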
*/ static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe; ocs_hw_host_stat_cb_arg_t *cb_arg = arg; ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX]; uint32_t num_counters = OCS_HW_HOST_STAT_MAX; ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) * OCS_HW_HOST_STAT_MAX); counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count; counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count; counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count; counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count; counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count; counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count; counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator; counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder; counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count; counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count; counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count; counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count; counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count; counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count; if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, num_counters, counts, cb_arg->arg); } ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief HW link configuration enum to the CLP string value mapping. * * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the CLP string that is used * in the DMTF_CLP_CMD mailbox command. */ typedef struct ocs_hw_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; const char *clp_str; } ocs_hw_linkcfg_map_t; /** * @brief Mapping from the HW linkcfg enum to the CLP command value * string. */ static ocs_hw_linkcfg_map_t linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"}, {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"}, {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"}, {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"}, {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"}, {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"}, {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}}; /** * @brief HW link configuration enum to Skyhawk link config ID mapping. * * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the link config ID numbers used by Skyhawk */ typedef struct ocs_hw_skyhawk_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; uint32_t config_id; } ocs_hw_skyhawk_linkcfg_map_t; /** * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs */ static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, 0x0a}, {OCS_HW_LINKCFG_1X40G, 0x09}, }; /** * @brief Helper function for getting the HW linkcfg enum from the CLP * string value * * @param clp_str CLP string value from OEMELX_LinkConfig. * * @return Returns the HW linkcfg enum corresponding to clp_str. 
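 * * @par Example * The lookup is a straight table match against linkcfg_map: * @code * ocs_hw_linkcfg_e cfg = ocs_hw_linkcfg_from_clp("ELX_4x10G"); * // cfg == OCS_HW_LINKCFG_4X10G; unmatched strings yield OCS_HW_LINKCFG_NA * @endcode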
*/ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_clp(const char *clp_str) { uint32_t i; for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) { return linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Helper function for getting the CLP string value from the HW * linkcfg enum. * * @param linkcfg HW linkcfg enum. * * @return Returns the OEMELX_LinkConfig CLP string value corresponding to * given linkcfg. */ static const char * ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { if (linkcfg_map[i].linkcfg == linkcfg) { return linkcfg_map[i].clp_str; } } return NULL; } /** * @brief Helper function for getting a Skyhawk link config ID from the HW * linkcfg enum. * * @param linkcfg HW linkcfg enum. * * @return Returns the Skyhawk link config ID corresponding to * given linkcfg. */ static uint32_t ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) { return skyhawk_linkcfg_map[i].config_id; } } return 0; } /** * @brief Helper function for getting the HW linkcfg enum from a * Skyhawk config ID. * * @param config_id Skyhawk link config ID. * * @return Returns the HW linkcfg enum corresponding to config_id. */ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_config_id(const uint32_t config_id) { uint32_t i; for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { if (skyhawk_linkcfg_map[i].config_id == config_id) { return skyhawk_linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Link configuration callback argument. */ typedef struct ocs_hw_linkcfg_cb_arg_s { ocs_hw_port_control_cb_t cb; void *arg; uint32_t opts; int32_t status; ocs_dma_t dma_cmd; ocs_dma_t dma_resp; uint32_t result_len; } ocs_hw_linkcfg_cb_arg_t; /** * @brief Set link configuration. * * @param hw Hardware context. * @param value Link configuration enum to which the link configuration is * set. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { if (!sli_link_is_configurable(&hw->sli)) { ocs_log_debug(hw->os, "Function not supported\n"); return OCS_HW_RTN_ERROR; } if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg); } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg); } else { ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n"); return OCS_HW_RTN_ERROR; } } /** * @brief Set link configuration for Lancer * * @param hw Hardware context. * @param value Link configuration enum to which the link configuration is * set. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
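 * * @par Example * For value == OCS_HW_LINKCFG_2X16G, the DMA command buffer carries the CLP * string built from linkcfg_map: * @code * set / OEMELX_LinkConfig=ELX_2x16G * @endcode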
*/ static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; ocs_hw_linkcfg_cb_arg_t *cb_arg; const char *value_str = NULL; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* translate ocs_hw_linkcfg_e to CLP string */ value_str = ocs_hw_clp_from_linkcfg(value); /* allocate memory for callback argument */ cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); return OCS_HW_RTN_NO_MEMORY; } ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str); /* allocate DMA for command */ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); return OCS_HW_RTN_NO_MEMORY; } ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1); ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd)); /* allocate DMA for response */ if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->opts = opts; rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp, opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg); if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { /* if failed, or polling, free memory here; if success and not * polling, will free in callback function */ if (rc) { ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n", (char *)cb_arg->dma_cmd.virt); } ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_dma_free(hw->os, &cb_arg->dma_resp); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } return rc; } /** * @brief Callback for ocs_hw_set_linkcfg_skyhawk * * @param hw Hardware context. * @param status Status from the RECONFIG_GET_LINK_INFO command. * @param mqe Mailbox response structure. * @param arg Pointer to a callback argument. * * @return none */ static void ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg; if (status) { ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status); } /* invoke callback */ if (cb_arg->cb) { cb_arg->cb(status, 0, cb_arg->arg); } /* if polling, will free memory in calling function */ if (cb_arg->opts != OCS_CMD_POLL) { ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } } /** * @brief Set link configuration for a Skyhawk * * @param hw Hardware context. * @param value Link configuration enum to which the link configuration is * set. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
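 * * @par Example * Only configurations present in skyhawk_linkcfg_map are accepted (for * example, OCS_HW_LINKCFG_1X40G maps to config ID 0x09); an unmapped value * fails before any mailbox command is built: * @code * ocs_hw_rtn_e rc = ocs_hw_set_linkcfg_skyhawk(hw, OCS_HW_LINKCFG_2X16G, OCS_CMD_POLL, NULL, NULL); * // rc == OCS_HW_RTN_ERROR and "Link config %d not supported by Skyhawk" is logged * @endcode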
*/ static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { uint8_t *mbxdata; ocs_hw_linkcfg_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint32_t config_id; config_id = ocs_hw_config_id_from_linkcfg(value); if (config_id == 0) { ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value); return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t)); } else if (opts == OCS_CMD_POLL) { /* if we're polling we have to call the callback here. */ ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t)); } else { /* We weren't polling, so the callback got called */ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); } return rc; } /** * @brief Get link configuration. * * @param hw Hardware context. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { if (!sli_link_is_configurable(&hw->sli)) { ocs_log_debug(hw->os, "Function not supported\n"); return OCS_HW_RTN_ERROR; } if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) { return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg); } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg); } else { ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n"); return OCS_HW_RTN_ERROR; } } /** * @brief Get link configuration for a Lancer * * @param hw Hardware context. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success.
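 * * @par Example * The query side issues the complementary CLP command: * @code * show / OEMELX_LinkConfig * @endcode * The "retdata" keyword of the response is then translated back to an * ocs_hw_linkcfg_e by ocs_hw_linkcfg_from_clp() in the completion path.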
*/ static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; ocs_hw_linkcfg_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* allocate memory for callback argument */ cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); return OCS_HW_RTN_NO_MEMORY; } ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig"); /* allocate DMA for command */ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); return OCS_HW_RTN_NO_MEMORY; } /* copy CLP command to DMA command */ ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1); ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd)); /* allocate DMA for response */ if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->opts = opts; rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp, opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg); if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { /* if failed or polling, free memory here; if not polling and success, * will free in callback function */ if (rc) { ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n", (char *)cb_arg->dma_cmd.virt); } ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_dma_free(hw->os, &cb_arg->dma_resp); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } return rc; } /** * @brief Get the link configuration callback. * * @param hw Hardware context. * @param status Status from the RECONFIG_GET_LINK_INFO command. * @param mqe Mailbox response structure. * @param arg Pointer to a callback argument. * * @return none */ static void ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg; sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt; ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA; if (status) { ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status); } else { /* Call was successful */ value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id); } /* invoke callback */ if (cb_arg->cb) { cb_arg->cb(status, value, cb_arg->arg); } /* if polling, will free memory in calling function */ if (cb_arg->opts != OCS_CMD_POLL) { ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } } /** * @brief Get link configuration for a Skyhawk. * * @param hw Hardware context. * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). * @param cb Callback function to invoke following mbx command. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success. 
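 * * @par Example * A hedged polling sketch; with opts == OCS_CMD_POLL the completion callback * is invoked from this function and all memory is freed before it returns. * my_linkcfg_cb is a hypothetical ocs_hw_port_control_cb_t handler: * @code * ocs_hw_rtn_e rc = ocs_hw_get_linkcfg_skyhawk(hw, OCS_CMD_POLL, my_linkcfg_cb, my_ctx); * @endcode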
*/ static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) { uint8_t *mbxdata; ocs_hw_linkcfg_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->opts = opts; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t)); } else if (opts == OCS_CMD_POLL) { /* if we're polling we have to call the callback here. */ ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t)); } else { /* We weren't polling, so the callback got called */ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); } return rc; } /** * @brief Sets the DIF seed value. * * @param hw Hardware context. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_dif_seed_t seed_param; ocs_memset(&seed_param, 0, sizeof(seed_param)); seed_param.seed = hw->config.dif_seed; /* send set_features command */ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_DIF_SEED, 4, (uint32_t*)&seed_param)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_debug(hw->os, "DIF seed set to 0x%x\n", hw->config.dif_seed); } } else { ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n"); rc = OCS_HW_RTN_ERROR; } return rc; } /** * @brief Sets the DIF mode value. * * @param hw Hardware context. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_t10_pi_mem_model_t mode_param; ocs_memset(&mode_param, 0, sizeof(mode_param)); mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ?
0 : 1); /* send set_features command */ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_DIF_MEMORY_MODE, sizeof(mode_param), (uint32_t*)&mode_param)) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "DIF mode set to %s\n", (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate")); } } else { ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n"); rc = OCS_HW_RTN_ERROR; } return rc; } static void ocs_hw_watchdog_timer_cb(void *arg) { ocs_hw_t *hw = (ocs_hw_t *)arg; ocs_hw_config_watchdog_timer(hw); return; } static void ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { uint16_t timeout = hw->watchdog_timeout; if (status != 0) { ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status); } else { if(timeout != 0) { /* keeping callback 500ms before timeout to keep heartbeat alive */ ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) ); }else { ocs_del_timer(&hw->watchdog_timer); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return; } /** * @brief Set configuration parameters for watchdog timer feature. * * @param hw Hardware context. * @param timeout Timeout for watchdog timer in seconds * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout); rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL); if (rc) { ocs_free(hw->os, buf, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc); } return rc; } /** * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature. * * @param hw Hardware context. * @param buf Pointer to a mailbox buffer area. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; sli4_req_common_set_features_xfer_rdy_t10pi_t param; ocs_memset(¶m, 0, sizeof(param)); param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1); param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0); param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1); param.app_tag = hw->config.auto_xfer_rdy_app_tag_value; param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip; switch (hw->config.auto_xfer_rdy_p_type) { case 1: param.p_type = 0; break; case 3: param.p_type = 2; break; default: ocs_log_err(hw->os, "unsupported p_type %d\n", hw->config.auto_xfer_rdy_p_type); return OCS_HW_RTN_ERROR; } /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI, sizeof(param), ¶m); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n", param.rtc, param.atv, param.p_type, param.app_tag, param.blk_size); } return rc; } /** * @brief enable sli port health check * * @param hw Hardware context. * @param buf Pointer to a mailbox buffer area. 
* @param query If 1, query the current state of the health check feature instead of changing it. * @param enable If 1, enable the health check; if 0, disable it. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_health_check_t param; ocs_memset(&param, 0, sizeof(param)); param.hck = enable; param.qry = query; /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } else { ocs_log_test(hw->os, "SLI Port Health Check is enabled\n"); } return rc; } /** * @brief Set the FDT transfer hint feature. * * @param hw Hardware context. * @param fdt_xfer_hint Size, in bytes, at which read requests are segmented. * * @return Returns OCS_HW_RTN_SUCCESS on success. */ static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; sli4_req_common_set_features_set_fdt_xfer_hint_t param; ocs_memset(&param, 0, sizeof(param)); param.fdt_xfer_hint = fdt_xfer_hint; /* build the set_features command */ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param); rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc); } else { ocs_log_debug(hw->os, "Set FDT transfer hint to %d\n", param.fdt_xfer_hint); } return rc; } /** * @brief Completion callback for the get link configuration (DMTF CLP) command. * * @param hw Hardware context. * @param status Status from the DMTF CLP command. * @param result_len Length, in bytes, of the DMTF CLP result. * @param arg Pointer to a callback argument. * * @return None. */ static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg) { int32_t rval; char retdata_str[64]; ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg; ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA; if (status) { ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status); } else { /* parse CLP response to get return data */ rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str, sizeof(retdata_str), cb_arg->dma_resp.virt, result_len); if (rval <= 0) { ocs_log_err(hw->os, "failed to get retdata %d\n", result_len); } else { /* translate string into hw enum */ linkcfg = ocs_hw_linkcfg_from_clp(retdata_str); } } /* invoke callback */ if (cb_arg->cb) { cb_arg->cb(status, linkcfg, cb_arg->arg); } /* if polling, will free memory in calling function */ if (cb_arg->opts != OCS_CMD_POLL) { ocs_dma_free(hw->os, &cb_arg->dma_cmd); ocs_dma_free(hw->os, &cb_arg->dma_resp); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } } /** * @brief Set the Lancer dump location * @par Description * This function tells a Lancer chip to use a specific DMA * buffer as a dump location rather than the internal flash. * * @param hw Hardware context. * @param num_buffers The number of DMA buffers to hold the dump (1..n). * @param dump_buffers DMA buffers to hold the dump. * @param fdb When zero, the chip-level dump location is set, which is only valid from PCI function 0. * * @return Returns OCS_HW_RTN_SUCCESS on success.
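*
* @par Example
* An illustrative caller sketch, not taken from this driver; the 4KB
* buffer size and the single-buffer, fdb=0 usage are assumptions:
* @code
* ocs_dma_t dump_buf;	/* hypothetical dump buffer */
*
* if (ocs_dma_alloc(hw->os, &dump_buf, 4096, OCS_MIN_DMA_ALIGNMENT) == 0) {
*	if (ocs_hw_set_dump_location(hw, 1, &dump_buf, 0) != OCS_HW_RTN_SUCCESS) {
*		ocs_dma_free(hw->os, &dump_buf);
*	}
* }
* @endcode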
*/ ocs_hw_rtn_e ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb) { uint8_t bus, dev, func; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint8_t buf[SLI4_BMBX_SIZE]; /* * Make sure the FW is new enough to support this command. If the FW * is too old, the FW will UE. */ if (hw->workaround.disable_dump_loc) { ocs_log_test(hw->os, "FW version is too old for this feature\n"); return OCS_HW_RTN_ERROR; } /* This command is only valid for physical port 0 */ ocs_get_bus_dev_func(hw->os, &bus, &dev, &func); if (fdb == 0 && func != 0) { ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n", func); return OCS_HW_RTN_ERROR; } /* * If a single buffer is used, then it may be passed as is to the chip. For multiple buffers, * we must allocate an SGL list and then pass the address of the list to the chip. */ if (num_buffers > 1) { uint32_t sge_size = num_buffers * sizeof(sli4_sge_t); sli4_sge_t *sge; uint32_t i; if (hw->dump_sges.size < sge_size) { ocs_dma_free(hw->os, &hw->dump_sges); if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "SGE DMA allocation failed\n"); return OCS_HW_RTN_NO_MEMORY; } } /* build the SGE list */ ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size); hw->dump_sges.len = sge_size; sge = hw->dump_sges.virt; for (i = 0; i < num_buffers; i++) { sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys); sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys); sge[i].last = (i == num_buffers - 1 ? 1 : 0); sge[i].buffer_length = dump_buffers[i].size; } rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf, SLI4_BMBX_SIZE, FALSE, TRUE, &hw->dump_sges, fdb); } else { dump_buffers->len = dump_buffers->size; rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf, SLI4_BMBX_SIZE, FALSE, FALSE, dump_buffers, fdb); } if (rc) { rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); if (rc) { ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); } } else { ocs_log_err(hw->os, "sli_cmd_common_set_dump_location failed\n"); rc = OCS_HW_RTN_ERROR; } return rc; } /** * @brief Set the Ethernet license. * * @par Description * This function sends the appropriate mailbox command (DMTF * CLP) to set the Ethernet license to the given license value. * Since it is used during the time of ocs_hw_init(), the mailbox * command is sent via polling (the BMBX route). * * @param hw Hardware context. * @param license 32-bit license value. * * @return Returns OCS_HW_RTN_SUCCESS on success.
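*
* @par Example
* A minimal sketch of an init-time caller; the license value is a
* placeholder, not a real license:
* @code
* if (ocs_hw_set_eth_license(hw, 0x12345678) != OCS_HW_RTN_SUCCESS) {
*	ocs_log_err(hw->os, "Ethernet license update failed\n");
* }
* @endcode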
*/ static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; ocs_dma_t dma_cmd; ocs_dma_t dma_resp; /* only for lancer right now */ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license); /* allocate DMA for command */ if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); return OCS_HW_RTN_NO_MEMORY; } ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1); ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd)); /* allocate DMA for response */ if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { ocs_log_err(hw->os, "malloc failed\n"); ocs_dma_free(hw->os, &dma_cmd); return OCS_HW_RTN_NO_MEMORY; } /* send DMTF CLP command mbx and poll */ if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) { ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt); rc = OCS_HW_RTN_ERROR; } ocs_dma_free(hw->os, &dma_cmd); ocs_dma_free(hw->os, &dma_resp); return rc; } /** * @brief Callback argument structure for the DMTF CLP commands. */ typedef struct ocs_hw_clp_cb_arg_s { ocs_hw_dmtf_clp_cb_t cb; ocs_dma_t *dma_resp; int32_t status; uint32_t opts; void *arg; } ocs_hw_clp_cb_arg_t; /** * @brief Execute the DMTF CLP command. * * @param hw Hardware context. * @param dma_cmd DMA buffer containing the CLP command. * @param dma_resp DMA buffer that will contain the response (if successful). * @param opts Mailbox command options (OCS_CMD_NOWAIT or OCS_CMD_POLL). * @param cb Callback function. * @param arg Callback argument. * * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero * value on failure.
*/ static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; ocs_hw_clp_cb_arg_t *cb_arg; uint8_t *mbxdata; /* allocate DMA for mailbox */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* allocate memory for callback argument */ cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->dma_resp = dma_resp; cb_arg->opts = opts; /* Send the HW command */ if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE, dma_cmd, dma_resp)) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg); if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) { /* if we're polling, copy response and invoke callback to * parse result */ ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg); /* set rc to resulting or "parsed" status */ rc = cb_arg->status; } /* if failed, or polling, free memory here */ if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "ocs_hw_command failed\n"); } ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } } else { ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n"); rc = OCS_HW_RTN_ERROR; ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); } return rc; } /** * @brief Called when the DMTF CLP command completes. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback argument. * * @return None. 
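*
* @par Note
* Three status layers are condensed into the single callback status, in
* this order of precedence: the mailbox completion status, the MQE header
* status, and the CLP status. The CLP detailed status is only logged, so
* a caller that needs it must parse the response buffer itself.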
* */ static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { int32_t cb_status = 0; sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed; ocs_hw_clp_cb_arg_t *cb_arg = arg; uint32_t result_len = 0; int32_t stat_len; char stat_str[8]; /* there are several status codes here, check them all and condense * into a single callback status */ if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) { ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n", status, mbox_rsp->hdr.status, clp_rsp->hdr.status, clp_rsp->hdr.additional_status, clp_rsp->clp_status, clp_rsp->clp_detailed_status); if (status) { cb_status = status; } else if (mbox_rsp->hdr.status) { cb_status = mbox_rsp->hdr.status; } else { cb_status = clp_rsp->clp_status; } } else { result_len = clp_rsp->resp_length; } if (cb_status) { goto ocs_hw_cb_dmtf_clp_done; } if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) { ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n", cb_arg->dma_resp->size, result_len); cb_status = -1; goto ocs_hw_cb_dmtf_clp_done; } /* parse CLP response to get status */ stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str, sizeof(stat_str), cb_arg->dma_resp->virt, result_len); if (stat_len <= 0) { ocs_log_test(hw->os, "failed to get status %d\n", stat_len); cb_status = -1; goto ocs_hw_cb_dmtf_clp_done; } if (ocs_strcmp(stat_str, "0") != 0) { ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str); cb_status = -1; goto ocs_hw_cb_dmtf_clp_done; } ocs_hw_cb_dmtf_clp_done: /* save status in cb_arg for callers with NULL cb's + polling */ cb_arg->status = cb_status; if (cb_arg->cb) { cb_arg->cb(hw, cb_status, result_len, cb_arg->arg); } /* if polling, caller will free memory */ if (cb_arg->opts != OCS_CMD_POLL) { ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); } } /** * @brief Parse the CLP result and get the value corresponding to the given * keyword. * * @param hw Hardware context. * @param keyword CLP keyword for which the value is returned. * @param value Location to which the resulting value is copied. * @param value_len Length of the value parameter. * @param resp Pointer to the response buffer that is searched * for the keyword and value. * @param resp_len Length of response buffer passed in. * * @return Returns the number of bytes written to the value * buffer on success, or a negative value on failure.
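*
* @par Example
* The CLP response is a series of "keyword=value\r\n" pairs. An
* illustrative sketch (resp_buf and resp_len stand in for a real
* response):
* @code
* char stat_str[8];
* int32_t n;
*
* n = ocs_hw_clp_resp_get_value(hw, "status", stat_str, sizeof(stat_str),
*			       resp_buf, resp_len);
* if (n <= 0) {
*	ocs_log_test(hw->os, "no status keyword in CLP response\n");
* }
* @endcode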
*/ static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len) { char *start = NULL; char *end = NULL; /* look for specified keyword in string */ start = ocs_strstr(resp, keyword); if (start == NULL) { ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n", keyword); return -1; } /* now look for '=' and go one past */ start = ocs_strchr(start, '='); if (start == NULL) { ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n", keyword); return -1; } start++; /* \r\n terminates value */ end = ocs_strstr(start, "\r\n"); if (end == NULL) { ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n", keyword); return -1; } /* make sure given result array is big enough */ if ((end - start + 1) > value_len) { ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n", value_len, (end-start)); return -1; } ocs_strncpy(value, start, (end - start)); value[end-start] = '\0'; return (end-start+1); } /** * @brief Cause chip to enter an unrecoverable error state. * * @par Description * Cause chip to enter an unrecoverable error state. This is * used when detecting unexpected FW behavior so that the FW can be * halted from the driver as soon as the error is detected. * * @param hw Hardware context. * @param dump Generate dump as part of reset. * * @return Returns 0 on success, or a non-zero value on failure. * */ ocs_hw_rtn_e ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; if (sli_raise_ue(&hw->sli, dump) != 0) { rc = OCS_HW_RTN_ERROR; } else { if (hw->state != OCS_HW_STATE_UNINITIALIZED) { hw->state = OCS_HW_STATE_QUEUES_ALLOCATED; } } return rc; } /** * @brief Called when the OBJECT_GET command completes. * * @par Description * Get the number of bytes actually written out of the response, free the mailbox * that was malloc'd by ocs_hw_dump_get(), then call the callback * and pass the status and bytes read. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg). * * @return Returns 0. */ static int32_t ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed; ocs_hw_dump_get_cb_arg_t *cb_arg = arg; uint32_t bytes_read; uint8_t eof; bytes_read = rd_obj_rsp->actual_read_length; eof = rd_obj_rsp->eof; if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, bytes_read, eof, cb_arg->arg); } ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t)); } return 0; } /** * @brief Read a dump image to the host. * * @par Description * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a * dump image chunk, then sends the command with ocs_hw_command(). On completion, * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox * and signal the caller that the read has completed. * * @param hw Hardware context. * @param dma DMA structure to transfer the dump chunk into. * @param size Size of the dump chunk.
* @param offset Offset, in bytes, from the beginning of the dump. * @param cb Pointer to a callback function that is called when the command completes. * The callback function prototype is * void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; uint8_t *mbxdata; ocs_hw_dump_get_cb_arg_t *cb_arg; uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL); if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } if (1 != sli_dump_is_present(&hw->sli)) { ocs_log_test(hw->os, "No dump is present\n"); return OCS_HW_RTN_ERROR; } if (1 == sli_reset_required(&hw->sli)) { ocs_log_test(hw->os, "device reset required\n"); return OCS_HW_RTN_ERROR; } mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->mbox_cmd = mbxdata; if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, size, offset, "/dbg/dump.bin", dma)) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg); if (rc == 0 && opts == OCS_CMD_POLL) { ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg); } } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t)); } return rc; } /** * @brief Called when the OBJECT_DELETE command completes. * * @par Description * Free the mailbox that was malloc'd * by ocs_hw_dump_clear(), then call the callback and pass the status. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * The callback function prototype is void cb(int32_t status, void *arg). * * @return Returns 0. */ static int32_t ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_dump_clear_cb_arg_t *cb_arg = arg; sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; if (cb_arg) { if (cb_arg->cb) { if ((status == 0) && mbox_rsp->hdr.status) { status = mbox_rsp->hdr.status; } cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); } return 0; } /** * @brief Clear a dump image from the device. * * @par Description * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear * the dump, then sends the command with ocs_hw_command(). On completion, * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox * and to signal the caller that the write has completed. * * @param hw Hardware context. * @param cb Pointer to a callback function that is called when the command completes. 
* The callback function prototype is * void cb(int32_t status, void *arg). * @param arg Pointer to be passed to the callback function. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg) { ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; uint8_t *mbxdata; ocs_hw_dump_clear_cb_arg_t *cb_arg; uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL); if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); return OCS_HW_RTN_ERROR; } mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = arg; cb_arg->mbox_cmd = mbxdata; if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, "/dbg/dump.bin")) { rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg); if (rc == 0 && opts == OCS_CMD_POLL) { ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg); } } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_port_protocol_cb_arg_s { ocs_get_port_protocol_cb_t cb; void *arg; uint32_t pci_func; ocs_dma_t payload; } ocs_hw_get_port_protocol_cb_arg_t; /** * @brief Called for the completion of get_port_protocol for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; ocs_hw_port_protocol_e port_protocol; int num_descriptors; sli4_resource_descriptor_v1_t *desc_p; sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; int i; port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; num_descriptors = response->desc_count; desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; i<num_descriptors; i++) { if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; if (pcie_desc_p->pf_number == cb_arg->pci_func) { switch(pcie_desc_p->pf_type) { case 0x02: port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; break; case 0x04: port_protocol = OCS_HW_PORT_PROTOCOL_FCOE; break; case 0x10: port_protocol = OCS_HW_PORT_PROTOCOL_FC; break; default: port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; break; } } } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } if (cb_arg->cb) { cb_arg->cb(status, port_protocol, cb_arg->arg); } ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @ingroup io * @brief Get the current port protocol.
* @par Description * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param pci_func PCI function to query for current protocol. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. */ ocs_hw_rtn_e ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func, ocs_get_port_protocol_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_port_protocol_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; cb_arg->pci_func = pci_func; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); } return rc; } typedef struct ocs_hw_set_port_protocol_cb_arg_s { ocs_set_port_protocol_cb_t cb; void *arg; ocs_dma_t payload; uint32_t new_protocol; uint32_t pci_func; } ocs_hw_set_port_protocol_cb_arg_t; /** * @brief Called for the completion of set_port_protocol for a * user request. * * @par Description * This is the second of two callbacks for the set_port_protocol * function. The set operation is a read-modify-write. This * callback is called when the write (SET_PROFILE_CONFIG) * completes. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return 0 on success, non-zero otherwise */ static int32_t ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb(status, cb_arg->arg); } ocs_dma_free(hw->os, &(cb_arg->payload)); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); return 0; } /** * @brief Called for the completion of set_port_protocol for a * user request. * * @par Description * This is the first of two callbacks for the set_port_protocol * function. The set operation is a read-modify-write.
This * callback is called when the read completes * (GET_PROFILE_CONFIG). It will update the resource * descriptors, then queue the write (SET_PROFILE_CONFIG). * * On entry there are three memory areas that were allocated by * ocs_hw_set_port_protocol. If a failure is detected in this * function those need to be freed. If this function succeeds * it allocates three more areas. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value otherwise. */ static int32_t ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; int num_descriptors; sli4_resource_descriptor_v1_t *desc_p; sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; int i; ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg; ocs_hw_port_protocol_e new_protocol; uint8_t *dst; sli4_isap_resouce_descriptor_v1_t *isap_desc_p; uint8_t *mbxdata; int pci_descriptor_count; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; int num_fcoe_ports = 0; int num_iscsi_ports = 0; new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol; num_descriptors = response->desc_count; /* Count PCI descriptors */ pci_descriptor_count = 0; desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; i<num_descriptors; i++) { if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { ++pci_descriptor_count; } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (new_cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } new_cb_arg->cb = cb_arg->cb; new_cb_arg->arg = cb_arg->arg; /* Allocate memory for the descriptors we're going to send. This is * one for each PCI descriptor plus one ISAP descriptor. */ if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) + (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) + sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &new_cb_arg->payload, 0, pci_descriptor_count+1, 1); /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */ dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc); /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor * that we're trying to change, also set its pf_type.
*/ desc_p = (sli4_resource_descriptor_v1_t *)response->desc; for (i=0; i<num_descriptors; i++) { if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; if (pcie_desc_p->pf_number == cb_arg->pci_func) { /* This is the PCIe descriptor for this OCS instance. * Update it with the new pf_type */ switch(new_protocol) { case OCS_HW_PORT_PROTOCOL_FC: pcie_desc_p->pf_type = SLI4_PROTOCOL_FC; break; case OCS_HW_PORT_PROTOCOL_FCOE: pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE; break; case OCS_HW_PORT_PROTOCOL_ISCSI: pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI; break; default: pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT; break; } } if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) { ++num_fcoe_ports; } if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) { ++num_iscsi_ports; } ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t)); dst += sizeof(sli4_pcie_resource_descriptor_v1_t); } desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); } /* Create an ISAP resource descriptor */ isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst; isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP; isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t); if (num_iscsi_ports > 0) { isap_desc_p->iscsi_tgt = 1; isap_desc_p->iscsi_ini = 1; isap_desc_p->iscsi_dif = 1; } if (num_fcoe_ports > 0) { isap_desc_p->fcoe_tgt = 1; isap_desc_p->fcoe_ini = 1; isap_desc_p->fcoe_dif = 1; } /* At this point we're done with the memory allocated by ocs_hw_set_port_protocol */ ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg); if (rc) { ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n"); /* Call the upper level callback to report a failure */ if (new_cb_arg->cb) { new_cb_arg->cb(rc, new_cb_arg->arg); } /* Free the memory allocated by this function */ ocs_dma_free(hw->os, &new_cb_arg->payload); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); } return rc; } /** * @ingroup io * @brief Set the port protocol. * @par Description * Setting the port protocol is a read-modify-write operation. * This function submits a GET_PROFILE_CONFIG command to read * the current settings. The callback function will modify the * settings and issue the write. * * On successful completion this function will have allocated * two regular memory areas and one DMA area which will need to * get freed later in the callbacks. * * @param hw Hardware context. * @param new_protocol New protocol to use. * @param pci_func PCI function to configure. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error.
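*
* @par Example
* Sketch of switching PCI function 0 to FC; the completion handler below
* is hypothetical:
* @code
* static void
* set_protocol_done(int32_t status, void *arg)
* {
*	/* status is 0 when the SET_PROFILE_CONFIG write succeeded */
* }
*
* ocs_hw_set_port_protocol(hw, OCS_HW_PORT_PROTOCOL_FC, 0,
*			   set_protocol_done, NULL);
* @endcode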
*/ ocs_hw_rtn_e ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol, uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg) { uint8_t *mbxdata; ocs_hw_set_port_protocol_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; cb_arg->new_protocol = new_protocol; cb_arg->pci_func = pci_func; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_profile_list_cb_arg_s { ocs_get_profile_list_cb_t cb; void *arg; ocs_dma_t payload; } ocs_hw_get_profile_list_cb_arg_t; /** * @brief Called for the completion of get_profile_list for a * user request. * @par Description * This function is called when the COMMON_GET_PROFILE_LIST * mailbox completes. The response will be in * ctx->non_embedded_mem.virt. This function parses the * response and creates an ocs_hw_profile_list, then calls the * mgmt_cb callback function and passes that list to it. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure.
*/ static int32_t ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_profile_list_t *list; ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg; ocs_dma_t *payload = &(cb_arg->payload); sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt; int i; int num_descriptors; list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO); if (list == NULL) { ocs_log_err(hw->os, "failed to malloc list\n"); /* free the command resources we own before bailing out */ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } list->num_descriptors = response->profile_descriptor_count; num_descriptors = list->num_descriptors; if (num_descriptors > OCS_HW_MAX_PROFILES) { num_descriptors = OCS_HW_MAX_PROFILES; } for (i=0; i<num_descriptors; i++) { list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id; list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index; ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description); } if (cb_arg->cb) { cb_arg->cb(status, list, cb_arg->arg); } else { ocs_free(hw->os, list, sizeof(*list)); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); return 0; } /** * @ingroup io * @brief Get a list of available profiles. * @par Description * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error.
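*
* @par Example
* Illustrative caller; the handler is hypothetical. Because the HW layer
* frees the list only when no callback is registered, the handler owns
* the returned list and must free it:
* @code
* static void
* profile_list_done(int32_t status, ocs_hw_profile_list_t *list, void *arg)
* {
*	ocs_hw_t *hw = arg;
*
*	/* ... consume list->num_descriptors entries ... */
*	ocs_free(hw->os, list, sizeof(*list));
* }
*
* ocs_hw_get_profile_list(hw, profile_list_done, hw);
* @endcode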
*/ ocs_hw_rtn_e ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_profile_list_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; /* dma_mem holds the non-embedded portion */ if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) { ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_dma_free(hw->os, &cb_arg->payload); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_active_profile_cb_arg_s { ocs_get_active_profile_cb_t cb; void *arg; } ocs_hw_get_active_profile_cb_arg_t; /** * @brief Called for the completion of get_active_profile for a * user request. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg; sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed; uint32_t active_profile; active_profile = response->active_profile_id; if (cb_arg->cb) { cb_arg->cb(status, active_profile, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); return 0; } /** * @ingroup io * @brief Get the currently active profile. * @par Description * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
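*
* @par Example
* Illustrative caller; the handler name is an assumption:
* @code
* static void
* active_profile_done(int32_t status, uint32_t active_profile, void *arg)
* {
*	/* active_profile holds the active profile ID on success */
* }
*
* ocs_hw_get_active_profile(hw, active_profile_done, NULL);
* @endcode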
*/ int32_t ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_active_profile_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); } return rc; } typedef struct ocs_hw_get_nvparms_cb_arg_s { ocs_get_nvparms_cb_t cb; void *arg; } ocs_hw_get_nvparms_cb_arg_t; /** * @brief Called for the completion of get_nvparms for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return 0 on success, non-zero otherwise */ static int32_t ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg; sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe; if (cb_arg->cb) { cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa, mbox_rsp->preferred_d_id, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); return 0; } /** * @ingroup io * @brief Read non-volatile parms. * @par Description * Issues a SLI-4 READ_NVPARMS mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
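*
* @par Example
* Illustrative caller; the handler and its exact parameter types are
* assumptions inferred from ocs_hw_get_nvparms_cb():
* @code
* static void
* nvparms_done(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
*	       uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
* {
*	/* wwpn/wwnn point at the values read from non-volatile storage */
* }
*
* ocs_hw_get_nvparms(hw, nvparms_done, NULL);
* @endcode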
*/ int32_t ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg) { uint8_t *mbxdata; ocs_hw_get_nvparms_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_NVPARMS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); } return rc; } typedef struct ocs_hw_set_nvparms_cb_arg_s { ocs_set_nvparms_cb_t cb; void *arg; } ocs_hw_set_nvparms_cb_arg_t; /** * @brief Called for the completion of set_nvparms for a * user request. * * @param hw Hardware context. * @param status The status from the MQE. * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); return 0; } /** * @ingroup io * @brief Write non-volatile parms. * @par Description * Issues a SLI-4 WRITE_NVPARMS mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context. * @param cb Callback function to be called when the * command completes. * @param wwpn Port's WWPN in big-endian order, or NULL to use default. * @param wwnn Port's WWNN in big-endian order, or NULL to use default. * @param hard_alpa A hard AL_PA address setting used during loop * initialization. If no hard AL_PA is required, set to 0. * @param preferred_d_id A preferred D_ID address setting * that may be overridden with the CONFIG_LINK mailbox command. * If there is no preference, set to 0. * @param ul_arg An argument that is passed to the callback * function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. 
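*
* @par Example
* Illustrative call that keeps the factory WWNs and clears both the hard
* AL_PA and the preferred D_ID; the completion handler is hypothetical:
* @code
* extern void set_nvparms_done(int32_t status, void *arg);
*
* ocs_hw_set_nvparms(hw, set_nvparms_done, NULL, NULL, 0, 0, NULL);
* @endcode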
*/ int32_t ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg) { uint8_t *mbxdata; ocs_hw_set_nvparms_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "SET_NVPARMS failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); } return rc; } /** * @brief Called to obtain the count for the specified type. * * @param hw Hardware context. * @param io_count_type IO count type (inuse, free, wait_free). * * @return Returns the number of IOs on the specified list type. */ uint32_t ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type) { ocs_hw_io_t *io = NULL; uint32_t count = 0; ocs_lock(&hw->io_lock); switch (io_count_type) { case OCS_HW_IO_INUSE_COUNT : ocs_list_foreach(&hw->io_inuse, io) { count++; } break; case OCS_HW_IO_FREE_COUNT : ocs_list_foreach(&hw->io_free, io) { count++; } break; case OCS_HW_IO_WAIT_FREE_COUNT : ocs_list_foreach(&hw->io_wait_free, io) { count++; } break; case OCS_HW_IO_PORT_OWNED_COUNT: ocs_list_foreach(&hw->io_port_owned, io) { count++; } break; case OCS_HW_IO_N_TOTAL_IO_COUNT : count = hw->config.n_io; break; } ocs_unlock(&hw->io_lock); return count; } /** * @brief Called to obtain the count of produced RQEs. * * @param hw Hardware context. * * @return Returns the number of RQEs produced. */ uint32_t ocs_hw_get_rqes_produced_count(ocs_hw_t *hw) { uint32_t count = 0; uint32_t i; uint32_t j; for (i = 0; i < hw->hw_rq_count; i++) { hw_rq_t *rq = hw->hw_rq[i]; if (rq->rq_tracker != NULL) { for (j = 0; j < rq->entry_count; j++) { if (rq->rq_tracker[j] != NULL) { count++; } } } } return count; } typedef struct ocs_hw_set_active_profile_cb_arg_s { ocs_set_active_profile_cb_t cb; void *arg; } ocs_hw_set_active_profile_cb_arg_t; /** * @brief Called for the completion of set_active_profile for a * user request. * * @param hw Hardware context. * @param status The status from the MQE * @param mqe Pointer to mailbox command buffer. * @param arg Pointer to a callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg; if (cb_arg->cb) { cb_arg->cb(status, cb_arg->arg); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t)); return 0; } /** * @ingroup io * @brief Set the currently active profile. * @par Description * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the * command completes the provided mgmt callback function is * called. * * @param hw Hardware context.
* @param profile_id Profile ID to activate. * @param cb Callback function to be called when the command completes. * @param ul_arg An argument that is passed to the callback function. * * @return * - OCS_HW_RTN_SUCCESS on success. * - OCS_HW_RTN_NO_MEMORY if a malloc fails. * - OCS_HW_RTN_NO_RESOURCES if unable to get a command * context. * - OCS_HW_RTN_ERROR on any other error. */ int32_t ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg) { uint8_t *mbxdata; ocs_hw_set_active_profile_cb_arg_t *cb_arg; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Only supported on Skyhawk */ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { return OCS_HW_RTN_ERROR; } /* mbxdata holds the header of the command */ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mbxdata == NULL) { ocs_log_err(hw->os, "failed to malloc mbox\n"); return OCS_HW_RTN_NO_MEMORY; } /* cb_arg holds the data that will be passed to the callback on completion */ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT); if (cb_arg == NULL) { ocs_log_err(hw->os, "failed to malloc cb_arg\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); return OCS_HW_RTN_NO_MEMORY; } cb_arg->cb = cb; cb_arg->arg = ul_arg; if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) { rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n"); ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t)); } return rc; } /* * Private functions */ /** * @brief Update the queue hash with the ID and index. * * @param hash Pointer to hash table. * @param id ID that was created. * @param index The index into the hash object. */ static void ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index) { uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1); /* * Since the hash is always bigger than the number of queues, then we * never have to worry about an infinite loop. */ while(hash[hash_index].in_use) { hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1); } /* not used, claim the entry */ hash[hash_index].id = id; hash[hash_index].in_use = 1; hash[hash_index].index = index; } /** * @brief Find index given queue ID. * * @param hash Pointer to hash table. * @param id ID to find. * * @return Returns the index into the HW cq array or -1 if not found. */ int32_t ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id) { int32_t rc = -1; int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1); /* * Since the hash is always bigger than the maximum number of Qs, then we * never have to worry about an infinite loop. We will always find an * unused entry. 
*/ do { if (hash[index].in_use && hash[index].id == id) { rc = hash[index].index; } else { index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1); } } while(rc == -1 && hash[index].in_use); return rc; } static int32_t ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain) { int32_t rc = OCS_HW_RTN_ERROR; uint16_t fcfi = UINT16_MAX; if ((hw == NULL) || (domain == NULL)) { ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } fcfi = domain->fcf_indicator; if (fcfi < SLI4_MAX_FCFI) { uint16_t fcf_index = UINT16_MAX; ocs_log_debug(hw->os, "adding domain %p @ %#x\n", domain, fcfi); hw->domains[fcfi] = domain; /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { if (hw->first_domain_idx < 0) { hw->first_domain_idx = fcfi; } } fcf_index = domain->fcf; if (fcf_index < SLI4_MAX_FCF_INDEX) { ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n", fcf_index, fcfi); hw->fcf_index_fcfi[fcf_index] = fcfi; rc = OCS_HW_RTN_SUCCESS; } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); hw->domains[fcfi] = NULL; } } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); } return rc; } static int32_t ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain) { int32_t rc = OCS_HW_RTN_ERROR; uint16_t fcfi = UINT16_MAX; if ((hw == NULL) || (domain == NULL)) { ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", hw, domain); return OCS_HW_RTN_ERROR; } fcfi = domain->fcf_indicator; if (fcfi < SLI4_MAX_FCFI) { uint16_t fcf_index = UINT16_MAX; ocs_log_debug(hw->os, "deleting domain %p @ %#x\n", domain, fcfi); if (domain != hw->domains[fcfi]) { ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n", domain, hw->domains[fcfi]); return OCS_HW_RTN_ERROR; } hw->domains[fcfi] = NULL; /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (hw->workaround.override_fcfi) { if (hw->first_domain_idx == fcfi) { hw->first_domain_idx = -1; } } fcf_index = domain->fcf; if (fcf_index < SLI4_MAX_FCF_INDEX) { if (hw->fcf_index_fcfi[fcf_index] == fcfi) { hw->fcf_index_fcfi[fcf_index] = 0; rc = OCS_HW_RTN_SUCCESS; } else { ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n", hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index); } } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); } } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); } return rc; } ocs_domain_t * ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi) { if (hw == NULL) { ocs_log_err(NULL, "bad parameter hw=%p\n", hw); return NULL; } if (fcfi < SLI4_MAX_FCFI) { return hw->domains[fcfi]; } else { ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", fcfi, SLI4_MAX_FCFI); return NULL; } } static ocs_domain_t * ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index) { if (hw == NULL) { ocs_log_err(NULL, "bad parameter hw=%p\n", hw); return NULL; } if (fcf_index < SLI4_MAX_FCF_INDEX) { return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]); } else { ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", fcf_index, SLI4_MAX_FCF_INDEX); return NULL; } } /** * @brief Quarantine an IO by taking a reference count and adding it to the * quarantine list. When the IO is popped from the list then the * count is released and the IO MAY be freed depending on whether * it is still referenced by the IO.
* * @n @b Note: BZ 160124 - If this is a target write or an initiator read using * DIF, then we must add the XRI to a quarantine list until we receive * 4 more completions of this same type. * * @param hw Hardware context. * @param wq Pointer to the WQ associated with the IO object to quarantine. * @param io Pointer to the io object to quarantine. */ static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io) { ocs_quarantine_info_t *q_info = &wq->quarantine_info; uint32_t index; ocs_hw_io_t *free_io = NULL; /* return if the QX bit was clear */ if (!io->quarantine) { return; } /* increment the IO refcount to prevent it from being freed before the quarantine is over */ if (ocs_ref_get_unless_zero(&io->ref) == 0) { /* command no longer active */ ocs_log_debug(hw ? hw->os : NULL, "io not active xri=0x%x tag=0x%x\n", io->indicator, io->reqtag); return; } sli_queue_lock(wq->queue); index = q_info->quarantine_index; free_io = q_info->quarantine_ios[index]; q_info->quarantine_ios[index] = io; q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH; sli_queue_unlock(wq->queue); if (free_io != NULL) { ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */ } } /** * @brief Process entries on the given completion queue. * * @param hw Hardware context. * @param cq Pointer to the HW completion queue object. * * @return None. */ void ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq) { uint8_t cqe[sizeof(sli4_mcqe_t)]; uint16_t rid = UINT16_MAX; sli4_qentry_e ctype; /* completion type */ int32_t status; uint32_t n_processed = 0; time_t tstart; time_t telapsed; tstart = ocs_msectime(); while (!sli_queue_read(&hw->sli, cq->queue, cqe)) { status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); /* * The sign of status is significant. If status is: * == 0 : call completed correctly and the CQE indicated success * > 0 : call completed correctly and the CQE indicated an error * < 0 : call failed and no information is available about the CQE */ if (status < 0) { if (status == -2) { /* Notification that an entry was consumed, but not completed */ continue; } break; } switch (ctype) { case SLI_QENTRY_ASYNC: CPUTRACE("async"); sli_cqe_async(&hw->sli, cqe); break; case SLI_QENTRY_MQ: /* * Process MQ entry. Note there is no way to determine * the MQ_ID from the completion entry. 
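* Because of this, the handler is always invoked on hw->mq.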
*/ CPUTRACE("mq"); ocs_hw_mq_process(hw, status, hw->mq); break; case SLI_QENTRY_OPT_WRITE_CMD: ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe); break; case SLI_QENTRY_OPT_WRITE_DATA: ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe); break; case SLI_QENTRY_WQ: CPUTRACE("wq"); ocs_hw_wq_process(hw, cq, cqe, status, rid); break; case SLI_QENTRY_WQ_RELEASE: { uint32_t wq_id = rid; int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id); if (unlikely(index < 0)) { ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n", index, rid); break; } hw_wq_t *wq = hw->hw_wq[index]; /* Submit any HW IOs that are on the WQ pending list */ hw_wq_submit_pending(wq, wq->wqec_set_count); break; } case SLI_QENTRY_RQ: CPUTRACE("rq"); ocs_hw_rqpair_process_rq(hw, cq, cqe); break; case SLI_QENTRY_XABT: { CPUTRACE("xabt"); ocs_hw_xabt_process(hw, cq, cqe, rid); break; } default: ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid); break; } n_processed++; if (n_processed == cq->queue->proc_limit) { break; } if (cq->queue->n_posted >= (cq->queue->posted_limit)) { sli_queue_arm(&hw->sli, cq->queue, FALSE); } } sli_queue_arm(&hw->sli, cq->queue, TRUE); if (n_processed > cq->queue->max_num_processed) { cq->queue->max_num_processed = n_processed; } telapsed = ocs_msectime() - tstart; if (telapsed > cq->queue->max_process_time) { cq->queue->max_process_time = telapsed; } } /** * @brief Process WQ completion queue entries. * * @param hw Hardware context. * @param cq Pointer to the HW completion queue object. * @param cqe Pointer to WQ completion queue. * @param status Completion status. * @param rid Resource ID (IO tag). * * @return none */ void ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid) { hw_wq_callback_t *wqcb; ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id, ((cq->queue->index - 1) & (cq->queue->length - 1))); if(rid == OCS_HW_REQUE_XRI_REGTAG) { if(status) { ocs_log_err(hw->os, "reque xri failed, status = %d \n", status); } return; } wqcb = ocs_hw_reqtag_get_instance(hw, rid); if (wqcb == NULL) { ocs_log_err(hw->os, "invalid request tag: x%x\n", rid); return; } if (wqcb->callback == NULL) { ocs_log_err(hw->os, "wqcb callback is NULL\n"); return; } (*wqcb->callback)(wqcb->arg, cqe, status); } /** * @brief Process WQ completions for IO requests * * @param arg Generic callback argument * @param cqe Pointer to completion queue entry * @param status Completion status * * @par Description * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool. * * @return None. */ static void ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status) { ocs_hw_io_t *io = arg; ocs_hw_t *hw = io->hw; sli4_fc_wcqe_t *wcqe = (void *)cqe; uint32_t len = 0; uint32_t ext = 0; uint8_t out_of_order_axr_cmd = 0; uint8_t out_of_order_axr_data = 0; uint8_t lock_taken = 0; #if defined(OCS_DISC_SPIN_DELAY) uint32_t delay = 0; char prop_buf[32]; #endif /* * For the primary IO, this will also be used for the * response. So it is important to only set/clear this * flag on the first data phase of the IO because * subsequent phases will be done on the secondary XRI. 
*/ if (io->quarantine && io->quarantine_first_phase) { io->quarantine = (wcqe->qx == 1); ocs_hw_io_quarantine(hw, io->wq, io); } io->quarantine_first_phase = FALSE; /* BZ 161832 - free secondary HW IO */ if (io->sec_hio != NULL && io->sec_hio->quarantine) { /* * If the quarantine flag is set on the * IO, then set it on the secondary IO * based on the quarantine XRI (QX) bit * sent by the FW. */ io->sec_hio->quarantine = (wcqe->qx == 1); /* use the primary io->wq because it is not set on the secondary IO. */ ocs_hw_io_quarantine(hw, io->wq, io->sec_hio); } ocs_hw_remove_io_timed_wqe(hw, io); /* clear xbusy flag if WCQE[XB] is clear */ if (io->xbusy && wcqe->xb == 0) { io->xbusy = FALSE; } /* get extended CQE status */ switch (io->type) { case OCS_HW_BLS_ACC: case OCS_HW_BLS_ACC_SID: break; case OCS_HW_ELS_REQ: sli_fc_els_did(&hw->sli, cqe, &ext); len = sli_fc_response_length(&hw->sli, cqe); break; case OCS_HW_ELS_RSP: case OCS_HW_ELS_RSP_SID: case OCS_HW_FC_CT_RSP: break; case OCS_HW_FC_CT: len = sli_fc_response_length(&hw->sli, cqe); break; case OCS_HW_IO_TARGET_WRITE: len = sli_fc_io_length(&hw->sli, cqe); #if defined(OCS_DISC_SPIN_DELAY) if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) { delay = ocs_strtoul(prop_buf, 0, 0); ocs_udelay(delay); } #endif break; case OCS_HW_IO_TARGET_READ: len = sli_fc_io_length(&hw->sli, cqe); /* * if_type == 2 seems to return 0 "total length placed" on * FCP_TSEND64_WQE completions. If this appears to happen, * use the CTIO data transfer length instead. */ if (hw->workaround.retain_tsend_io_length && !len && !status) { len = io->length; } break; case OCS_HW_IO_TARGET_RSP: if (io->is_port_owned) { ocs_lock(&io->axr_lock); lock_taken = 1; if (io->axr_buf->call_axr_cmd) { out_of_order_axr_cmd = 1; } if (io->axr_buf->call_axr_data) { out_of_order_axr_data = 1; } } break; case OCS_HW_IO_INITIATOR_READ: len = sli_fc_io_length(&hw->sli, cqe); break; case OCS_HW_IO_INITIATOR_WRITE: len = sli_fc_io_length(&hw->sli, cqe); break; case OCS_HW_IO_INITIATOR_NODATA: break; case OCS_HW_IO_DNRX_REQUEUE: /* release the count for re-posting the buffer */ //ocs_hw_io_free(hw, io); break; default: ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n", io->type, io->indicator); break; } if (status) { ext = sli_fc_ext_status(&hw->sli, cqe); /* Emulate IAAB=0 for initiator WQEs only; i.e. automatically * abort exchange if an error occurred and exchange is still busy. */ if (hw->config.i_only_aab && (ocs_hw_iotype_is_originator(io->type)) && (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) { ocs_hw_rtn_e rc; ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); /* * Because the initiator will not issue another IO phase, it is OK to issue the * callback on the abort completion, but for consistency with the target, wait for the * XRI_ABORTED CQE to issue the IO callback. */ rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL); if (rc == OCS_HW_RTN_SUCCESS) { /* latch status to return after abort is complete */ io->status_saved = 1; io->saved_status = status; io->saved_ext = ext; io->saved_len = len; goto exit_ocs_hw_wq_process_io; } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { /* * Already being aborted by someone else (ABTS * perhaps). Just fall through and return original * error.
*/ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", io->indicator, io->reqtag); } else { /* Failed to abort for some other reason, log error */ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", io->indicator, io->reqtag, rc); } } /* * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW */ if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) { ocs_hw_rtn_e rc; ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); /* * Because targets may send a response when the IO completes using the same XRI, we must * wait for the XRI_ABORTED CQE to issue the IO callback */ rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); if (rc == OCS_HW_RTN_SUCCESS) { /* latch status to return after abort is complete */ io->status_saved = 1; io->saved_status = status; io->saved_ext = ext; io->saved_len = len; goto exit_ocs_hw_wq_process_io; } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { /* * Already being aborted by someone else (ABTS * perhaps). Just fall through and return original * error. */ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", io->indicator, io->reqtag); } else { /* Failed to abort for some other reason, log error */ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", io->indicator, io->reqtag, rc); } } } /* BZ 161832 - free secondary HW IO */ if (io->sec_hio != NULL) { ocs_hw_io_free(hw, io->sec_hio); io->sec_hio = NULL; } if (io->done != NULL) { ocs_hw_done_t done = io->done; void *arg = io->arg; io->done = NULL; if (io->status_saved) { /* use latched status if exists */ status = io->saved_status; len = io->saved_len; ext = io->saved_ext; io->status_saved = 0; } /* Restore default SGL */ ocs_hw_io_restore_sgl(hw, io); done(io, io->rnode, len, status, ext, arg); } if(out_of_order_axr_cmd) { /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ if (hw->config.bounce) { fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt; uint32_t s_id = fc_be24toh(hdr->s_id); uint32_t d_id = fc_be24toh(hdr->d_id); uint32_t ox_id = ocs_be16toh(hdr->ox_id); if (hw->callback.bounce != NULL) { (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id); } }else { hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq); } if(out_of_order_axr_data) { /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ if (hw->config.bounce) { fc_header_t *hdr = io->axr_buf->seq.header->dma.virt; uint32_t s_id = fc_be24toh(hdr->s_id); uint32_t d_id = fc_be24toh(hdr->d_id); uint32_t ox_id = ocs_be16toh(hdr->ox_id); if (hw->callback.bounce != NULL) { (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id); } }else { hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq); } } } exit_ocs_hw_wq_process_io: if(lock_taken) { ocs_unlock(&io->axr_lock); } } /** * @brief Process WQ completions for abort requests. * * @param arg Generic callback argument. * @param cqe Pointer to completion queue entry. * @param status Completion status. * * @return None. */ static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status) { ocs_hw_io_t *io = arg; ocs_hw_t *hw = io->hw; uint32_t ext = 0; uint32_t len = 0; hw_wq_callback_t *wqcb; /* * For IOs that were aborted internally, we may need to issue the callback here depending * on whether a XRI_ABORTED CQE is expected ot not. If the status is Local Reject/No XRI, then * issue the callback now. 
*/ ext = sli_fc_ext_status(&hw->sli, cqe); if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT && ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done != NULL) { ocs_hw_done_t done = io->done; void *arg = io->arg; io->done = NULL; /* * Use latched status as this is always saved for an internal abort * * Note: We won't have both a done and abort_done function, so don't worry about * clobbering the len, status and ext fields. */ status = io->saved_status; len = io->saved_len; ext = io->saved_ext; io->status_saved = 0; done(io, io->rnode, len, status, ext, arg); } if (io->abort_done != NULL) { ocs_hw_done_t done = io->abort_done; void *arg = io->abort_arg; io->abort_done = NULL; done(io, io->rnode, len, status, ext, arg); } ocs_lock(&hw->io_abort_lock); /* clear abort bit to indicate abort is complete */ io->abort_in_progress = 0; ocs_unlock(&hw->io_abort_lock); /* Free the WQ callback */ ocs_hw_assert(io->abort_reqtag != UINT32_MAX); wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag); ocs_hw_reqtag_free(hw, wqcb); /* * Call ocs_hw_io_free() because this releases the WQ reservation as * well as doing the refcount put. Don't duplicate the code here. */ (void)ocs_hw_io_free(hw, io); } /** * @brief Process XABT completions. * * @param hw Hardware context. * @param cq Pointer to the HW completion queue object. * @param cqe Pointer to the WQ completion queue entry. * @param rid Resource ID (IO tag). * * @return None. */ void ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid) { /* search the IO wait_free list */ ocs_hw_io_t *io = NULL; io = ocs_hw_io_lookup(hw, rid); ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id, ((cq->queue->index - 1) & (cq->queue->length - 1))); if (io == NULL) { /* IO lookup failure should never happen */ ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid); return; } if (!io->xbusy) { ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); } else { /* mark IO as no longer busy */ io->xbusy = FALSE; } if (io->is_port_owned) { ocs_lock(&hw->io_lock); /* Take a reference so that the callback below will not free the io before the requeue */ ocs_ref_get(&io->ref); ocs_unlock(&hw->io_lock); } /* For IOs that were aborted internally, we need to issue any pending callback here. */ if (io->done != NULL) { ocs_hw_done_t done = io->done; void *arg = io->arg; /* Use latched status as this is always saved for an internal abort */ int32_t status = io->saved_status; uint32_t len = io->saved_len; uint32_t ext = io->saved_ext; io->done = NULL; io->status_saved = 0; done(io, io->rnode, len, status, ext, arg); } /* Check to see if this is a port owned XRI */ if (io->is_port_owned) { ocs_lock(&hw->io_lock); ocs_hw_reque_xri(hw, io); ocs_unlock(&hw->io_lock); /* Not handling reque xri completion, free io */ ocs_hw_io_free(hw, io); return; } ocs_lock(&hw->io_lock); if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) { /* if on wait_free list, caller has already freed IO; * remove from wait_free list and add to free list. * if on in-use list, already marked as no longer busy; * just leave there and wait for caller to free. */ if (io->state == OCS_HW_IO_STATE_WAIT_FREE) { io->state = OCS_HW_IO_STATE_FREE; ocs_list_remove(&hw->io_wait_free, io); ocs_hw_io_free_move_correct_list(hw, io); } } ocs_unlock(&hw->io_lock); } /** * @brief Adjust the number of WQs and CQs within the HW. * * @par Description * Calculates the number of WQs and associated CQs needed in the HW based on * the number of IOs.
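 * For example (hypothetical sizes): with n_io = 1024 and 4096-entry WQs,
 * n_wq = ceil((1024 * 2) / 4096) = 1 before the BE3/Skyhawk and dual-chute
 * minimums below are applied.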
Calculates the starting CQ index for each WQ, RQ and * MQ. * * @param hw Hardware context allocated by the caller. */ static void ocs_hw_adjust_wqs(ocs_hw_t *hw) { uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ); uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ]; uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ]; /* * possibly adjust the size of the WQs so that the CQ is twice as * big as the WQ to allow for 2 completions per IO. This allows us to * handle multi-phase as well as aborts. */ if (max_cq_entries < max_wq_entries * 2) { max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2; } /* * Calculate the number of WQs to use based on the number of IOs. * * Note: We need to reserve room for aborts which must be sent down * the same WQ as the IO. So we allocate enough WQ space to * handle 2 times the number of IOs. Half of the space will be * used for normal IOs and the other half is reserved for aborts. */ hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries; /* * For performance reasons, it is best to use a minimum of 4 WQs * for BE3 and Skyhawk. */ if (hw->config.n_wq < 4 && SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { hw->config.n_wq = 4; } /* * For dual-chute support, we need to have at least one WQ per chute. */ if (hw->config.n_wq < 2 && ocs_hw_get_num_chutes(hw) > 1) { hw->config.n_wq = 2; } /* make sure we haven't exceeded the max supported in the HW */ if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) { hw->config.n_wq = OCS_HW_MAX_NUM_WQ; } /* make sure we haven't exceeded the chip maximum */ if (hw->config.n_wq > max_wq_num) { hw->config.n_wq = max_wq_num; } /* * Using Queue Topology string, we divide by number of chutes */ hw->config.n_wq /= ocs_hw_get_num_chutes(hw); } static int32_t ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size) { ocs_command_ctx_t *ctx = NULL; ocs_lock(&hw->cmd_lock); if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) { ocs_log_err(hw->os, "XXX no command context?!?\n"); ocs_unlock(&hw->cmd_lock); return -1; } hw->cmd_head_count--; /* Post any pending requests */ ocs_hw_cmd_submit_pending(hw); ocs_unlock(&hw->cmd_lock); if (ctx->cb) { if (ctx->buf) { ocs_memcpy(ctx->buf, mqe, size); } ctx->cb(hw, status, ctx->buf, ctx->arg); } ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t)); ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t)); return 0; } /** * @brief Process entries on the given mailbox queue. * * @param hw Hardware context. * @param status CQE status. * @param mq Pointer to the mailbox queue object. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq) { uint8_t mqe[SLI4_BMBX_SIZE]; if (!sli_queue_read(&hw->sli, mq, mqe)) { ocs_hw_command_process(hw, status, mqe, mq->size); } return 0; } /** * @brief Read an FCF table entry. * * @param hw Hardware context. * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first * read and the next_index field from the FCOE_READ_FCF_TABLE command * for subsequent reads. * * @return Returns 0 on success, or a non-zero value on failure.
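 *
 * The full table is discovered by chaining reads: each FCOE_READ_FCF_TABLE
 * completion reports a next_index that seeds the following read, until
 * SLI4_FCOE_FCF_TABLE_LAST is returned (see ocs_hw_cb_read_fcf()).
 * Conceptually (the real flow is callback-driven, not a loop; the helper
 * names below are hypothetical):
 *
 * @code
 * index = SLI4_FCOE_FCF_TABLE_FIRST;
 * do {
 *         entry = read_fcf_entry(index);   // one FCOE_READ_FCF_TABLE command
 *         report_domain_if_valid(&entry);  // may invoke the domain callback
 *         index = entry.next_index;
 * } while (index != SLI4_FCOE_FCF_TABLE_LAST);
 * @endcode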
*/ static ocs_hw_rtn_e ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index) { uint8_t *buf = NULL; int32_t rc = OCS_HW_RTN_ERROR; buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem, index)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); } return rc; } /** * @brief Callback function for the FCOE_READ_FCF_TABLE command. * * @par Description * Note that the caller has allocated: * - DMA memory to hold the table contents * - DMA memory structure * - Command/results buffer * . * Each of these must be freed here. * * @param hw Hardware context. * @param status Hardware status. * @param mqe Pointer to the mailbox command/results buffer. * @param arg Pointer to the DMA memory structure. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_dma_t *dma = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; if (status || hdr->status) { ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); } else if (dma->virt) { sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt; /* if FC or FCOE and FCF entry valid, process it */ if (read_fcf->fcf_entry.fc || (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) { if (hw->callback.domain != NULL) { ocs_domain_record_t drec = {0}; if (read_fcf->fcf_entry.fc) { /* * This is a pseudo FCF entry. Create a domain * record based on the read topology information */ drec.speed = hw->link.speed; drec.fc_id = hw->link.fc_id; drec.is_fc = TRUE; if (SLI_LINK_TOPO_LOOP == hw->link.topology) { drec.is_loop = TRUE; ocs_memcpy(drec.map.loop, hw->link.loop_map, sizeof(drec.map.loop)); } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) { drec.is_nport = TRUE; } } else { drec.index = read_fcf->fcf_entry.fcf_index; drec.priority = read_fcf->fcf_entry.fip_priority; /* copy address, wwn and vlan_bitmap */ ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address, sizeof(drec.address)); ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id, sizeof(drec.wwn)); ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap, sizeof(drec.map.vlan)); drec.is_ethernet = TRUE; drec.is_nport = TRUE; } hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_FOUND, &drec); } } else { /* if FCOE and FCF is not valid, ignore it */ ocs_log_test(hw->os, "ignore invalid FCF entry\n"); } if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) { ocs_hw_read_fcf(hw, read_fcf->next_index); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); //ocs_dma_free(hw->os, dma); //ocs_free(hw->os, dma, sizeof(ocs_dma_t)); return 0; } /** * @brief Callback function for the SLI link events. * * @par Description * This function allocates memory which must be freed in its callback. * * @param ctx Hardware context pointer (that is, ocs_hw_t *). * @param e Event structure pointer (that is, sli4_link_event_t *). * * @return Returns 0 on success, or a non-zero value on failure. 
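 * On link up, discovery is kicked off: an FCF table read for N_PORT
 * topologies, or a READ_TOPOLOGY command (to fetch the loop map) for
 * loop. On link down, every registered domain callback is notified with
 * OCS_HW_DOMAIN_LOST so that attached domains can be torn down.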
*/ static int32_t ocs_hw_cb_link(void *ctx, void *e) { ocs_hw_t *hw = ctx; sli4_link_event_t *event = e; ocs_domain_t *d = NULL; uint32_t i = 0; int32_t rc = OCS_HW_RTN_ERROR; ocs_t *ocs = hw->os; ocs_hw_link_event_init(hw); switch (event->status) { case SLI_LINK_STATUS_UP: hw->link = *event; if (SLI_LINK_TOPO_NPORT == event->topology) { device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed); ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); } else if (SLI_LINK_TOPO_LOOP == event->topology) { uint8_t *buf = NULL; device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed); buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (!buf) { ocs_log_err(hw->os, "no buffer for command\n"); break; } if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) { rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL); } if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(hw->os, "READ_TOPOLOGY failed\n"); ocs_free(hw->os, buf, SLI4_BMBX_SIZE); } } else { device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n", event->topology, event->speed); } break; case SLI_LINK_STATUS_DOWN: device_printf(ocs->dev, "Link Down\n"); hw->link.status = event->status; for (i = 0; i < SLI4_MAX_FCFI; i++) { d = hw->domains[i]; if (d != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d); } } break; default: ocs_log_test(hw->os, "unhandled link status %#x\n", event->status); break; } return 0; } static int32_t ocs_hw_cb_fip(void *ctx, void *e) { ocs_hw_t *hw = ctx; ocs_domain_t *domain = NULL; sli4_fip_event_t *event = e; ocs_hw_assert(event); ocs_hw_assert(hw); /* Find the associated domain object */ if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) { ocs_domain_t *d = NULL; uint32_t i = 0; /* Clear VLINK is different from the other FIP events as it passes back * a VPI instead of a FCF index. 
Check all attached SLI ports for a * matching VPI */ for (i = 0; i < SLI4_MAX_FCFI; i++) { d = hw->domains[i]; if (d != NULL) { ocs_sport_t *sport = NULL; ocs_list_foreach(&d->sport_list, sport) { if (sport->indicator == event->index) { domain = d; break; } } if (domain != NULL) { break; } } } } else { domain = ocs_hw_domain_get_indexed(hw, event->index); } switch (event->type) { case SLI4_FCOE_FIP_FCF_DISCOVERED: ocs_hw_read_fcf(hw, event->index); break; case SLI4_FCOE_FIP_FCF_DEAD: if (domain != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } break; case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: if (domain != NULL && hw->callback.domain != NULL) { /* * We will want to issue rediscover FCF when this domain is free'd in order * to invalidate the FCF table */ domain->req_rediscover_fcf = TRUE; hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } break; case SLI4_FCOE_FIP_FCF_MODIFIED: if (domain != NULL && hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); } ocs_hw_read_fcf(hw, event->index); break; default: ocs_log_test(hw->os, "unsupported event %#x\n", event->type); } return 0; } static int32_t ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_remote_node_t *rnode = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = 0; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); rnode->attached = FALSE; ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); evt = OCS_HW_NODE_ATTACH_FAIL; } else { rnode->attached = TRUE; ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1); evt = OCS_HW_NODE_ATTACH_OK; } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, rnode); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } static int32_t ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_remote_node_t *rnode = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; int32_t rc = 0; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); /* * In certain cases, a non-zero MQE status is OK (all must be true): * - node is attached * - if High Login Mode is enabled, node is part of a node group * - status is 0x1400 */ if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) || (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) { rc = -1; } } if (rc == 0) { rnode->node_group = FALSE; rnode->attached = FALSE; if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) { ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); } evt = OCS_HW_NODE_FREE_OK; } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, rnode); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return rc; } static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; int32_t rc = 0; uint32_t i; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, hdr->status); } else { evt = OCS_HW_NODE_FREE_ALL_OK; } if (evt == OCS_HW_NODE_FREE_ALL_OK) { for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) { 
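/* FREE_ALL released every RPI in the firmware, so clear each cached refcount */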
ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0); } if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) { ocs_log_test(hw->os, "FCOE_RPI free all failure\n"); rc = -1; } } if (hw->callback.rnode != NULL) { hw->callback.rnode(hw->args.rnode, evt, NULL); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return rc; } /** * @brief Initialize the pool of HW IO objects. * * @param hw Hardware context. * * @return Returns 0 on success, or a non-zero value on failure. */ static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *hw) { uint32_t i = 0; ocs_hw_io_t *io = NULL; uintptr_t xfer_virt = 0; uintptr_t xfer_phys = 0; uint32_t index; uint8_t new_alloc = TRUE; if (NULL == hw->io) { hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT); if (NULL == hw->io) { ocs_log_err(hw->os, "IO pointer memory allocation failed, %d IOs at size %zu\n", hw->config.n_io, sizeof(ocs_hw_io_t *)); return OCS_HW_RTN_NO_MEMORY; } for (i = 0; i < hw->config.n_io; i++) { hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t), OCS_M_ZERO | OCS_M_NOWAIT); if (hw->io[i] == NULL) { ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i); goto error; } } /* Create WQE buffs for IO */ hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size, OCS_M_ZERO | OCS_M_NOWAIT); if (NULL == hw->wqe_buffs) { /* free size must match the pointer-array allocation above; clear the pointer so a retry re-allocates */ ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *)); hw->io = NULL; ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d IOs at size %zu\n", __func__, hw->config.n_io, hw->sli.config.wqe_size); return OCS_HW_RTN_NO_MEMORY; } } else { /* re-use existing IOs, including SGLs */ new_alloc = FALSE; } if (new_alloc) { if (ocs_dma_alloc(hw->os, &hw->xfer_rdy, sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io, 4/*XXX what does this need to be? */)) { ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n"); return OCS_HW_RTN_NO_MEMORY; } } xfer_virt = (uintptr_t)hw->xfer_rdy.virt; xfer_phys = hw->xfer_rdy.phys; for (i = 0; i < hw->config.n_io; i++) { hw_wq_callback_t *wqcb; io = hw->io[i]; /* initialize IO fields */ io->hw = hw; /* Assign a WQE buff */ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size]; /* Allocate the request tag for this IO */ wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io); if (wqcb == NULL) { ocs_log_err(hw->os, "can't allocate request tag\n"); return OCS_HW_RTN_NO_RESOURCES; } io->reqtag = wqcb->instance_index; /* Now for the fields that are initialized on each free */ ocs_hw_init_free_io(io); /* The XB flag isn't cleared on IO free, so initialize it to zero here */ io->xbusy = 0; if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) { ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i); return OCS_HW_RTN_NO_MEMORY; } if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) { ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i); ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t)); return OCS_HW_RTN_NO_MEMORY; } io->def_sgl_count = hw->config.n_sgl; io->sgl = &io->def_sgl; io->sgl_count = io->def_sgl_count; if (hw->xfer_rdy.size) { io->xfer_rdy.virt = (void *)xfer_virt; io->xfer_rdy.phys = xfer_phys; io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t); xfer_virt += sizeof(fcp_xfer_rdy_iu_t); xfer_phys += sizeof(fcp_xfer_rdy_iu_t); } } return OCS_HW_RTN_SUCCESS; error: for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t)); hw->io[i] = NULL; } return OCS_HW_RTN_NO_MEMORY; } static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *hw) { uint32_t i = 0, io_index = 0;
uint32_t prereg = 0; ocs_hw_io_t *io = NULL; uint8_t cmd[SLI4_BMBX_SIZE]; ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; uint32_t nremaining; uint32_t n = 0; uint32_t sgls_per_request = 256; ocs_dma_t **sgls = NULL; ocs_dma_t reqbuf = { 0 }; prereg = sli_get_sgl_preregister(&hw->sli); if (prereg) { sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT); if (sgls == NULL) { ocs_log_err(hw->os, "ocs_malloc sgls failed\n"); return OCS_HW_RTN_NO_MEMORY; } rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT); if (rc) { ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n"); ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request); return OCS_HW_RTN_NO_MEMORY; } } io = hw->io[io_index]; for (nremaining = hw->config.n_io; nremaining; nremaining -= n) { if (prereg) { /* Copy addresses of the SGLs into the local sgls[] array, break out if the xri * is not contiguous. */ for (n = 0; n < MIN(sgls_per_request, nremaining); n++) { /* Check that we have contiguous xri values */ if (n > 0) { if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) { break; } } sgls[n] = hw->io[io_index + n]->sgl; } if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd), io->indicator, n, sgls, NULL, &reqbuf)) { if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) { rc = OCS_HW_RTN_ERROR; ocs_log_err(hw->os, "SGL post failed\n"); break; } } } else { n = nremaining; } /* Add to tail if successful */ for (i = 0; i < n; i++) { io->is_port_owned = 0; io->state = OCS_HW_IO_STATE_FREE; ocs_list_add_tail(&hw->io_free, io); io_index++; if (io_index < hw->config.n_io) { io = hw->io[io_index]; /* don't read past the end of the IO table on the last entry */ } } } if (prereg) { ocs_dma_free(hw->os, &reqbuf); ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request); } return rc; } static int32_t ocs_hw_flush(ocs_hw_t *hw) { uint32_t i = 0; /* Process any remaining completions */ for (i = 0; i < hw->eq_count; i++) { ocs_hw_process(hw, i, ~0); } return 0; } static int32_t ocs_hw_command_cancel(ocs_hw_t *hw) { ocs_lock(&hw->cmd_lock); /* * Manually clean up remaining commands. Note: since this calls * ocs_hw_command_process(), we'll also process the cmd_pending * list, so no need to manually clean that out. */ while (!ocs_list_empty(&hw->cmd_head)) { uint8_t mqe[SLI4_BMBX_SIZE] = { 0 }; ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head); ocs_log_test(hw->os, "hung command %08x\n", NULL == ctx ? UINT32_MAX : (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf))); ocs_unlock(&hw->cmd_lock); ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE); ocs_lock(&hw->cmd_lock); } ocs_unlock(&hw->cmd_lock); return 0; } /** * @brief Find IO given indicator (xri). * * @param hw HW context. * @param xri Indicator (XRI) to look for. * * @return Returns io if found, NULL otherwise. */ ocs_hw_io_t * ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri) { uint32_t ioindex; ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0]; return hw->io[ioindex]; } /** * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists. * * @param hw HW context. * @param io Pointer to the IO to cleanup.
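 *
 * @n @b Note: The caller must hold hw->io_lock. The lock is dropped and
 * re-acquired around each done/abort_done callback so that the backend
 * can safely re-enter the HW layer from the callback.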
*/ static void ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io) { ocs_hw_done_t done = io->done; ocs_hw_done_t abort_done = io->abort_done; /* first check active_wqe list and remove if there */ if (ocs_list_on_list(&io->wqe_link)) { ocs_list_remove(&hw->io_timed_wqe, io); } /* Remove from WQ pending list */ if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) { ocs_list_remove(&io->wq->pending_list, io); } if (io->done) { void *arg = io->arg; io->done = NULL; ocs_unlock(&hw->io_lock); done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg); ocs_lock(&hw->io_lock); } if (io->abort_done != NULL) { void *abort_arg = io->abort_arg; io->abort_done = NULL; ocs_unlock(&hw->io_lock); abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg); ocs_lock(&hw->io_lock); } } static int32_t ocs_hw_io_cancel(ocs_hw_t *hw) { ocs_hw_io_t *io = NULL; ocs_hw_io_t *tmp_io = NULL; uint32_t iters = 100; /* One second limit */ /* * Manually clean up outstanding IO. * Only walk through the list once: the backend will cleanup any IOs when done/abort_done is called. */ ocs_lock(&hw->io_lock); ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) { ocs_hw_done_t done = io->done; ocs_hw_done_t abort_done = io->abort_done; ocs_hw_io_cancel_cleanup(hw, io); /* * Since this is called in a reset/shutdown * case, if there is no callback, then just * free the IO. * * Note: A port owned XRI cannot be on * the in use list. We cannot call * ocs_hw_io_free() because we already * hold the io_lock. */ if (done == NULL && abort_done == NULL) { ocs_hw_io_free_common(hw, io); ocs_list_remove(&hw->io_inuse, io); ocs_hw_io_free_move_correct_list(hw, io); } } /* * For port owned XRIs, they are not on the in use list, so * walk through the XRIs and issue any callbacks. */ ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) { /* check list and remove if there */ if (ocs_list_on_list(&io->dnrx_link)) { ocs_list_remove(&hw->io_port_dnrx, io); ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ } ocs_hw_io_cancel_cleanup(hw, io); ocs_list_remove(&hw->io_port_owned, io); ocs_hw_io_free_common(hw, io); } ocs_unlock(&hw->io_lock); /* Give time for the callbacks to complete */ do { ocs_udelay(10000); iters--; } while (!ocs_list_empty(&hw->io_inuse) && iters); /* Leave a breadcrumb that cleanup is not yet complete.
*/ if (!ocs_list_empty(&hw->io_inuse)) { ocs_log_test(hw->os, "io_inuse list is not empty\n"); } return 0; } static int32_t ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size, ocs_dma_t *rsp) { sli4_sge_t *data = NULL; if (!hw || !io) { ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io); return OCS_HW_RTN_ERROR; } data = io->def_sgl.virt; /* setup command pointer */ data->buffer_address_high = ocs_addr32_hi(cmnd->phys); data->buffer_address_low = ocs_addr32_lo(cmnd->phys); data->buffer_length = cmnd_size; data++; /* setup response pointer */ data->buffer_address_high = ocs_addr32_hi(rsp->phys); data->buffer_address_low = ocs_addr32_lo(rsp->phys); data->buffer_length = rsp->size; return 0; } static int32_t __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe; if (status || read_topo->hdr.status) { ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, read_topo->hdr.status); ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return -1; } switch (read_topo->attention_type) { case SLI4_READ_TOPOLOGY_LINK_UP: hw->link.status = SLI_LINK_STATUS_UP; break; case SLI4_READ_TOPOLOGY_LINK_DOWN: hw->link.status = SLI_LINK_STATUS_DOWN; break; case SLI4_READ_TOPOLOGY_LINK_NO_ALPA: hw->link.status = SLI_LINK_STATUS_NO_ALPA; break; default: hw->link.status = SLI_LINK_STATUS_MAX; break; } switch (read_topo->topology) { case SLI4_READ_TOPOLOGY_NPORT: hw->link.topology = SLI_LINK_TOPO_NPORT; break; case SLI4_READ_TOPOLOGY_FC_AL: hw->link.topology = SLI_LINK_TOPO_LOOP; if (SLI_LINK_STATUS_UP == hw->link.status) { hw->link.loop_map = hw->loop_map.virt; } hw->link.fc_id = read_topo->acquired_al_pa; break; default: hw->link.topology = SLI_LINK_TOPO_MAX; break; } hw->link.medium = SLI_LINK_MEDIUM_FC; switch (read_topo->link_current.link_speed) { case SLI4_READ_TOPOLOGY_SPEED_1G: hw->link.speed = 1 * 1000; break; case SLI4_READ_TOPOLOGY_SPEED_2G: hw->link.speed = 2 * 1000; break; case SLI4_READ_TOPOLOGY_SPEED_4G: hw->link.speed = 4 * 1000; break; case SLI4_READ_TOPOLOGY_SPEED_8G: hw->link.speed = 8 * 1000; break; case SLI4_READ_TOPOLOGY_SPEED_16G: hw->link.speed = 16 * 1000; hw->link.loop_map = NULL; break; case SLI4_READ_TOPOLOGY_SPEED_32G: hw->link.speed = 32 * 1000; hw->link.loop_map = NULL; break; } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); return 0; } static int32_t __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_EXIT: /* ignore */ break; case OCS_EVT_HW_PORT_REQ_FREE: case OCS_EVT_HW_PORT_REQ_ATTACH: if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } /* fall through */ default: ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt)); break; } return 0; } static void * __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_FREE_FAIL, sport); } break; default: break; } return NULL; } static void * __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: 
/* free SLI resource */ if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) { ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id); } /* free mailbox buffer */ if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_FREE_OK, sport); } break; default: break; } return NULL; } static void * __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* free SLI resource */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); /* free mailbox buffer */ if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_ATTACH_FAIL, sport); } if (sport->sm_free_req_pending) { ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); } break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; uint8_t *cmd = NULL; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* allocate memory and send unreg_vpi */ cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!cmd) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator, SLI4_UNREG_TYPE_PORT)) { ocs_log_err(hw->os, "UNREG_VPI format failure\n"); ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { ocs_log_err(hw->os, "UNREG_VPI command failure\n"); ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_port_freed, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* Forward to execute in mailbox completion processing context */ if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) { ocs_log_err(hw->os, "ocs_hw_async_call failed\n"); } break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_port_freed, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); break; default: break; } return NULL; } static void * __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_ATTACH_OK, sport); } if (sport->sm_free_req_pending) { ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); } break; case OCS_EVT_HW_PORT_REQ_FREE: /* virtual/physical port request free */ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, 
void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) { ocs_log_err(hw->os, "REG_VPI format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { ocs_log_err(hw->os, "REG_VPI command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_port_attached, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data); break; case OCS_EVT_HW_PORT_REQ_FREE: /* Wait for attach response and then free */ sport->sm_free_req_pending = 1; break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* free SLI resource */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); /* free mailbox buffer */ if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_ALLOC_OK, sport); } /* If there is a pending free request, then handle it now */ if (sport->sm_free_req_pending) { ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); } break; case OCS_EVT_HW_PORT_REQ_ATTACH: /* virtual port requests attach */ ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data); break; case OCS_EVT_HW_PORT_ATTACH_OK: /* physical port attached (as part of attaching domain) */ ocs_sm_transition(ctx, __ocs_hw_port_attached, data); break; case OCS_EVT_HW_PORT_REQ_FREE: /* virtual port request free */ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); } else { /* * Note: BE3/Skyhawk will respond with a status of 0x20 * unless the reg_vpi has been issued, so we can * skip the unreg_vpi for these adapters. 
* * Send a nop to make sure that free doesn't occur in * same context */ ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL); } break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* free SLI resource */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); /* free mailbox buffer */ if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } if (hw->callback.port != NULL) { hw->callback.port(hw->args.port, OCS_HW_PORT_ALLOC_FAIL, sport); } /* If there is a pending free request, then handle it now */ if (sport->sm_free_req_pending) { ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); } break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; uint8_t *payload = NULL; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* allocate memory for the service parameters */ if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) { ocs_log_err(hw->os, "Failed to allocate DMA memory\n"); ocs_sm_transition(ctx, __ocs_hw_port_done, data); break; } if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, &sport->dma, sport->indicator)) { ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n"); ocs_dma_free(hw->os, &sport->dma); ocs_sm_transition(ctx, __ocs_hw_port_done, data); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); ocs_dma_free(hw->os, &sport->dma); ocs_sm_transition(ctx, __ocs_hw_port_done, data); break; } break; case OCS_EVT_RESPONSE: payload = sport->dma.virt; ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload); ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(sport->sli_wwpn)); ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(sport->sli_wwnn)); ocs_dma_free(hw->os, &sport->dma); ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data); break; case OCS_EVT_ERROR: ocs_dma_free(hw->os, &sport->dma); ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data); break; case OCS_EVT_HW_PORT_REQ_FREE: /* Wait for attach response and then free */ sport->sm_free_req_pending = 1; break; case OCS_EVT_EXIT: break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* no-op */ break; case OCS_EVT_HW_PORT_ALLOC_OK: ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL); break; case OCS_EVT_HW_PORT_ALLOC_FAIL: ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL); break; case OCS_EVT_HW_PORT_REQ_FREE: /* Wait for attach response and then free */ sport->sm_free_req_pending = 1; break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_sli_port_t *sport = ctx->app; ocs_hw_t *hw = sport->hw; smtrace("port"); switch (evt) { case OCS_EVT_ENTER: /* If there is a pending free request, then handle it now */ if (sport->sm_free_req_pending) { 
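/* free was requested before INIT_VPI was issued; report the port freed without sending the command */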
ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL); return NULL; } /* TODO XXX transitioning to done only works if this is called * directly from ocs_hw_port_alloc BUT not if called from * read_sparm64. In the later case, we actually want to go * through report_ok/fail */ if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport->indicator, sport->domain->indicator)) { ocs_log_err(hw->os, "INIT_VPI allocation failure\n"); ocs_sm_transition(ctx, __ocs_hw_port_done, data); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { ocs_log_err(hw->os, "INIT_VPI command failure\n"); ocs_sm_transition(ctx, __ocs_hw_port_done, data); break; } break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_port_allocated, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data); break; case OCS_EVT_HW_PORT_REQ_FREE: /* Wait for attach response and then free */ sport->sm_free_req_pending = 1; break; case OCS_EVT_EXIT: break; default: __ocs_hw_port_common(__func__, ctx, evt, data); break; } return NULL; } static int32_t __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_sli_port_t *sport = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_sm_event_t evt; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n", sport->indicator, status, hdr->status); evt = OCS_EVT_ERROR; } else { evt = OCS_EVT_RESPONSE; } ocs_sm_post_event(&sport->ctx, evt, mqe); return 0; } static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_sli_port_t *sport = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_sm_event_t evt; uint8_t *mqecpy; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n", sport->indicator, status, hdr->status); evt = OCS_EVT_ERROR; } else { evt = OCS_EVT_RESPONSE; } /* * In this case we have to malloc a mailbox command buffer, as it is reused * in the state machine post event call, and eventually freed */ mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (mqecpy == NULL) { ocs_log_err(hw->os, "malloc mqecpy failed\n"); return -1; } ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE); ocs_sm_post_event(&sport->ctx, evt, mqecpy); return 0; } /*************************************************************************** * Domain state machine */ static int32_t __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_EXIT: /* ignore */ break; default: ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt)); break; } return 0; } static void * __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: /* free command buffer */ if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } /* free SLI resources */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); /* TODO how to free FCFI (or do we at all)? 
*/ if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_ALLOC_FAIL, domain); } break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: /* free mailbox buffer and send alloc ok to physical sport */ ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL); /* now inform registered callbacks */ if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_ATTACH_OK, domain); } break; case OCS_EVT_HW_DOMAIN_REQ_FREE: ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (data != NULL) { ocs_free(hw->os, data, SLI4_BMBX_SIZE); } /* free SLI resources */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); /* TODO how to free FCFI (or do we at all)? */ if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_ATTACH_FAIL, domain); } break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt); if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) { ocs_log_err(hw->os, "REG_VFI format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "REG_VFI command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_domain_attached, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data); break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: /* free mailbox buffer and send alloc ok to physical sport */ ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL); ocs_hw_domain_add(hw, domain); /* now inform registered callbacks */ if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_ALLOC_OK, domain); } break; case OCS_EVT_HW_DOMAIN_REQ_ATTACH: ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data); break; case OCS_EVT_HW_DOMAIN_REQ_FREE: /* unreg_fcfi/vfi */ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL); } else { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); } break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, 
void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) { ocs_log_err(hw->os, "READ_SPARM64 format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_EXIT: break; case OCS_EVT_RESPONSE: ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt); ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_sli_port_t *sport = domain->sport; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator, domain->fcf_indicator, sport->indicator)) { ocs_log_err(hw->os, "INIT_VFI format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "INIT_VFI command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_EXIT: break; case OCS_EVT_RESPONSE: ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data); break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: { sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; uint32_t i; /* Set the filter match/mask values from hw's filter_def values */ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { rq_cfg[i].rq_id = 0xffff; rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); } /* Set the rq_id for each, in order of RQ definition */ for (i = 0; i < hw->hw_rq_count; i++) { if (i >= ARRAY_SIZE(rq_cfg)) { ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n"); break; } rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id; } if (!data) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (hw->hw_mrq_count) { if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, domain->vlan_id, domain->fcf)) { ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } else { if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf, rq_cfg, domain->vlan_id)) { ocs_log_err(hw->os, "REG_FCFI format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "REG_FCFI command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; } case OCS_EVT_EXIT: break; 
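/* REG_FCFI completed; the mailbox response carries the FCFI assigned by the firmware */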
case OCS_EVT_RESPONSE: if (!data) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi; /* * IF_TYPE 0 devices do not support explicit VFI and VPI initialization * and instead rely on implicit initialization during VFI registration. * Short circuit normal processing here for those devices. */ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data); } break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { /* * For FC, the HW already registered an FCFI. * Copy FCF information into the domain and jump to INIT_VFI */ domain->fcf_indicator = hw->fcf_indicator; ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data); } else { ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data); } break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (domain != NULL) { ocs_hw_t *hw = domain->hw; ocs_hw_domain_del(hw, domain); if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_FREE_FAIL, domain); } } /* free command buffer */ if (data != NULL) { ocs_free(domain != NULL ?
domain->hw->os : NULL, data, SLI4_BMBX_SIZE); } break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: /* Free DMA and mailbox buffer */ if (domain != NULL) { ocs_hw_t *hw = domain->hw; /* free VFI resource */ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); ocs_hw_domain_del(hw, domain); /* inform registered callbacks */ if (hw->callback.domain != NULL) { hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_FREE_OK, domain); } } if (data != NULL) { ocs_free(NULL, data, SLI4_BMBX_SIZE); } break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: /* if we're in the middle of a teardown, skip sending rediscover */ if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) { ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); break; } if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) { ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n"); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); } break; case OCS_EVT_RESPONSE: case OCS_EVT_ERROR: /* REDISCOVER_FCF can fail if none exist */ ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; smtrace("domain"); switch (evt) { case OCS_EVT_ENTER: if (data == NULL) { data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!data) { ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) { ocs_log_err(hw->os, "UNREG_FCFI format failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "UNREG_FCFI command failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_RESPONSE: if (domain->req_rediscover_fcf) { domain->req_rediscover_fcf = FALSE; ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); } break; case OCS_EVT_ERROR: ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); break; case OCS_EVT_EXIT: break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } static void * __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { ocs_domain_t *domain = ctx->app; ocs_hw_t *hw = domain->hw; uint8_t is_fc = FALSE; smtrace("domain"); is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC); switch (evt) { case OCS_EVT_ENTER: if (data == NULL) { data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (!data) { 
ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } } if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain, SLI4_UNREG_TYPE_DOMAIN)) { ocs_log_err(hw->os, "UNREG_VFI format failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { ocs_log_err(hw->os, "UNREG_VFI command failure\n"); ocs_free(hw->os, data, SLI4_BMBX_SIZE); ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); break; } break; case OCS_EVT_ERROR: if (is_fc) { ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); } break; case OCS_EVT_RESPONSE: if (is_fc) { ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); } else { ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); } break; default: __ocs_hw_domain_common(__func__, ctx, evt, data); break; } return NULL; } /* callback for domain alloc/attach/free */ static int32_t __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_domain_t *domain = arg; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; ocs_sm_event_t evt; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n", domain->indicator, status, hdr->status); evt = OCS_EVT_ERROR; } else { evt = OCS_EVT_RESPONSE; } ocs_sm_post_event(&domain->sm, evt, mqe); return 0; } static int32_t target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_io_t *io = NULL; ocs_hw_io_t *io_next = NULL; uint64_t ticks_current = ocs_get_os_ticks(); uint32_t sec_elapsed; ocs_hw_rtn_e rc; sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; if (status || hdr->status) { ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n", status, hdr->status); /* go ahead and proceed with wqe timer checks... */ } /* loop through active WQE list and check for timeouts */ ocs_lock(&hw->io_lock); ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) { sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq()); /* * If elapsed time > timeout, abort it. 
No need to check type since * it wouldn't be on this list unless it was a target WQE */ if (sec_elapsed > io->tgt_wqe_timeout) { ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n", io->indicator, io->reqtag, io->type); /* remove from active_wqe list so won't try to abort again */ ocs_list_remove(&hw->io_timed_wqe, io); /* save status of "timed out" for when abort completes */ io->status_saved = 1; io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT; io->saved_ext = 0; io->saved_len = 0; /* now abort outstanding IO */ rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); if (rc) { ocs_log_test(hw->os, "abort failed xri=%#x tag=%#x rc=%d\n", io->indicator, io->reqtag, rc); } } /* * need to go through entire list since each IO could have a * different timeout value */ } ocs_unlock(&hw->io_lock); /* if we're not in the middle of shutting down, schedule next timer */ if (!hw->active_wqe_timer_shutdown) { ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); } hw->in_active_wqe_timer = FALSE; return 0; } static void target_wqe_timer_cb(void *arg) { ocs_hw_t *hw = (ocs_hw_t *)arg; /* delete existing timer; will kick off new timer after checking wqe timeouts */ hw->in_active_wqe_timer = TRUE; ocs_del_timer(&hw->wqe_timer); /* Forward timer callback to execute in the mailbox completion processing context */ if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) { ocs_log_test(hw->os, "ocs_hw_async_call failed\n"); } } static void shutdown_target_wqe_timer(ocs_hw_t *hw) { uint32_t iters = 100; if (hw->config.emulate_tgt_wqe_timeout) { /* request active wqe timer shutdown, then wait for it to complete */ hw->active_wqe_timer_shutdown = TRUE; /* delete WQE timer and wait for timer handler to complete (if necessary) */ ocs_del_timer(&hw->wqe_timer); /* now wait for timer handler to complete (if necessary) */ while (hw->in_active_wqe_timer && iters) { /* * if we happen to have just sent NOP mailbox command, make sure * completions are being processed */ ocs_hw_flush(hw); iters--; } if (iters == 0) { ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n"); } } } /** * @brief Determine if HW IO is owned by the port. * * @par Description * Determines if the given HW IO has been posted to the chip. * * @param hw Hardware context allocated by the caller. * @param io HW IO. * * @return Returns TRUE if given HW IO is port-owned. */ uint8_t ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) { /* Check to see if this is a port owned XRI */ return io->is_port_owned; } /** * @brief Return TRUE if exchange is port-owned. * * @par Description * Test to see if the xri is a port-owned xri. * * @param hw Hardware context. * @param xri Exchange indicator. * * @return Returns TRUE if XRI is a port owned XRI. */ uint8_t ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri) { ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri); return (io == NULL ? FALSE : io->is_port_owned); } /** * @brief Returns an XRI from the port owned list to the host. * * @par Description * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes. * * @param hw Hardware context. * @param xri_base The starting XRI number. * @param xri_count The number of XRIs to free from the base. 
*/ static void ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count) { ocs_hw_io_t *io; uint32_t i; for (i = 0; i < xri_count; i++) { io = ocs_hw_io_lookup(hw, xri_base + i); /* * if this is an auto xfer rdy XRI, then we need to release any * buffer attached to the XRI before moving the XRI back to the free pool. */ if (hw->auto_xfer_rdy_enabled) { ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io); } ocs_lock(&hw->io_lock); ocs_list_remove(&hw->io_port_owned, io); io->is_port_owned = 0; ocs_list_add_tail(&hw->io_free, io); ocs_unlock(&hw->io_lock); } } /** * @brief Called when the POST_XRI command completes. * * @par Description * Free the mailbox command buffer and reclaim the XRIs on failure. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * * @return Returns 0. */ static int32_t ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe; /* Reclaim the XRIs as host owned if the command fails */ if (status != 0) { ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n", status, post_xri->xri_base, post_xri->xri_count); ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count); } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port. * * @param hw Hardware context. * @param xri_start The starting XRI to post. * @param num_to_post The number of XRIs to post. * * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS. */ static ocs_hw_rtn_e ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post) { uint8_t *post_xri; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* Since we need to allocate for mailbox queue, just always allocate */ post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (post_xri == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } /* Register the XRIs */ if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE, xri_start, num_to_post)) { rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE); ocs_log_err(hw->os, "post_xri failed\n"); } } return rc; } /** * @brief Move XRIs from the host-controlled pool to the port. * * @par Description * Removes IOs from the free list and moves them to the port. * * @param hw Hardware context. * @param num_xri The number of XRIs being requested to move to the chip. * * @return Returns the number of XRIs that were moved. */ uint32_t ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri) { ocs_hw_io_t *io; uint32_t i; uint32_t num_posted = 0; /* * Note: We cannot use ocs_hw_io_alloc() because that would place the * IO on the io_inuse list. We need to move from the io_free to * the io_port_owned list. */ ocs_lock(&hw->io_lock); for (i = 0; i < num_xri; i++) { if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { ocs_hw_rtn_e rc; /* * if this is an auto xfer rdy XRI, then we need to attach a * buffer to the XRI before submitting it to the chip. If a * buffer is unavailable, then we cannot post it, so return it * to the free pool.
*/ if (hw->auto_xfer_rdy_enabled) { /* Note: uses the IO lock to get the auto xfer rdy buffer */ ocs_unlock(&hw->io_lock); rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io); ocs_lock(&hw->io_lock); if (rc != OCS_HW_RTN_SUCCESS) { ocs_list_add_head(&hw->io_free, io); break; } } ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator); io->is_port_owned = 1; ocs_list_add_tail(&hw->io_port_owned, io); /* Post XRI */ if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) { ocs_hw_reclaim_xri(hw, io->indicator, i); break; } num_posted++; } else { /* no more free XRIs */ break; } } ocs_unlock(&hw->io_lock); return num_posted; } /** * @brief Called when the RELEASE_XRI command completes. * * @par Description * Move the IOs back to the free pool on success. * * @param hw Hardware context. * @param status Status field from the mbox completion. * @param mqe Mailbox response structure. * @param arg Pointer to a callback function that signals the caller that the command is done. * * @return Returns 0. */ static int32_t ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe; uint8_t i; /* Reclaim the XRIs as host owned if the command fails */ if (status != 0) { ocs_log_err(hw->os, "Status 0x%x\n", status); } else { for (i = 0; i < release_xri->released_xri_count; i++) { uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 : release_xri->xri_tbl[i/2].xri_tag1); ocs_hw_reclaim_xri(hw, xri, 1); } } ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); return 0; } /** * @brief Move XRIs from the port-controlled pool to the host. * * Requests XRIs from the FW to return to the host-owned pool. * * @param hw Hardware context. * @param num_xri The number of XRIs being requested to moved from the chip. * * @return Returns 0 for success, or a negative error code value for failure. */ ocs_hw_rtn_e ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri) { uint8_t *release_xri; ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; /* non-local buffer required for mailbox queue */ release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); if (release_xri == NULL) { ocs_log_err(hw->os, "no buffer for command\n"); return OCS_HW_RTN_NO_MEMORY; } /* release the XRIs */ if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) { rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL); if (rc != OCS_HW_RTN_SUCCESS) { ocs_log_err(hw->os, "release_xri failed\n"); } } /* If we are polling or an error occurred, then free the mailbox buffer */ if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) { ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE); } return rc; } /** * @brief Allocate an ocs_hw_rx_buffer_t array. * * @par Description * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory. * * @param hw Pointer to HW object. * @param rqindex RQ index for this buffer. * @param count Count of buffers in array. * @param size Size of buffer. * * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array. 
*/ static ocs_hw_rq_buffer_t * ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size) { ocs_t *ocs = hw->os; ocs_hw_rq_buffer_t *rq_buf = NULL; ocs_hw_rq_buffer_t *prq; uint32_t i; if (count != 0) { rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO); if (rq_buf == NULL) { ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n"); return NULL; } for (i = 0, prq = rq_buf; i < count; i ++, prq++) { prq->rqindex = rqindex; if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "DMA allocation failed\n"); /* unwind any DMA buffers already allocated before freeing the tracker array */ while (i-- > 0) { ocs_dma_free(ocs, &rq_buf[i].dma); } ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); rq_buf = NULL; break; } } } return rq_buf; } /** * @brief Free an ocs_hw_rq_buffer_t array. * * @par Description * The ocs_hw_rq_buffer_t array is freed, along with allocated DMA memory. * * @param hw Pointer to HW object. * @param rq_buf Pointer to ocs_hw_rq_buffer_t array. * @param count Count of buffers in array. * * @return None. */ static void ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count) { ocs_t *ocs = hw->os; uint32_t i; ocs_hw_rq_buffer_t *prq; if (rq_buf != NULL) { for (i = 0, prq = rq_buf; i < count; i++, prq++) { ocs_dma_free(ocs, &prq->dma); } ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); } } /** * @brief Allocate the RQ data buffers. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_rx_allocate(ocs_hw_t *hw) { ocs_t *ocs = hw->os; uint32_t i; int32_t rc = OCS_HW_RTN_SUCCESS; uint32_t rqindex = 0; hw_rq_t *rq; uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR; uint32_t payload_size = hw->config.rq_default_buffer_size; rqindex = 0; for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; /* Allocate header buffers */ rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size); if (rq->hdr_buf == NULL) { ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n"); rc = OCS_HW_RTN_ERROR; break; } ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id, rq->entry_count, hdr_size); rqindex++; /* Allocate payload buffers */ rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size); if (rq->payload_buf == NULL) { ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n"); rc = OCS_HW_RTN_ERROR; break; } ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id, rq->entry_count, payload_size); rqindex++; } return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS; } /** * @brief Post the RQ data buffers to the chip. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a non-zero value on failure. */ ocs_hw_rtn_e ocs_hw_rx_post(ocs_hw_t *hw) { uint32_t i; uint32_t idx; uint32_t rq_idx; int32_t rc = 0; /* * In RQ pair mode, we MUST post the header and payload buffer at the * same time. */ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { hw_rq_t *rq = hw->hw_rq[rq_idx]; for (i = 0; i < rq->entry_count-1; i++) { ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++); ocs_hw_assert(seq != NULL); seq->header = &rq->hdr_buf[i]; seq->payload = &rq->payload_buf[i]; rc = ocs_hw_sequence_free(hw, seq); if (rc) { break; } } if (rc) { break; } } return rc; } /** * @brief Free the RQ data buffers. * * @param hw Pointer to HW object.
* */ void ocs_hw_rx_free(ocs_hw_t *hw) { hw_rq_t *rq; uint32_t i; /* Free hw_rq buffers */ for (i = 0; i < hw->hw_rq_count; i++) { rq = hw->hw_rq[i]; if (rq != NULL) { ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count); rq->hdr_buf = NULL; ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count); rq->payload_buf = NULL; } } } /** * @brief HW async call context structure. */ typedef struct { ocs_hw_async_cb_t callback; void *arg; uint8_t cmd[SLI4_BMBX_SIZE]; } ocs_hw_async_call_ctx_t; /** * @brief HW async callback handler * * @par Description * This function is called when the NOP mailbox command completes. The callback stored * in the requesting context is invoked. * * @param hw Pointer to HW object. * @param status Completion status. * @param mqe Pointer to mailbox completion queue entry. * @param arg Caller-provided argument. * * @return None. */ static void ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_hw_async_call_ctx_t *ctx = arg; if (ctx != NULL) { if (ctx->callback != NULL) { (*ctx->callback)(hw, status, mqe, ctx->arg); } ocs_free(hw->os, ctx, sizeof(*ctx)); } } /** * @brief Make an async callback using NOP mailbox command * * @par Description * Post a NOP mailbox command; the callback with argument is invoked upon completion * while in the event processing context. * * @param hw Pointer to HW object. * @param callback Pointer to callback function. * @param arg Caller-provided callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg) { ocs_hw_async_call_ctx_t *ctx; /* * Allocate a callback context (which includes the mailbox command buffer); we need * this to be persistent, as the mailbox command submission may be queued and * executed later. */ ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT); if (ctx == NULL) { ocs_log_err(hw->os, "failed to malloc async call context\n"); return OCS_HW_RTN_NO_MEMORY; } ctx->callback = callback; ctx->arg = arg; /* Build and send a NOP mailbox command */ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) { ocs_log_err(hw->os, "COMMON_NOP format failure\n"); ocs_free(hw->os, ctx, sizeof(*ctx)); return OCS_HW_RTN_ERROR; } if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) { ocs_log_err(hw->os, "COMMON_NOP command failure\n"); ocs_free(hw->os, ctx, sizeof(*ctx)); return OCS_HW_RTN_ERROR; } return OCS_HW_RTN_SUCCESS; } /** * @brief Initialize the reqtag pool. * * @par Description * The WQ request tag pool is initialized. * * @param hw Pointer to HW object. * * @return Returns 0 on success, or a negative error code value on failure. */ ocs_hw_rtn_e ocs_hw_reqtag_init(ocs_hw_t *hw) { if (hw->wq_reqtag_pool == NULL) { hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE); if (hw->wq_reqtag_pool == NULL) { ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n"); return OCS_HW_RTN_NO_MEMORY; } } ocs_hw_reqtag_reset(hw); return OCS_HW_RTN_SUCCESS; } /** * @brief Allocate a WQ request tag. * * Allocate and populate a WQ request tag from the WQ request tag pool. * * @param hw Pointer to HW object. * @param callback Callback function. * @param arg Pointer to callback argument. * * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
*/ hw_wq_callback_t * ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) { hw_wq_callback_t *wqcb; ocs_hw_assert(callback != NULL); wqcb = ocs_pool_get(hw->wq_reqtag_pool); if (wqcb != NULL) { ocs_hw_assert(wqcb->callback == NULL); wqcb->callback = callback; wqcb->arg = arg; } return wqcb; } /** * @brief Free a WQ request tag. * * Free the passed in WQ request tag. * * @param hw Pointer to HW object. * @param wqcb Pointer to WQ request tag object to free. * * @return None. */ void ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb) { ocs_hw_assert(wqcb->callback != NULL); wqcb->callback = NULL; wqcb->arg = NULL; ocs_pool_put(hw->wq_reqtag_pool, wqcb); } /** * @brief Return WQ request tag by index. * * @par Description * Return pointer to WQ request tag object given an index. * * @param hw Pointer to HW object. * @param instance_index Index of WQ request tag to return. * * @return Pointer to WQ request tag, or NULL. */ hw_wq_callback_t * ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index) { hw_wq_callback_t *wqcb; wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index); if (wqcb == NULL) { ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index); } return wqcb; } /** * @brief Reset the WQ request tag pool. * * @par Description * Reset the WQ request tag pool, returning all to the free list. * * @param hw pointer to HW object. * * @return None. */ void ocs_hw_reqtag_reset(ocs_hw_t *hw) { hw_wq_callback_t *wqcb; uint32_t i; /* Remove all from freelist */ while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) { ; } /* Put them all back */ for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) { wqcb->instance_index = i; wqcb->callback = NULL; wqcb->arg = NULL; ocs_pool_put(hw->wq_reqtag_pool, wqcb); } } /** * @brief Handle HW assertion * * HW assert, display diagnostic message, and abort. * * @param cond string describing failing assertion condition * @param filename file name * @param linenum line number * * @return none */ void _ocs_hw_assert(const char *cond, const char *filename, int linenum) { ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond); ocs_abort(); /* no return */ } /** * @brief Handle HW verify * * HW verify, display diagnostic message, dump stack and return. * * @param cond string describing failing verify condition * @param filename file name * @param linenum line number * * @return none */ void _ocs_hw_verify(const char *cond, const char *filename, int linenum) { ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond); ocs_print_stack(); } /** * @brief Reque XRI * * @par Description * Reque XRI * * @param hw Pointer to HW object. 
* @param io Pointer to HW IO * * @return Returns 0 on success, or -1 on failure */ int32_t ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io ) { int32_t rc = 0; rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1); if (rc) { ocs_list_add_tail(&hw->io_port_dnrx, io); rc = -1; goto exit_ocs_hw_reque_xri; } io->auto_xfer_rdy_dnrx = 0; io->type = OCS_HW_IO_DNRX_REQUEUE; if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) { /* Clear buffer from XRI */ ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf); io->axr_buf = NULL; ocs_log_err(hw->os, "requeue_xri WQE error\n"); ocs_list_add_tail(&hw->io_port_dnrx, io); rc = -1; goto exit_ocs_hw_reque_xri; } if (io->wq == NULL) { io->wq = ocs_hw_queue_next_wq(hw, io); ocs_hw_assert(io->wq != NULL); } /* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread. */ OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++); OCS_STAT(io->wq->use_count++); rc = hw_wq_write(io->wq, &io->wqe); if (rc < 0) { ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc); rc = -1; } else { rc = 0; } exit_ocs_hw_reque_xri: return rc; } uint32_t ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn) { sli4_t *sli4 = &ocs->hw.sli; ocs_dma_t dma; uint8_t *payload = NULL; int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan; /* allocate memory for the service parameters */ if (ocs_dma_alloc(ocs, &dma, 112, 4)) { ocs_log_err(ocs, "Failed to allocate DMA memory\n"); return 1; } if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &dma, indicator)) { ocs_log_err(ocs, "READ_SPARM64 allocation failure\n"); ocs_dma_free(ocs, &dma); return 1; } if (sli_bmbx_command(sli4)) { ocs_log_err(ocs, "READ_SPARM64 command failure\n"); ocs_dma_free(ocs, &dma); return 1; } payload = dma.virt; ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn)); ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn)); ocs_dma_free(ocs, &dma); return 0; } +uint32_t +ocs_hw_get_config_persistent_topology(ocs_hw_t *hw) +{ + uint32_t topology = OCS_HW_TOPOLOGY_AUTO; + sli4_t *sli = &hw->sli; + + if (!sli_persist_topology_enabled(sli)) + return topology; + + switch (sli->config.pt) { + case SLI4_INIT_LINK_F_P2P_ONLY: + topology = OCS_HW_TOPOLOGY_NPORT; + break; + case SLI4_INIT_LINK_F_FCAL_ONLY: + topology = OCS_HW_TOPOLOGY_LOOP; + break; + default: + break; + } + + return topology; +} + +/* + * @brief Persistent topology configuration callback argument. + */ +typedef struct ocs_hw_persistent_topo_cb_arg { + ocs_sem_t semaphore; + int32_t status; +} ocs_hw_persistent_topo_cb_arg_t; + +/* + * @brief Called after completion of the set persistent topology request + * + * @par Description + * This is the completion callback for ocs_hw_set_persistent_topology(). + * It is invoked when the COMMON_SET_FEATURES mailbox command completes. + * + * @param hw Hardware context. + * @param status The status from the MQE. + * @param mqe Pointer to mailbox command buffer. + * @param arg Pointer to a callback argument.
+ * * @return 0 on success, non-zero otherwise + */ +static int32_t +ocs_hw_set_persistent_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) +{ + ocs_hw_persistent_topo_cb_arg_t *req = (ocs_hw_persistent_topo_cb_arg_t *)arg; + + req->status = status; + + ocs_sem_v(&req->semaphore); + + return 0; +} + +/** + * @brief Set persistent topology + * + * Sets the persistent topology (PT) feature using the + * COMMON_SET_FEATURES command. If the mailbox command succeeds, the + * topology is saved in the SLI config. PT stores the value to be set in the + * link_flags field of the INIT_LINK command used to bring up the link. + * + * The SLI specification defines the following values for PT: + * When TF is set to 0: + * 0 Reserved + * 1 Attempt point-to-point initialization (direct attach or Fabric topology). + * 2 Attempt FC-AL loop initialization. + * 3 Reserved + * + * When TF is set to 1: + * 0 Attempt FC-AL loop initialization; if it fails, attempt point-to-point initialization. + * 1 Attempt point-to-point initialization; if it fails, attempt FC-AL loop initialization. + * 2 Reserved + * 3 Reserved + * + * Note: Topology failover is only available on Lancer G5. This command will fail + * if TF is set to 1 on any other ASIC. + * + * @param hw Pointer to hw + * @param topology topology value to be set, provided through + * elxsdkutil set-topology cmd + * @param opts Command execution option (OCS_CMD_POLL or OCS_CMD_NOWAIT) + * + * @return Returns 0 on success, or a non-zero value on failure. + */ +ocs_hw_rtn_e +ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts) +{ + ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; + uint8_t buf[SLI4_BMBX_SIZE]; + sli4_req_common_set_features_persistent_topo_param_t param; + ocs_hw_persistent_topo_cb_arg_t request; + + ocs_memset(&param, 0, sizeof(param)); + param.persistent_topo = topology; + + switch (topology) { + case OCS_HW_TOPOLOGY_AUTO: + if (sli_get_asic_type(&hw->sli) == SLI4_ASIC_TYPE_LANCER) { + param.persistent_topo = SLI4_INIT_LINK_F_P2P_FAIL_OVER; + param.topo_failover = 1; + } else { + param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY; + param.topo_failover = 0; + } + break; + + case OCS_HW_TOPOLOGY_NPORT: + param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY; + param.topo_failover = 0; + break; + + case OCS_HW_TOPOLOGY_LOOP: + param.persistent_topo = SLI4_INIT_LINK_F_FCAL_ONLY; + param.topo_failover = 0; + break; + + default: + ocs_log_err(hw->os, "unsupported topology %#x\n", topology); + return -1; + } + + ocs_sem_init(&request.semaphore, 0, "set_persistent_topo"); + + /* build the set_features command */ + sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, + SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, sizeof(param), &param); + + if (opts == OCS_CMD_POLL) { + rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); + if (rc) { + ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc); + return rc; + } + } else { + + /* there is no response payload for this feature command */ + rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_set_persistent_topology_cb, &request); + if (rc) { + ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc); + return rc; + } + + if (ocs_sem_p(&request.semaphore, OCS_SEM_FOREVER)) { + ocs_log_err(hw->os, "ocs_sem_p failed\n"); + return -ENXIO; + } + + if (request.status) { + ocs_log_err(hw->os, "set persistent topology failed; status: %d\n", request.status); + return -EFAULT; + } + } + + sli_config_persistent_topology(&hw->sli, &param); + + return rc; +} + /** * @page fc_hw_api_overview HW APIs * - @ref devInitShutdown * - @ref domain * - @ref port * - @ref node * - @ref io * - @ref interrupt * *
 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
 * message details, but the higher-level code must still manage domains, ports,
 * IT nexuses, and IOs. The HW API is designed to help the higher level manage
 * these objects.

 *
 * The HW uses function callbacks to notify the higher-level code of events
 * that are received from the chip. There are currently three types of
 * functions that may be registered:
 *
 * • domain – This function is called whenever a domain event is generated
 *   within the HW. Examples include discovery of a new FCF, disruption of the
 *   connection to a domain, and allocation callbacks.
 * • unsolicited – This function is called whenever new data is received in
 *   the SLI-4 receive queue.
 * • rnode – This function is called for remote node events, such as attach status
 *   and allocation callbacks.
 *
 * Upper layer functions may be registered by using the ocs_hw_callback() function,
 * as shown in the sketch below.
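 *
 * A minimal sketch of registering a domain callback. The handler shape matches
 * the hw->callback.domain invocations in this file; the OCS_HW_CB_DOMAIN
 * selector name and app_ctx argument are assumed for illustration:
 *
 * @code
 * static int32_t
 * app_domain_cb(void *arg, ocs_hw_domain_event_e event, void *data)
 * {
 *     ocs_domain_t *domain = data;   // the affected domain object
 *     ocs_log_debug(domain->hw->os, "domain event %d\n", event);
 *     return 0;
 * }
 *
 * ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, app_domain_cb, app_ctx);
 * @endcode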

 * FC/FCoE HW API

 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
 * interface for creating the necessary common objects and sending I/Os. It may be used
 * “as is” in customer implementations or it can serve as an example of typical interactions
 * between a driver and the SLI-4 hardware. The broad categories of functionality include:
 *
 * • Setting up and tearing down the HW.
 * • Allocating and using the common objects (SLI Port, domain, remote node).
 * • Sending and receiving I/Os.
 *

 * HW Setup

 * To set up the HW (a condensed code sketch follows the list):
 *
 * 1. Set up the HW object using ocs_hw_setup().
 *    This step performs a basic configuration of the SLI-4 component and the HW to
 *    enable querying the hardware for its capabilities. At this stage, the HW is not
 *    capable of general operations (such as receiving events or sending I/Os).
 *
 * 2. Configure the HW according to the driver requirements.
 *    The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
 *    well as to configure the amount of resources required (ocs_hw_set()). The driver
 *    must also register callback functions (ocs_hw_callback()) to receive notification of
 *    various asynchronous events.
 *
 *    @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
 *    step creates the underlying queues, commits resources to the hardware, and
 *    prepares the hardware for operation. While the hardware is operational, the
 *    port is not online, and cannot send or receive data.
 *
 * 3. Finally, the driver can bring the port online (ocs_hw_port_control()).
 *    When the link comes up, the HW determines if a domain is present and notifies the
 *    driver using the domain callback function. This is the starting point of the driver's
 *    interaction with the common objects.
 *
 *    @b Note: For FCoE, there may be more than one domain available and, therefore,
 *    more than one callback.
 *
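 * A condensed sketch of this sequence, with error handling elided; the selector
 * names OCS_HW_CB_DOMAIN and OCS_HW_PORT_INIT are representative values assumed
 * from this driver's interface:
 *
 * @code
 * ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC);     // 1. basic setup, query capabilities
 * ocs_hw_set(&ocs->hw, OCS_HW_N_IO, 1024);            // 2. size resources ...
 * ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, app_domain_cb, ocs); // ... register callbacks
 * ocs_hw_init(&ocs->hw);                              //    create queues, commit resources
 * ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL); // 3. bring port online
 * @endcode
 *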

 * Allocating and Using Common Objects

 * Common objects provide a mechanism through which the various OneCore Storage
 * driver components share and track information. These data structures are primarily
 * used to track SLI component information but can be extended by other components, if
 * needed. The main objects are:
 *
 * • DMA – the ocs_dma_t object describes a memory region suitable for direct
 *   memory access (DMA) transactions.
 * • SCSI domain – the ocs_domain_t object represents the SCSI domain, including
 *   any infrastructure devices such as FC switches and FC forwarders. The domain
 *   object contains both an FCFI and a VFI.
 * • SLI Port (sport) – the ocs_sli_port_t object represents the connection between
 *   the driver and the SCSI domain. The SLI Port object contains a VPI.
 * • Remote node – the ocs_remote_node_t represents a connection between the SLI
 *   Port and another device in the SCSI domain. The node object contains an RPI.
 *
 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
 * node common objects and establish the connections between them. The goal is to
 * connect the driver to the SCSI domain to exchange I/Os with other devices. These
 * common object connections are shown in the figure, FC Driver Common Objects.
 *
 * The first step is to create a connection to the domain by allocating an SLI Port object.
 * The SLI Port object represents a particular FC ID and must be initialized with one. With
 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
 * a domain, the driver allocates a domain object and attaches to it using the previous SLI
 * Port object.

* * @b Note: In some cases, the driver may need to negotiate service parameters (that is, * FLOGI) with the domain before attaching.

* * Once attached to the domain, the driver can discover and attach to other devices * (remote nodes). The exact discovery method depends on the driver, but it typically * includes using a position map, querying the fabric name server, or an out-of-band * method. In most cases, it is necessary to log in with devices before performing I/Os. * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port * before exchanging FCP I/O.

* * @b Note: The HW manages both the well known fabric address and the name server as * nodes in the domain. Therefore, the driver must allocate node objects prior to * communicating with either of these entities. * *
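 *
 * A minimal sketch of the allocate/login/attach flow, with the ELS exchange and
 * completion handling elided (argument lists abbreviated; see the prototypes for
 * the full signatures):
 *
 * @code
 * ocs_remote_node_t rnode;
 *
 * ocs_hw_node_alloc(hw, &rnode, fc_id, sport);  // allocate the RPI for the remote FC_ID
 * // ... negotiate login, e.g. send PLOGI via ocs_hw_srrs_send() and wait for the ACC ...
 * ocs_hw_node_attach(hw, &rnode, &sparms);      // attach with the returned service parameters
 * @endcode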

 * Sending and Receiving I/Os

 * The HW provides separate interfaces for sending BLS/ELS/FC-CT and FCP, but the
 * commands are conceptually similar. Since the commands complete asynchronously,
 * the caller must provide a HW I/O object that maintains the I/O state, as well as
 * provide a callback function. The driver may use the same callback function for all I/O
 * operations, but each operation must use a unique HW I/O object. In the SLI-4
 * architecture, there is a direct association between the HW I/O object and the SGL used
 * to describe the data. Therefore, a driver typically performs the following operations
 * (sketched in code after the list):
 *
 * • Allocates a HW I/O object (ocs_hw_io_alloc()).
 * • Formats the SGL, specifying both the HW I/O object and the SGL
 *   (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).
 * • Sends the HW I/O (ocs_hw_io_send()).
 *
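 * For example, a target read (transferring data to the initiator) might be issued
 * as follows; this is a sketch with error handling elided, where ox_id, rnode, and
 * the data buffer are assumed to come from the received command:
 *
 * @code
 * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 * ocs_hw_io_param_t iparam = { 0 };
 *
 * ocs_hw_io_init_sges(hw, io, 1, OCS_HW_IO_TARGET_READ);
 * ocs_hw_io_add_sge(hw, io, data_buf.phys, xfer_len);  // one SGE describing the data
 * iparam.fcp_tgt.ox_id = ox_id;                        // exchange ID from the FCP command
 * iparam.fcp_tgt.offset = 0;                           // relative offset of this transfer
 * ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, xfer_len, &iparam, rnode,
 *                app_io_done, app_arg);
 * @endcode
 *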

 * HW Tear Down

 * To tear down the HW (a short sketch follows the list):
 *
 * 1. Take the port offline (ocs_hw_port_control()) to prevent receiving further
 *    data and events.
 * 2. Destroy the HW object (ocs_hw_teardown()).
 * 3. Free any memory used by the HW, such as buffers for unsolicited data.
*
*
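 * In code, with error handling elided (the OCS_HW_PORT_SHUTDOWN selector is
 * assumed from the port-control interface):
 *
 * @code
 * ocs_hw_port_control(hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL); // 1. take the port offline
 * ocs_hw_teardown(hw);                                          // 2. destroy the HW object
 * // 3. free any remaining driver-owned buffers (e.g. unsolicited data buffers)
 * @endcode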
* */ /** * This contains all hw runtime workaround code. Based on the asic type, * asic revision, and range of fw revisions, a particular workaround may be enabled. * * A workaround may consist of overriding a particular HW/SLI4 value that was initialized * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to * control specific runtime behavior. * * It is intended that the controls in ocs_hw_workaround_t be defined functionally. So we * would have the driver look like: "if (hw->workaround.enable_xxx) then ...", rather than * what we might previously see as "if this is a BE3, then do xxx" * */ #define HW_FWREV_ZERO (0ull) #define HW_FWREV_MAX (~0ull) #define SLI4_ASIC_TYPE_ANY 0 #define SLI4_ASIC_REV_ANY 0 /** * @brief Internal definition of workarounds */ typedef enum { HW_WORKAROUND_TEST = 1, HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */ HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */ HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, HW_WORKAROUND_WQE_COUNT_METHOD, HW_WORKAROUND_RQE_COUNT_METHOD, HW_WORKAROUND_USE_UNREGISTERD_RPI, HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */ HW_WORKAROUND_DISABLE_SET_DUMP_LOC, HW_WORKAROUND_USE_DIF_QUARANTINE, HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */ HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */ HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */ HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */ HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, /**< Don't use SEND_FRAME capable if FW version is too old */ } hw_workaround_e; /** * @brief Internal workaround structure instance */ typedef struct { sli4_asic_type_e asic_type; sli4_asic_rev_e asic_rev; uint64_t fwrev_low; uint64_t fwrev_high; hw_workaround_e workaround; uint32_t value; } hw_workaround_t; static hw_workaround_t hw_workarounds[] = { {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_TEST, 999}, /* Bug: 127585: if_type == 2 returns 0 for total length placed on * FCP_TSEND64_WQE completions. 
Note, original driver code enables this * workaround for all asic types */ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0}, /* Bug: unknown, Lancer A0 has mis-reported max queue depth */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_MAX_QUEUE, 2048}, /* Bug: 143399, BE3 has mis-reported max RQ queue depth */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0), HW_WORKAROUND_MAX_RQ, 2048}, /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0), HW_WORKAROUND_MAX_RQ, 2048}, /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0), HW_WORKAROUND_WQE_COUNT_METHOD, 1}, /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0), HW_WORKAROUND_RQE_COUNT_METHOD, 1}, /* Bug: 142968, BE3 UE with RPI == 0xffff */ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_UNREGISTERD_RPI, 0}, /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0}, {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0), HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0}, /* Bug: 160124, Skyhawk quarantine DIF XRIs */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_DIF_QUARANTINE, 0}, /* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_USE_DIF_SEC_XRI, 0}, /* Bug: xxxxxx, FCFI reported in SRB not correct */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0}, #if 0 /* Bug: 165642, FW version check for driver */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER), HW_WORKAROUND_FW_VERSION_TOO_LOW, 0}, #endif {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK), HW_WORKAROUND_FW_VERSION_TOO_LOW, 0}, /* Bug 177061, Lancer FW does not set the SGLC bit */ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_SGLC_MISREPORTED, 0}, /* BZ 181208/183914, enable this workaround for ALL revisions */ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0}, }; /** * @brief Function prototypes */ static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w); /** * @brief Parse the firmware version (name) * * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined * by the HW_FWREV() macro * * @param fwrev_string pointer to the firmware string * * @return packed firmware revision value */ static uint64_t parse_fw_version(const char *fwrev_string) { int v[4] = {0}; const char *p; int i; for (p = fwrev_string, i = 0; *p && (i < 4); i ++) { v[i] = ocs_strtoul(p, 0, 0); while(*p && *p != '.') { p ++; } if (*p) { p ++; } } /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */ if (v[2] == 9999) { return HW_FWREV_MAX; } else { return HW_FWREV(v[0], v[1], v[2], v[3]); } } /** * @brief Test for a workaround match * * Looks at the asic type, asic
revision, and fw revision, and returns TRUE if they match. * * @param hw Pointer to the HW structure * @param w Pointer to a workaround structure entry * * @return Return TRUE for a match */ static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w) { return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) && ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) && (w->fwrev_low <= hw->workaround.fwrev) && ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high))); } /** * @brief Set up HW runtime workarounds * * The function is called at the end of ocs_hw_setup() to set up any runtime workarounds * based on the HW/SLI setup. * * @param hw Pointer to HW structure * * @return none */ void ocs_hw_workaround_setup(struct ocs_hw_s *hw) { hw_workaround_t *w; sli4_t *sli4 = &hw->sli; uint32_t i; /* Initialize the workaround settings */ ocs_memset(&hw->workaround, 0, sizeof(hw->workaround)); /* If hw_war_version is non-null, then it's a value that was set by a module parameter * (sorry for the break in abstraction, but workarounds are ... well, workarounds) */ if (hw->hw_war_version) { hw->workaround.fwrev = parse_fw_version(hw->hw_war_version); } else { hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]); } /* Walk the workaround list, if a match is found, then handle it */ for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) { if (ocs_hw_workaround_match(hw, w)) { switch(w->workaround) { case HW_WORKAROUND_TEST: { ocs_log_debug(hw->os, "Override: test: %d\n", w->value); break; } case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: { ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n"); hw->workaround.retain_tsend_io_length = 1; break; } case HW_WORKAROUND_MAX_QUEUE: { sli4_qtype_e q; ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value); for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { if (hw->num_qentries[q] > w->value) { hw->num_qentries[q] = w->value; } } break; } case HW_WORKAROUND_MAX_RQ: { ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value); if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) { hw->num_qentries[SLI_QTYPE_RQ] = w->value; } break; } case HW_WORKAROUND_WQE_COUNT_METHOD: { ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value); sli4->config.count_method[SLI_QTYPE_WQ] = w->value; sli_calc_max_qentries(sli4); break; } case HW_WORKAROUND_RQE_COUNT_METHOD: { ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value); sli4->config.count_method[SLI_QTYPE_RQ] = w->value; sli_calc_max_qentries(sli4); break; } case HW_WORKAROUND_USE_UNREGISTERD_RPI: ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n"); hw->workaround.use_unregistered_rpi = TRUE; /* * Allocate an RPI that is never registered, to be used in the case where * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF */ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid, &hw->workaround.unregistered_index)) { ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n"); hw->workaround.use_unregistered_rpi = FALSE; } break; case HW_WORKAROUND_DISABLE_AR_TGT_DIF: ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n"); hw->workaround.disable_ar_tgt_dif = TRUE; break; case HW_WORKAROUND_DISABLE_SET_DUMP_LOC: ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n"); hw->workaround.disable_dump_loc = TRUE; break;
case HW_WORKAROUND_USE_DIF_QUARANTINE: ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n"); hw->workaround.use_dif_quarantine = TRUE; break; case HW_WORKAROUND_USE_DIF_SEC_XRI: ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n"); hw->workaround.use_dif_sec_xri = TRUE; break; case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB: ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n"); hw->workaround.override_fcfi = TRUE; break; case HW_WORKAROUND_FW_VERSION_TOO_LOW: ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n"); hw->workaround.fw_version_too_low = TRUE; break; case HW_WORKAROUND_SGLC_MISREPORTED: ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n"); hw->workaround.sglc_misreported = TRUE; break; case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE: ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n"); hw->workaround.ignore_send_frame = TRUE; break; } /* switch(w->workaround) */ } } } diff --git a/sys/dev/ocs_fc/ocs_hw.h b/sys/dev/ocs_fc/ocs_hw.h index 14fe4bbd3e8b..287e45acd539 100644 --- a/sys/dev/ocs_fc/ocs_hw.h +++ b/sys/dev/ocs_fc/ocs_hw.h @@ -1,1533 +1,1535 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Defines the Hardware Abstraction Layer (HW) interface functions. */ #ifndef _OCS_HW_H #define _OCS_HW_H #include "sli4.h" #include "ocs_hw.h" #include "ocs_stats.h" #include "ocs_utils.h" typedef struct ocs_hw_io_s ocs_hw_io_t; #if defined(OCS_INCLUDE_DEBUG) #else #define ocs_queue_history_wq(...) #define ocs_queue_history_cqe(...) #define ocs_queue_history_init(...) #define ocs_queue_history_free(...) 
#endif /** * @brief HW queue forward declarations */ typedef struct hw_eq_s hw_eq_t; typedef struct hw_cq_s hw_cq_t; typedef struct hw_mq_s hw_mq_t; typedef struct hw_wq_s hw_wq_t; typedef struct hw_rq_s hw_rq_t; typedef struct hw_rq_grp_s hw_rq_grp_t; /* HW asserts/verify */ extern void _ocs_hw_assert(const char *cond, const char *filename, int linenum); extern void _ocs_hw_verify(const char *cond, const char *filename, int linenum); #if defined(HW_NDEBUG) #define ocs_hw_assert(cond) #define ocs_hw_verify(cond, ...) #else #define ocs_hw_assert(cond) \ do { \ if ((!(cond))) { \ _ocs_hw_assert(#cond, __FILE__, __LINE__); \ } \ } while (0) #define ocs_hw_verify(cond, ...) \ do { \ if ((!(cond))) { \ _ocs_hw_verify(#cond, __FILE__, __LINE__); \ return __VA_ARGS__; \ } \ } while (0) #endif #define ocs_hw_verify_arg(cond) ocs_hw_verify(cond, OCS_HW_RTN_INVALID_ARG) /* * HW completion loop control parameters. * * The HW completion loop must terminate periodically to keep the OS happy. The * loop terminates when a predefined time has elapsed, but to keep the overhead of * computing time down, the time is only checked after a number of loop iterations * has completed. * * OCS_HW_TIMECHECK_ITERATIONS number of loop iterations between time checks * */ #define OCS_HW_TIMECHECK_ITERATIONS 100 #define OCS_HW_MAX_NUM_MQ 1 #define OCS_HW_MAX_NUM_RQ 32 #define OCS_HW_MAX_NUM_EQ 16 #define OCS_HW_MAX_NUM_WQ 32 #define OCE_HW_MAX_NUM_MRQ_PAIRS 16 #define OCS_HW_MAX_WQ_CLASS 4 #define OCS_HW_MAX_WQ_CPU 128 /* * A CQ will be assigned to each WQ (CQ must have 2X entries of the WQ for abort * processing), plus a separate one for each RQ PAIR and one for MQ */ #define OCS_HW_MAX_NUM_CQ ((OCS_HW_MAX_NUM_WQ*2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2)) /* * Q hash - size is the maximum of all the queue sizes, rounded up to the next * power of 2 */ #define OCS_HW_Q_HASH_SIZE B32_NEXT_POWER_OF_2(OCS_MAX(OCS_HW_MAX_NUM_MQ, OCS_MAX(OCS_HW_MAX_NUM_RQ, \ OCS_MAX(OCS_HW_MAX_NUM_EQ, OCS_MAX(OCS_HW_MAX_NUM_WQ, \ OCS_HW_MAX_NUM_CQ))))) #define OCS_HW_RQ_HEADER_SIZE 128 #define OCS_HW_RQ_HEADER_INDEX 0 /** * @brief Options for ocs_hw_command(). */ enum { OCS_CMD_POLL, /**< command executes synchronously and busy-waits for completion */ OCS_CMD_NOWAIT, /**< command executes asynchronously.
Uses callback */ }; typedef enum { OCS_HW_RTN_SUCCESS = 0, OCS_HW_RTN_SUCCESS_SYNC = 1, OCS_HW_RTN_ERROR = -1, OCS_HW_RTN_NO_RESOURCES = -2, OCS_HW_RTN_NO_MEMORY = -3, OCS_HW_RTN_IO_NOT_ACTIVE = -4, OCS_HW_RTN_IO_ABORT_IN_PROGRESS = -5, OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED = -6, OCS_HW_RTN_INVALID_ARG = -7, } ocs_hw_rtn_e; #define OCS_HW_RTN_IS_ERROR(e) ((e) < 0) typedef enum { OCS_HW_RESET_FUNCTION, OCS_HW_RESET_FIRMWARE, OCS_HW_RESET_MAX } ocs_hw_reset_e; typedef enum { OCS_HW_N_IO, OCS_HW_N_SGL, OCS_HW_MAX_IO, OCS_HW_MAX_SGE, OCS_HW_MAX_SGL, OCS_HW_MAX_NODES, OCS_HW_MAX_RQ_ENTRIES, OCS_HW_TOPOLOGY, /**< auto, nport, loop */ OCS_HW_WWN_NODE, OCS_HW_WWN_PORT, OCS_HW_FW_REV, OCS_HW_FW_REV2, OCS_HW_IPL, OCS_HW_VPD, OCS_HW_VPD_LEN, OCS_HW_MODE, /**< initiator, target, both */ OCS_HW_LINK_SPEED, OCS_HW_IF_TYPE, OCS_HW_SLI_REV, OCS_HW_SLI_FAMILY, OCS_HW_RQ_PROCESS_LIMIT, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_HW_AUTO_XFER_RDY_CAPABLE, OCS_HW_AUTO_XFER_RDY_XRI_CNT, OCS_HW_AUTO_XFER_RDY_SIZE, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, OCS_HW_AUTO_XFER_RDY_P_TYPE, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE, OCS_HW_DIF_CAPABLE, OCS_HW_DIF_SEED, OCS_HW_DIF_MODE, OCS_HW_DIF_MULTI_SEPARATE, OCS_HW_DUMP_MAX_SIZE, OCS_HW_DUMP_READY, OCS_HW_DUMP_PRESENT, OCS_HW_RESET_REQUIRED, OCS_HW_FW_ERROR, OCS_HW_FW_READY, OCS_HW_HIGH_LOGIN_MODE, OCS_HW_PREREGISTER_SGL, OCS_HW_HW_REV1, OCS_HW_HW_REV2, OCS_HW_HW_REV3, OCS_HW_LINKCFG, OCS_HW_ETH_LICENSE, OCS_HW_LINK_MODULE_TYPE, OCS_HW_NUM_CHUTES, OCS_HW_WAR_VERSION, OCS_HW_DISABLE_AR_TGT_DIF, OCS_HW_EMULATE_I_ONLY_AAB, /**< emulate IAAB=0 for initiator-commands only */ OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, /**< enable driver timeouts for target WQEs */ OCS_HW_LINK_CONFIG_SPEED, OCS_HW_CONFIG_TOPOLOGY, OCS_HW_BOUNCE, OCS_HW_PORTNUM, OCS_HW_BIOS_VERSION_STRING, OCS_HW_RQ_SELECT_POLICY, OCS_HW_SGL_CHAINING_CAPABLE, OCS_HW_SGL_CHAINING_ALLOWED, OCS_HW_SGL_CHAINING_HOST_ALLOCATED, OCS_HW_SEND_FRAME_CAPABLE, OCS_HW_RQ_SELECTION_POLICY, OCS_HW_RR_QUANTA, OCS_HW_FILTER_DEF, OCS_HW_MAX_VPORTS, OCS_ESOC, OCS_HW_FW_TIMED_OUT, } ocs_hw_property_e; enum { OCS_HW_TOPOLOGY_AUTO, OCS_HW_TOPOLOGY_NPORT, OCS_HW_TOPOLOGY_LOOP, OCS_HW_TOPOLOGY_NONE, OCS_HW_TOPOLOGY_MAX }; enum { OCS_HW_MODE_INITIATOR, OCS_HW_MODE_TARGET, OCS_HW_MODE_BOTH, OCS_HW_MODE_MAX }; /** * @brief Port protocols */ typedef enum { OCS_HW_PORT_PROTOCOL_ISCSI, OCS_HW_PORT_PROTOCOL_FCOE, OCS_HW_PORT_PROTOCOL_FC, OCS_HW_PORT_PROTOCOL_OTHER, } ocs_hw_port_protocol_e; #define OCS_HW_MAX_PROFILES 40 /** * @brief A Profile Descriptor */ typedef struct { uint32_t profile_index; uint32_t profile_id; char profile_description[512]; } ocs_hw_profile_descriptor_t; /** * @brief A Profile List */ typedef struct { uint32_t num_descriptors; ocs_hw_profile_descriptor_t descriptors[OCS_HW_MAX_PROFILES]; } ocs_hw_profile_list_t; /** * @brief Defines DIF operation modes */ enum { OCS_HW_DIF_MODE_INLINE, OCS_HW_DIF_MODE_SEPARATE, }; /** * @brief Defines the type of RQ buffer */ typedef enum { OCS_HW_RQ_BUFFER_TYPE_HDR, OCS_HW_RQ_BUFFER_TYPE_PAYLOAD, OCS_HW_RQ_BUFFER_TYPE_MAX, } ocs_hw_rq_buffer_type_e; /** * @brief Defines a wrapper for the RQ payload buffers so that we can place it * back on the proper queue. */ typedef struct { uint16_t rqindex; ocs_dma_t dma; } ocs_hw_rq_buffer_t; /** * @brief T10 DIF operations. 
*/ typedef enum { OCS_HW_DIF_OPER_DISABLED, OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC, OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF, OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM, OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF, OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC, OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM, OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM, OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC, OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW, } ocs_hw_dif_oper_e; #define OCS_HW_DIF_OPER_PASS_THRU OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC #define OCS_HW_DIF_OPER_STRIP OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF #define OCS_HW_DIF_OPER_INSERT OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC /** * @brief T10 DIF block sizes. */ typedef enum { OCS_HW_DIF_BK_SIZE_512, OCS_HW_DIF_BK_SIZE_1024, OCS_HW_DIF_BK_SIZE_2048, OCS_HW_DIF_BK_SIZE_4096, OCS_HW_DIF_BK_SIZE_520, OCS_HW_DIF_BK_SIZE_4104, OCS_HW_DIF_BK_SIZE_NA = 0 } ocs_hw_dif_blk_size_e; /** * @brief Link configurations. */ typedef enum { OCS_HW_LINKCFG_4X10G = 0, OCS_HW_LINKCFG_1X40G, OCS_HW_LINKCFG_2X16G, OCS_HW_LINKCFG_4X8G, OCS_HW_LINKCFG_4X1G, OCS_HW_LINKCFG_2X10G, OCS_HW_LINKCFG_2X10G_2X8G, /* must be last */ OCS_HW_LINKCFG_NA, } ocs_hw_linkcfg_e; /** * @brief link module types * * (note: these just happen to match SLI4 values) */ enum { OCS_HW_LINK_MODULE_TYPE_1GB = 0x0004, OCS_HW_LINK_MODULE_TYPE_2GB = 0x0008, OCS_HW_LINK_MODULE_TYPE_4GB = 0x0040, OCS_HW_LINK_MODULE_TYPE_8GB = 0x0080, OCS_HW_LINK_MODULE_TYPE_10GB = 0x0100, OCS_HW_LINK_MODULE_TYPE_16GB = 0x0200, OCS_HW_LINK_MODULE_TYPE_32GB = 0x0400, }; /** * @brief T10 DIF information passed to the transport. */ typedef struct ocs_hw_dif_info_s { ocs_hw_dif_oper_e dif_oper; ocs_hw_dif_blk_size_e blk_size; uint32_t ref_tag_cmp; uint32_t ref_tag_repl; uint32_t app_tag_cmp:16, app_tag_repl:16; uint32_t check_ref_tag:1, check_app_tag:1, check_guard:1, auto_incr_ref_tag:1, repl_app_tag:1, repl_ref_tag:1, dif:2, dif_separate:1, /* If the APP TAG is 0xFFFF, disable checking the REF TAG and CRC fields */ disable_app_ffff:1, /* if the APP TAG is 0xFFFF and REF TAG is 0xFFFF_FFFF, disable checking the received CRC field. */ disable_app_ref_ffff:1, :21; uint16_t dif_seed; } ocs_hw_dif_info_t; typedef enum { OCS_HW_ELS_REQ, /**< ELS request */ OCS_HW_ELS_RSP, /**< ELS response */ OCS_HW_ELS_RSP_SID, /**< ELS response, override the S_ID */ OCS_HW_FC_CT, /**< FC Common Transport */ OCS_HW_FC_CT_RSP, /**< FC Common Transport Response */ OCS_HW_BLS_ACC, /**< BLS accept (BA_ACC) */ OCS_HW_BLS_ACC_SID, /**< BLS accept (BA_ACC), override the S_ID */ OCS_HW_BLS_RJT, /**< BLS reject (BA_RJT) */ OCS_HW_BCAST, /**< Class 3 broadcast sequence */ OCS_HW_IO_TARGET_READ, OCS_HW_IO_TARGET_WRITE, OCS_HW_IO_TARGET_RSP, OCS_HW_IO_INITIATOR_READ, OCS_HW_IO_INITIATOR_WRITE, OCS_HW_IO_INITIATOR_NODATA, OCS_HW_IO_DNRX_REQUEUE, OCS_HW_IO_MAX, } ocs_hw_io_type_e; typedef enum { OCS_HW_IO_STATE_FREE, OCS_HW_IO_STATE_INUSE, OCS_HW_IO_STATE_WAIT_FREE, OCS_HW_IO_STATE_WAIT_SEC_HIO, } ocs_hw_io_state_e; /* Descriptive strings for the HW IO request types (note: these must always * match up, entry for entry, with the ocs_hw_io_type_e declaration) */ #define OCS_HW_IO_TYPE_STRINGS \ "ELS request", \ "ELS response", \ "ELS response(set SID)", \ "FC CT request", \ "FC CT response", \ "BLS accept", \ "BLS accept(set SID)", \ "BLS reject", \ "broadcast", \ "target read", \ "target write", \ "target response", \ "initiator read", \ "initiator write", \ "initiator nodata", \ "DNRX requeue",
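/**
 * @brief Example: filling ocs_hw_dif_info_t (illustrative sketch only).
 *
 * A minimal sketch of how a transport might request DIF "insert" on
 * 512-byte blocks using the types declared above. The field choices and
 * the 'lba' variable are assumptions made for illustration, not a
 * recommended or required configuration.
 *
 * @code
 * ocs_hw_dif_info_t dif;
 * uint32_t lba = 0;                        // hypothetical starting LBA
 *
 * ocs_memset(&dif, 0, sizeof(dif));
 * dif.dif_oper = OCS_HW_DIF_OPER_INSERT;   // alias for IN_NODIF_OUT_CRC
 * dif.blk_size = OCS_HW_DIF_BK_SIZE_512;
 * dif.check_ref_tag = 1;                   // compare the reference tag
 * dif.auto_incr_ref_tag = 1;               // increment ref tag per block
 * dif.ref_tag_cmp = lba;                   // initial ref tag to compare
 * @endcode
 */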
/** * @brief HW command context. * * Stores the state for the asynchronous commands sent to the hardware. */ typedef struct ocs_command_ctx_s { ocs_list_t link; /**< Callback function */ int32_t (*cb)(struct ocs_hw_s *, int32_t, uint8_t *, void *); void *arg; /**< Argument for callback */ uint8_t *buf; /**< buffer holding command / results */ void *ctx; /**< upper layer context */ } ocs_command_ctx_t; typedef struct ocs_hw_sgl_s { uintptr_t addr; size_t len; } ocs_hw_sgl_t; /** * @brief HW callback type * * Typedef for HW "done" callback. */ typedef int32_t (*ocs_hw_done_t)(struct ocs_hw_io_s *, ocs_remote_node_t *, uint32_t len, int32_t status, uint32_t ext, void *ul_arg); typedef union ocs_hw_io_param_u { struct { uint16_t ox_id; uint16_t rx_id; uint8_t payload[12]; /**< big enough for ABTS BA_ACC */ } bls; struct { uint32_t s_id; uint16_t ox_id; uint16_t rx_id; uint8_t payload[12]; /**< big enough for ABTS BA_ACC */ } bls_sid; struct { uint8_t r_ctl; uint8_t type; uint8_t df_ctl; uint8_t timeout; } bcast; struct { uint16_t ox_id; uint8_t timeout; } els; struct { uint32_t s_id; uint16_t ox_id; uint8_t timeout; } els_sid; struct { uint8_t r_ctl; uint8_t type; uint8_t df_ctl; uint8_t timeout; } fc_ct; struct { uint8_t r_ctl; uint8_t type; uint8_t df_ctl; uint8_t timeout; uint16_t ox_id; } fc_ct_rsp; struct { uint32_t offset; uint16_t ox_id; uint16_t flags; uint8_t cs_ctl; ocs_hw_dif_oper_e dif_oper; ocs_hw_dif_blk_size_e blk_size; uint8_t timeout; uint32_t app_id; } fcp_tgt; struct { ocs_dma_t *cmnd; ocs_dma_t *rsp; ocs_hw_dif_oper_e dif_oper; ocs_hw_dif_blk_size_e blk_size; uint32_t cmnd_size; uint16_t flags; uint8_t timeout; uint32_t first_burst; } fcp_ini; } ocs_hw_io_param_t; /** * @brief WQ steering mode */ typedef enum { OCS_HW_WQ_STEERING_CLASS, OCS_HW_WQ_STEERING_REQUEST, OCS_HW_WQ_STEERING_CPU, } ocs_hw_wq_steering_e; /** * @brief HW wqe object */ typedef struct { uint32_t abort_wqe_submit_needed:1, /**< set if abort wqe needs to be submitted */ send_abts:1, /**< set to 1 to have the hardware automatically send ABTS */ auto_xfer_rdy_dnrx:1, /**< TRUE if DNRX was set on this IO */ :29; uint32_t id; uint32_t abort_reqtag; ocs_list_link_t link; uint8_t *wqebuf; /**< work queue entry buffer */ } ocs_hw_wqe_t; /** * @brief HW IO object. * * Stores the per-IO information necessary for both the lower (SLI) and upper * layers (ocs).
*/ struct ocs_hw_io_s { /* Owned by HW */ ocs_list_link_t link; /**< used for busy, wait_free, free lists */ ocs_list_link_t wqe_link; /**< used for timed_wqe list */ ocs_list_link_t dnrx_link; /**< used for io posted dnrx list */ ocs_hw_io_state_e state; /**< state of IO: free, busy, wait_free */ ocs_hw_wqe_t wqe; /**< Work queue object, with link for pending */ ocs_lock_t axr_lock; /**< Lock to synchronize TRSP and AXT Data/Cmd Cqes */ ocs_hw_t *hw; /**< pointer back to hardware context */ ocs_remote_node_t *rnode; struct ocs_hw_auto_xfer_rdy_buffer_s *axr_buf; ocs_dma_t xfer_rdy; uint16_t type; uint32_t port_owned_abort_count; /**< IO abort count */ hw_wq_t *wq; /**< WQ assigned to the exchange */ uint32_t xbusy; /**< Exchange is active in FW */ ocs_hw_done_t done; /**< Function called on IO completion */ void *arg; /**< argument passed to "IO done" callback */ ocs_hw_done_t abort_done; /**< Function called on abort completion */ void *abort_arg; /**< argument passed to "abort done" callback */ ocs_ref_t ref; /**< refcount object */ size_t length; /**< needed for bug O127585: length of IO */ uint8_t tgt_wqe_timeout; /**< timeout value for target WQEs */ uint64_t submit_ticks; /**< timestamp when current WQE was submitted */ uint32_t status_saved:1, /**< if TRUE, latched status should be returned */ abort_in_progress:1, /**< if TRUE, abort is in progress */ quarantine:1, /**< set if IO to be quarantined */ quarantine_first_phase:1, /**< set if first phase of IO */ is_port_owned:1, /**< set if POST_XRI was used to send XRI to the chip */ auto_xfer_rdy_dnrx:1, /**< TRUE if DNRX was set on this IO */ :26; uint32_t saved_status; /**< latched status */ uint32_t saved_len; /**< latched length */ uint32_t saved_ext; /**< latched extended status */ hw_eq_t *eq; /**< EQ that this HIO came up on */ ocs_hw_wq_steering_e wq_steering; /**< WQ steering mode request */ uint8_t wq_class; /**< WQ class if steering mode is Class */ /* Owned by SLI layer */ uint16_t reqtag; /**< request tag for this HW IO */ uint32_t abort_reqtag; /**< request tag for an abort of this HW IO (note: this is a 32 bit value to allow us to use UINT32_MAX as an uninitialized value) */ uint32_t indicator; /**< XRI */ ocs_dma_t def_sgl; /**< default scatter gather list */ uint32_t def_sgl_count; /**< count of SGEs in default SGL */ ocs_dma_t *sgl; /**< pointer to current active SGL */ uint32_t sgl_count; /**< count of SGEs in io->sgl */ uint32_t first_data_sge; /**< index of first data SGE */ ocs_dma_t *ovfl_sgl; /**< overflow SGL */ uint32_t ovfl_sgl_count; /**< count of SGEs in overflow SGL */ sli4_lsp_sge_t *ovfl_lsp; /**< pointer to overflow segment length */ ocs_hw_io_t *ovfl_io; /**< Used for SGL chaining on skyhawk */ uint32_t n_sge; /**< number of active SGEs */ uint32_t sge_offset; /* BZ 161832 Workaround: */ struct ocs_hw_io_s *sec_hio; /**< Secondary HW IO context */ ocs_hw_io_param_t sec_iparam; /**< Secondary HW IO context saved iparam */ uint32_t sec_len; /**< Secondary HW IO context saved len */ /* Owned by upper layer */ void *ul_io; /**< where upper layer can store reference to its IO */ }; typedef enum { OCS_HW_PORT_INIT, OCS_HW_PORT_SHUTDOWN, OCS_HW_PORT_SET_LINK_CONFIG, } ocs_hw_port_e; /** * @brief Fabric/Domain events */ typedef enum { OCS_HW_DOMAIN_ALLOC_OK, /**< domain successfully allocated */ OCS_HW_DOMAIN_ALLOC_FAIL, /**< domain allocation failed */ OCS_HW_DOMAIN_ATTACH_OK, /**< successfully attached to domain */ OCS_HW_DOMAIN_ATTACH_FAIL, /**< domain attach failed */ OCS_HW_DOMAIN_FREE_OK, /**<
successfully freed domain */ OCS_HW_DOMAIN_FREE_FAIL, /**< domain free failed */ OCS_HW_DOMAIN_LOST, /**< previously discovered domain no longer available */ OCS_HW_DOMAIN_FOUND, /**< new domain discovered */ OCS_HW_DOMAIN_CHANGED, /**< previously discovered domain properties have changed */ } ocs_hw_domain_event_e; typedef enum { OCS_HW_PORT_ALLOC_OK, /**< port successfully allocated */ OCS_HW_PORT_ALLOC_FAIL, /**< port allocation failed */ OCS_HW_PORT_ATTACH_OK, /**< successfully attached to port */ OCS_HW_PORT_ATTACH_FAIL, /**< port attach failed */ OCS_HW_PORT_FREE_OK, /**< successfully freed port */ OCS_HW_PORT_FREE_FAIL, /**< port free failed */ } ocs_hw_port_event_e; typedef enum { OCS_HW_NODE_ATTACH_OK, OCS_HW_NODE_ATTACH_FAIL, OCS_HW_NODE_FREE_OK, OCS_HW_NODE_FREE_FAIL, OCS_HW_NODE_FREE_ALL_OK, OCS_HW_NODE_FREE_ALL_FAIL, } ocs_hw_remote_node_event_e; typedef enum { OCS_HW_CB_DOMAIN, OCS_HW_CB_PORT, OCS_HW_CB_REMOTE_NODE, OCS_HW_CB_UNSOLICITED, OCS_HW_CB_BOUNCE, OCS_HW_CB_MAX, /**< must be last */ } ocs_hw_callback_e; /** * @brief HW unsolicited callback status */ typedef enum { OCS_HW_UNSOL_SUCCESS, OCS_HW_UNSOL_ERROR, OCS_HW_UNSOL_ABTS_RCVD, OCS_HW_UNSOL_MAX, /**< must be last */ } ocs_hw_unsol_status_e; /** * @brief Node group rpi reference */ typedef struct { ocs_atomic_t rpi_count; ocs_atomic_t rpi_attached; } ocs_hw_rpi_ref_t; /** * @brief HW link stat types */ typedef enum { OCS_HW_LINK_STAT_LINK_FAILURE_COUNT, OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT, OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT, OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT, OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT, OCS_HW_LINK_STAT_CRC_COUNT, OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT, OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT, OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT, OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT, OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT, OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT, OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT, OCS_HW_LINK_STAT_RCV_EOFA_COUNT, OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT, OCS_HW_LINK_STAT_RCV_EOFNI_COUNT, OCS_HW_LINK_STAT_RCV_SOFF_COUNT, OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT, OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT, OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT, OCS_HW_LINK_STAT_MAX, /**< must be last */ } ocs_hw_link_stat_e; typedef enum { OCS_HW_HOST_STAT_TX_KBYTE_COUNT, OCS_HW_HOST_STAT_RX_KBYTE_COUNT, OCS_HW_HOST_STAT_TX_FRAME_COUNT, OCS_HW_HOST_STAT_RX_FRAME_COUNT, OCS_HW_HOST_STAT_TX_SEQ_COUNT, OCS_HW_HOST_STAT_RX_SEQ_COUNT, OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG, OCS_HW_HOST_STAT_TOTAL_EXCH_RESP, OCS_HW_HOSY_STAT_RX_P_BSY_COUNT, OCS_HW_HOST_STAT_RX_F_BSY_COUNT, OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT, OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT, OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT, OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT, OCS_HW_HOST_STAT_MAX /* MUST BE LAST */ } ocs_hw_host_stat_e; typedef enum { OCS_HW_STATE_UNINITIALIZED, /* power-on, no allocations, no initializations */ OCS_HW_STATE_QUEUES_ALLOCATED, /* chip is reset, allocations are complete (queues not registered) */ OCS_HW_STATE_ACTIVE, /* chip is up and running */ OCS_HW_STATE_RESET_IN_PROGRESS, /* chip is being reset */ OCS_HW_STATE_TEARDOWN_IN_PROGRESS, /* teardown has been started */ } ocs_hw_state_e; /** * @brief Defines a general FC sequence object, consisting of a header, payload buffers * and a HW IO in the case of a port owned XRI */ typedef struct { ocs_hw_t *hw; /**< HW that owns this sequence */ /* sequence information */ uint8_t fcfi; /**< FCFI associated with sequence */ uint8_t auto_xrdy; /**< If auto XFER_RDY was
generated */ uint8_t out_of_xris; /**< Set if the IO would have been assisted had XRIs been available */ ocs_hw_rq_buffer_t *header; ocs_hw_rq_buffer_t *payload; /**< received frame payload buffer */ /* other "state" information from the SRB (sequence coalescing) */ ocs_hw_unsol_status_e status; uint32_t xri; /**< XRI associated with sequence; sequence coalescing only */ ocs_hw_io_t *hio; /**< HW IO */ ocs_list_link_t link; void *hw_priv; /**< HW private context */ } ocs_hw_sequence_t; /** * @brief Structure to track optimized write buffers posted to chip owned XRIs. * * Note: The rqindex will be set to one of the following "fake" indexes. This will be used * when the buffer is returned via ocs_seq_free() to make the buffer available * for re-use on another XRI. * * The dma->alloc pointer on the dummy header will be used to get back to this structure when the buffer is freed. * * More of these objects may be allocated on the fly if more XRIs are pushed to the chip. */ #define OCS_HW_RQ_INDEX_DUMMY_HDR 0xFF00 #define OCS_HW_RQ_INDEX_DUMMY_DATA 0xFF01 typedef struct ocs_hw_auto_xfer_rdy_buffer_s { fc_header_t hdr; /**< used to build a dummy data header for unsolicited processing */ ocs_hw_rq_buffer_t header; /**< Points to the dummy data header */ ocs_hw_rq_buffer_t payload; /**< received frame payload buffer */ ocs_hw_sequence_t seq; /**< sequence for passing the buffers */ uint8_t data_cqe; uint8_t cmd_cqe; /* fields saved from the command header that are needed when the data arrives */ uint8_t fcfi; /* To handle out-of-order completions, save AXR cmd and data cqes */ uint8_t call_axr_cmd; uint8_t call_axr_data; ocs_hw_sequence_t *cmd_seq; } ocs_hw_auto_xfer_rdy_buffer_t; /** * @brief HW link statistics counter */ typedef struct { uint8_t overflow; uint32_t counter; } ocs_hw_link_stat_counts_t; /** * @brief HW object describing fc host stats */ typedef struct { uint32_t counter; } ocs_hw_host_stat_counts_t; #define TID_HASH_BITS 8 #define TID_HASH_LEN (1U << TID_HASH_BITS) typedef struct ocs_hw_iopt_s { char name[32]; uint32_t instance_index; ocs_thread_t iopt_thread; ocs_cbuf_t *iopt_free_queue; /* multiple reader, multiple writer */ ocs_cbuf_t *iopt_work_queue; ocs_array_t *iopt_cmd_array; } ocs_hw_iopt_t; typedef enum { HW_CQ_HANDLER_LOCAL, HW_CQ_HANDLER_THREAD, } hw_cq_handler_e; #include "ocs_hw_queues.h" /** * @brief Structure used for the hash lookup of queue IDs */ typedef struct { uint32_t id:16, in_use:1, index:15; } ocs_queue_hash_t; /** * @brief Define the fields required to implement the skyhawk DIF quarantine.
*/ #define OCS_HW_QUARANTINE_QUEUE_DEPTH 4 typedef struct { uint32_t quarantine_index; ocs_hw_io_t *quarantine_ios[OCS_HW_QUARANTINE_QUEUE_DEPTH]; } ocs_quarantine_info_t; /** * @brief Define the WQ callback object */ typedef struct { uint16_t instance_index; /**< use for request tag */ void (*callback)(void *arg, uint8_t *cqe, int32_t status); void *arg; } hw_wq_callback_t; typedef struct { uint64_t fwrev; /* Control Declarations here ...*/ uint8_t retain_tsend_io_length; /* Use unregistered RPI */ uint8_t use_unregistered_rpi; uint32_t unregistered_rid; uint32_t unregistered_index; uint8_t disable_ar_tgt_dif; /* Disable auto response if target DIF */ uint8_t disable_dump_loc; uint8_t use_dif_quarantine; uint8_t use_dif_sec_xri; uint8_t override_fcfi; uint8_t fw_version_too_low; uint8_t sglc_misreported; uint8_t ignore_send_frame; } ocs_hw_workaround_t; /** * @brief HW object */ struct ocs_hw_s { ocs_os_handle_t os; sli4_t sli; uint16_t ulp_start; uint16_t ulp_max; uint32_t dump_size; ocs_hw_state_e state; uint8_t hw_setup_called; uint8_t sliport_healthcheck; uint16_t watchdog_timeout; ocs_lock_t watchdog_lock; /** HW configuration, subject to ocs_hw_set() */ struct { uint32_t n_eq; /**< number of event queues */ uint32_t n_cq; /**< number of completion queues */ uint32_t n_mq; /**< number of mailbox queues */ uint32_t n_rq; /**< number of receive queues */ uint32_t n_wq; /**< number of work queues */ uint32_t n_io; /**< total number of IO objects */ uint32_t n_sgl;/**< length of SGL */ uint32_t speed; /** requested link speed in Mbps */ uint32_t topology; /** requested link topology */ uint32_t rq_default_buffer_size; /** size of the buffers for first burst */ uint32_t auto_xfer_rdy_xri_cnt; /** Initial XRIs to post to chip at initialization */ uint32_t auto_xfer_rdy_size; /** max size IO to use with this feature */ uint8_t auto_xfer_rdy_blk_size_chip; /** block size to use with this feature */ uint8_t esoc; uint16_t dif_seed; /** The seed for the DIF CRC calculation */ uint16_t auto_xfer_rdy_app_tag_value; uint8_t dif_mode; /**< DIF mode to use */ uint8_t i_only_aab; /** Enable initiator-only auto-abort */ uint8_t emulate_tgt_wqe_timeout; /** Enable driver target wqe timeouts */ uint32_t bounce:1; const char *queue_topology; /**< Queue topology string */ uint8_t auto_xfer_rdy_t10_enable; /** Enable t10 PI for auto xfer ready */ uint8_t auto_xfer_rdy_p_type; /** p_type for auto xfer ready */ uint8_t auto_xfer_rdy_ref_tag_is_lba; uint8_t auto_xfer_rdy_app_tag_valid; uint8_t rq_selection_policy; /** MRQ RQ selection policy */ uint8_t rr_quanta; /** RQ quanta if rq_selection_policy == 2 */ uint32_t filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; } config; /* calculated queue sizes for each type */ uint32_t num_qentries[SLI_QTYPE_MAX]; /* Storage for SLI queue objects */ sli4_queue_t wq[OCS_HW_MAX_NUM_WQ]; sli4_queue_t rq[OCS_HW_MAX_NUM_RQ]; uint16_t hw_rq_lookup[OCS_HW_MAX_NUM_RQ]; sli4_queue_t mq[OCS_HW_MAX_NUM_MQ]; sli4_queue_t cq[OCS_HW_MAX_NUM_CQ]; sli4_queue_t eq[OCS_HW_MAX_NUM_EQ]; /* HW queue */ uint32_t eq_count; uint32_t cq_count; uint32_t mq_count; uint32_t wq_count; uint32_t rq_count; /**< count of SLI RQs */ ocs_list_t eq_list; ocs_queue_hash_t cq_hash[OCS_HW_Q_HASH_SIZE]; ocs_queue_hash_t rq_hash[OCS_HW_Q_HASH_SIZE]; ocs_queue_hash_t wq_hash[OCS_HW_Q_HASH_SIZE]; /* Storage for HW queue objects */ hw_wq_t *hw_wq[OCS_HW_MAX_NUM_WQ]; hw_rq_t *hw_rq[OCS_HW_MAX_NUM_RQ]; hw_mq_t *hw_mq[OCS_HW_MAX_NUM_MQ]; hw_cq_t *hw_cq[OCS_HW_MAX_NUM_CQ]; hw_eq_t *hw_eq[OCS_HW_MAX_NUM_EQ]; uint32_t 
hw_rq_count; /**< count of hw_rq[] entries */ uint32_t hw_mrq_count; /**< count of multirq RQs */ ocs_varray_t *wq_class_array[OCS_HW_MAX_WQ_CLASS]; /**< pool per class WQs */ ocs_varray_t *wq_cpu_array[OCS_HW_MAX_WQ_CPU]; /**< pool per CPU WQs */ /* Sequence objects used in incoming frame processing */ ocs_array_t *seq_pool; /* Auto XFER RDY Buffers - protect with io_lock */ uint32_t auto_xfer_rdy_enabled:1, /**< TRUE if auto xfer rdy is enabled */ :31; ocs_pool_t *auto_xfer_rdy_buf_pool; /**< pool of ocs_hw_auto_xfer_rdy_buffer_t objects */ /** Maintain an ordered, linked list of outstanding HW commands. */ ocs_lock_t cmd_lock; ocs_list_t cmd_head; ocs_list_t cmd_pending; uint32_t cmd_head_count; sli4_link_event_t link; ocs_hw_linkcfg_e linkcfg; /**< link configuration setting */ uint32_t eth_license; /**< Ethernet license; to enable FCoE on Lancer */ struct { /** * Function + argument used to notify upper layer of domain events. * * The final argument to the callback is a generic data pointer: * - ocs_domain_record_t on OCS_HW_DOMAIN_FOUND * - ocs_domain_t on OCS_HW_DOMAIN_ALLOC_FAIL, OCS_HW_DOMAIN_ALLOC_OK, * OCS_HW_DOMAIN_FREE_FAIL, OCS_HW_DOMAIN_FREE_OK, * OCS_HW_DOMAIN_ATTACH_FAIL, OCS_HW_DOMAIN_ATTACH_OK, and * OCS_HW_DOMAIN_LOST. */ int32_t (*domain)(void *, ocs_hw_domain_event_e, void *); /** * Function + argument used to notify upper layers of port events. * * The final argument to the callback is a pointer to the affected * SLI port for all events. */ int32_t (*port)(void *, ocs_hw_port_event_e, void *); /** Function + argument used to announce arrival of unsolicited frames */ int32_t (*unsolicited)(void *, ocs_hw_sequence_t *); int32_t (*rnode)(void *, ocs_hw_remote_node_event_e, void *); int32_t (*bounce)(void (*)(void *arg), void *arg, uint32_t s_id, uint32_t d_id, uint32_t ox_id); } callback; struct { void *domain; void *port; void *unsolicited; void *rnode; void *bounce; } args; /* OCS domain objects indexed by FCFI */ int32_t first_domain_idx; /* Workaround for srb->fcfi == 0 */ ocs_domain_t *domains[SLI4_MAX_FCFI]; /* Table of FCFI values indexed by FCF_index */ uint16_t fcf_index_fcfi[SLI4_MAX_FCF_INDEX]; uint16_t fcf_indicator; ocs_hw_io_t **io; /**< pointer array of IO objects */ uint8_t *wqe_buffs; /**< array of WQE buffs mapped to IO objects */ ocs_lock_t io_lock; /**< IO lock to synchronize list access */ ocs_lock_t io_abort_lock; /**< IO lock to synchronize IO aborting */ ocs_list_t io_inuse; /**< List of IO objects in use */ ocs_list_t io_timed_wqe; /**< List of IO objects with a timed target WQE */ ocs_list_t io_wait_free; /**< List of IO objects waiting to be freed */ ocs_list_t io_free; /**< List of IO objects available for allocation */ ocs_list_t io_port_owned; /**< List of IO objects posted for chip use */ ocs_list_t io_port_dnrx; /**< List of IO objects needing auto xfer rdy buffers */ ocs_dma_t loop_map; ocs_dma_t xfer_rdy; ocs_dma_t dump_sges; ocs_dma_t rnode_mem; ocs_dma_t domain_dmem; /* domain dma mem for service params */ ocs_dma_t fcf_dmem; /* dma mem for FCF */ ocs_hw_rpi_ref_t *rpi_ref; char *hw_war_version; ocs_hw_workaround_t workaround; ocs_atomic_t io_alloc_failed_count; #if defined(OCS_DEBUG_QUEUE_HISTORY) ocs_hw_q_hist_t q_hist; #endif ocs_list_t sec_hio_wait_list; /**< BZ 161832 Workaround: Secondary HW IO context wait list */ uint32_t sec_hio_wait_count; /**< BZ 161832 Workaround: Count of IOs that were put on the * Secondary HW IO wait list */ #define HW_MAX_TCMD_THREADS 16 ocs_hw_qtop_t *qtop; /**< pointer to queue topology */ uint32_t
tcmd_wq_submit[OCS_HW_MAX_NUM_WQ]; /**< stat: wq submit count */ uint32_t tcmd_wq_complete[OCS_HW_MAX_NUM_WQ]; /**< stat: wq complete count */ ocs_timer_t wqe_timer; /**< Timer to periodically check for WQE timeouts */ ocs_timer_t watchdog_timer; /**< Timer for heartbeat */ bool expiration_logged; uint32_t in_active_wqe_timer:1, /**< TRUE if currently in active wqe timer handler */ active_wqe_timer_shutdown:1, /** TRUE if wqe timer is to be shut down */ :30; ocs_list_t iopc_list; /**< list of IO processing contexts */ ocs_lock_t iopc_list_lock; /**< lock for iopc_list */ ocs_pool_t *wq_reqtag_pool; /**< pool of hw_wq_callback_t objects */ ocs_atomic_t send_frame_seq_id; /**< send frame sequence ID */ }; typedef enum { OCS_HW_IO_INUSE_COUNT, OCS_HW_IO_FREE_COUNT, OCS_HW_IO_WAIT_FREE_COUNT, OCS_HW_IO_PORT_OWNED_COUNT, OCS_HW_IO_N_TOTAL_IO_COUNT, } ocs_hw_io_count_type_e; typedef void (*tcmd_cq_handler)(ocs_hw_t *hw, uint32_t cq_idx, void *cq_handler_arg); /* * HW queue data structures */ struct hw_eq_s { ocs_list_link_t link; /**< must be first */ sli4_qtype_e type; /**< must be second */ uint32_t instance; uint32_t entry_count; uint32_t entry_size; ocs_hw_t *hw; sli4_queue_t *queue; ocs_list_t cq_list; #if OCS_STAT_ENABLE uint32_t use_count; #endif ocs_varray_t *wq_array; /*<< array of WQs */ }; struct hw_cq_s { ocs_list_link_t link; /*<< must be first */ sli4_qtype_e type; /**< must be second */ uint32_t instance; /*<< CQ instance (cq_idx) */ uint32_t entry_count; /*<< Number of entries */ uint32_t entry_size; /*<< entry size */ hw_eq_t *eq; /*<< parent EQ */ sli4_queue_t *queue; /**< pointer to SLI4 queue */ ocs_list_t q_list; /**< list of children queues */ #if OCS_STAT_ENABLE uint32_t use_count; #endif }; typedef struct { ocs_list_link_t link; /*<< must be first */ sli4_qtype_e type; /*<< must be second */ } hw_q_t; struct hw_mq_s { ocs_list_link_t link; /*<< must be first */ sli4_qtype_e type; /*<< must be second */ uint32_t instance; uint32_t entry_count; uint32_t entry_size; hw_cq_t *cq; sli4_queue_t *queue; #if OCS_STAT_ENABLE uint32_t use_count; #endif }; struct hw_wq_s { ocs_list_link_t link; /*<< must be first */ sli4_qtype_e type; /*<< must be second */ uint32_t instance; ocs_hw_t *hw; uint32_t entry_count; uint32_t entry_size; hw_cq_t *cq; sli4_queue_t *queue; uint32_t class; uint8_t ulp; /* WQ consumed */ uint32_t wqec_set_count; /*<< how often IOs are submitted with wqec set */ uint32_t wqec_count; /*<< current wqec counter */ uint32_t free_count; /*<< free count */ uint32_t total_submit_count; /*<< total submit count */ ocs_list_t pending_list; /*<< list of IOs pending for this WQ */ /* * ---Skyhawk only --- * BZ 160124 - Driver must quarantine XRIs for target writes and * initiator reads when using DIF separates. Throw them on a * queue until another 4 similar requests are completed to ensure they * are flushed from the internal chip cache before being re-used. * There must be a separate queue per CQ because the actual chip completion * order cannot be determined. Since each WQ has a separate CQ, use the wq * associated with the IO.
* * Note: Protected by queue->lock */ ocs_quarantine_info_t quarantine_info; /* * HW IO allocated for use with Send Frame */ ocs_hw_io_t *send_frame_io; /* Stats */ #if OCS_STAT_ENABLE uint32_t use_count; /*<< use count */ uint32_t wq_pending_count; /*<< count of HW IOs that were queued on the WQ pending list */ #endif }; struct hw_rq_s { ocs_list_link_t link; /*<< must be first */ sli4_qtype_e type; /*<< must be second */ uint32_t instance; uint32_t entry_count; uint32_t hdr_entry_size; uint32_t first_burst_entry_size; uint32_t data_entry_size; uint8_t ulp; bool is_mrq; uint32_t base_mrq_id; hw_cq_t *cq; uint8_t filter_mask; /* Filter mask value */ sli4_queue_t *hdr; sli4_queue_t *first_burst; sli4_queue_t *data; ocs_hw_rq_buffer_t *hdr_buf; ocs_hw_rq_buffer_t *fb_buf; ocs_hw_rq_buffer_t *payload_buf; ocs_hw_sequence_t **rq_tracker; /* RQ tracker for this RQ */ #if OCS_STAT_ENABLE uint32_t use_count; uint32_t hdr_use_count; uint32_t fb_use_count; uint32_t payload_use_count; #endif }; typedef struct ocs_hw_global_s { const char *queue_topology_string; /**< queue topology string */ } ocs_hw_global_t; extern ocs_hw_global_t hw_global; extern hw_eq_t *hw_new_eq(ocs_hw_t *hw, uint32_t entry_count); extern hw_cq_t *hw_new_cq(hw_eq_t *eq, uint32_t entry_count); extern uint32_t hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count); extern hw_mq_t *hw_new_mq(hw_cq_t *cq, uint32_t entry_count); extern hw_wq_t *hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp); extern hw_rq_t *hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp); extern uint32_t hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp); extern void hw_del_eq(hw_eq_t *eq); extern void hw_del_cq(hw_cq_t *cq); extern void hw_del_mq(hw_mq_t *mq); extern void hw_del_wq(hw_wq_t *wq); extern void hw_del_rq(hw_rq_t *rq); extern void hw_queue_dump(ocs_hw_t *hw); extern void hw_queue_teardown(ocs_hw_t *hw); extern int32_t hw_route_rqe(ocs_hw_t *hw, ocs_hw_sequence_t *seq); extern int32_t ocs_hw_queue_hash_find(ocs_queue_hash_t *, uint16_t); extern ocs_hw_rtn_e ocs_hw_setup(ocs_hw_t *, ocs_os_handle_t, sli4_port_type_e); extern ocs_hw_rtn_e ocs_hw_init(ocs_hw_t *); extern ocs_hw_rtn_e ocs_hw_teardown(ocs_hw_t *); extern ocs_hw_rtn_e ocs_hw_reset(ocs_hw_t *, ocs_hw_reset_e); extern int32_t ocs_hw_get_num_eq(ocs_hw_t *); extern ocs_hw_rtn_e ocs_hw_get(ocs_hw_t *, ocs_hw_property_e, uint32_t *); extern void *ocs_hw_get_ptr(ocs_hw_t *, ocs_hw_property_e); extern ocs_hw_rtn_e ocs_hw_set(ocs_hw_t *, ocs_hw_property_e, uint32_t); extern ocs_hw_rtn_e ocs_hw_set_ptr(ocs_hw_t *, ocs_hw_property_e, void*); extern int32_t ocs_hw_event_check(ocs_hw_t *, uint32_t); extern int32_t ocs_hw_process(ocs_hw_t *, uint32_t, uint32_t); extern ocs_hw_rtn_e ocs_hw_command(ocs_hw_t *, uint8_t *, uint32_t, void *, void *); extern ocs_hw_rtn_e ocs_hw_callback(ocs_hw_t *, ocs_hw_callback_e, void *, void *); extern ocs_hw_rtn_e ocs_hw_port_alloc(ocs_hw_t *, ocs_sli_port_t *, ocs_domain_t *, uint8_t *); extern ocs_hw_rtn_e ocs_hw_port_attach(ocs_hw_t *, ocs_sli_port_t *, uint32_t); typedef void (*ocs_hw_port_control_cb_t)(int32_t status, uintptr_t value, void *arg); extern ocs_hw_rtn_e ocs_hw_port_control(ocs_hw_t *, ocs_hw_port_e, uintptr_t, ocs_hw_port_control_cb_t, void *); extern ocs_hw_rtn_e ocs_hw_port_free(ocs_hw_t *, ocs_sli_port_t *); extern ocs_hw_rtn_e ocs_hw_domain_alloc(ocs_hw_t *, ocs_domain_t *, uint32_t, uint32_t);
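/**
 * @brief Example: reading and writing HW properties (illustrative only).
 *
 * A minimal sketch of the ocs_hw_get()/ocs_hw_set() accessors declared
 * above; error handling is reduced to a single check, and the log text
 * is hypothetical.
 *
 * @code
 * uint32_t max_io = 0;
 *
 * if (ocs_hw_get(hw, OCS_HW_MAX_IO, &max_io) != OCS_HW_RTN_SUCCESS) {
 *	ocs_log_err(hw->os, "failed to query OCS_HW_MAX_IO\n");
 * }
 * // Request loop topology; takes effect when the port is brought up
 * ocs_hw_set(hw, OCS_HW_TOPOLOGY, OCS_HW_TOPOLOGY_LOOP);
 * @endcode
 */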
extern ocs_hw_rtn_e ocs_hw_domain_attach(ocs_hw_t *, ocs_domain_t *, uint32_t); extern ocs_hw_rtn_e ocs_hw_domain_free(ocs_hw_t *, ocs_domain_t *); extern ocs_hw_rtn_e ocs_hw_domain_force_free(ocs_hw_t *, ocs_domain_t *); extern ocs_domain_t * ocs_hw_domain_get(ocs_hw_t *, uint16_t); extern ocs_hw_rtn_e ocs_hw_node_alloc(ocs_hw_t *, ocs_remote_node_t *, uint32_t, ocs_sli_port_t *); extern ocs_hw_rtn_e ocs_hw_node_free_all(ocs_hw_t *); extern ocs_hw_rtn_e ocs_hw_node_attach(ocs_hw_t *, ocs_remote_node_t *, ocs_dma_t *); extern ocs_hw_rtn_e ocs_hw_node_detach(ocs_hw_t *, ocs_remote_node_t *); extern ocs_hw_rtn_e ocs_hw_node_free_resources(ocs_hw_t *, ocs_remote_node_t *); extern ocs_hw_rtn_e ocs_hw_node_group_alloc(ocs_hw_t *, ocs_remote_node_group_t *); extern ocs_hw_rtn_e ocs_hw_node_group_attach(ocs_hw_t *, ocs_remote_node_group_t *, ocs_remote_node_t *); extern ocs_hw_rtn_e ocs_hw_node_group_free(ocs_hw_t *, ocs_remote_node_group_t *); extern ocs_hw_io_t *ocs_hw_io_alloc(ocs_hw_t *); extern ocs_hw_io_t *ocs_hw_io_activate_port_owned(ocs_hw_t *, ocs_hw_io_t *); extern int32_t ocs_hw_io_free(ocs_hw_t *, ocs_hw_io_t *); extern uint8_t ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io); typedef int32_t (*ocs_hw_srrs_cb_t)(ocs_hw_io_t *io, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg); extern ocs_hw_rtn_e ocs_hw_srrs_send(ocs_hw_t *, ocs_hw_io_type_e, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *, ocs_remote_node_t *, ocs_hw_io_param_t *, ocs_hw_srrs_cb_t, void *); extern ocs_hw_rtn_e ocs_hw_io_send(ocs_hw_t *, ocs_hw_io_type_e, ocs_hw_io_t *, uint32_t, ocs_hw_io_param_t *, ocs_remote_node_t *, void *, void *); extern ocs_hw_rtn_e _ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode, void *cb, void *arg); extern ocs_hw_rtn_e ocs_hw_io_register_sgl(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t); extern ocs_hw_rtn_e ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type); extern ocs_hw_rtn_e ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info); extern ocs_hw_rtn_e ocs_hw_io_add_sge(ocs_hw_t *, ocs_hw_io_t *, uintptr_t, uint32_t); extern ocs_hw_rtn_e ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr); extern ocs_hw_rtn_e ocs_hw_io_abort(ocs_hw_t *, ocs_hw_io_t *, uint32_t, void *, void *); extern int32_t ocs_hw_io_get_xid(ocs_hw_t *, ocs_hw_io_t *); extern uint32_t ocs_hw_io_get_count(ocs_hw_t *, ocs_hw_io_count_type_e); extern uint32_t ocs_hw_get_rqes_produced_count(ocs_hw_t *hw); typedef void (*ocs_hw_fw_cb_t)(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg); extern ocs_hw_rtn_e ocs_hw_firmware_write(ocs_hw_t *, ocs_dma_t *, uint32_t, uint32_t, int, ocs_hw_fw_cb_t, void*); /* Function for retrieving SFP data */ typedef void (*ocs_hw_sfp_cb_t)(void *, int32_t, uint32_t, uint32_t *, void *); extern ocs_hw_rtn_e ocs_hw_get_sfp(ocs_hw_t *, uint16_t, ocs_hw_sfp_cb_t, void *); /* Function for retrieving temperature data */ typedef void (*ocs_hw_temp_cb_t)(int32_t status, uint32_t curr_temp, uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld, uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld, uint32_t fan_on_thrshld, void *arg); extern ocs_hw_rtn_e ocs_hw_get_temperature(ocs_hw_t *, ocs_hw_temp_cb_t, void*); /* Function for retrieving link statistics */ typedef void (*ocs_hw_link_stat_cb_t)(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg);
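/**
 * @brief Example: retrieving the adapter temperature (illustrative only).
 *
 * A sketch of how the ocs_hw_temp_cb_t callback declared above might be
 * implemented and passed to ocs_hw_get_temperature(); the callback name
 * and log text are hypothetical, and 'arg' is assumed to carry the ocs
 * context registered by the caller.
 *
 * @code
 * static void
 * example_temp_cb(int32_t status, uint32_t curr_temp,
 *	uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld,
 *	uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld,
 *	uint32_t fan_on_thrshld, void *arg)
 * {
 *	if (status == 0) {
 *		ocs_log_info(arg, "temperature %d (warn at %d)\n",
 *			curr_temp, warn_temp_thrshld);
 *	}
 * }
 *
 * // Completes asynchronously via a mailbox command
 * ocs_hw_get_temperature(hw, example_temp_cb, ocs);
 * @endcode
 */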
extern ocs_hw_rtn_e ocs_hw_get_link_stats(ocs_hw_t *, uint8_t req_ext_counters, uint8_t clear_overflow_flags, uint8_t clear_all_counters, ocs_hw_link_stat_cb_t, void*); /* Function for retrieving host statistics */ typedef void (*ocs_hw_host_stat_cb_t)(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg); extern ocs_hw_rtn_e ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t, void *arg); extern ocs_hw_rtn_e ocs_hw_raise_ue(ocs_hw_t *, uint8_t); typedef void (*ocs_hw_dump_get_cb_t)(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg); extern ocs_hw_rtn_e ocs_hw_dump_get(ocs_hw_t *, ocs_dma_t *, uint32_t, uint32_t, ocs_hw_dump_get_cb_t, void *); extern ocs_hw_rtn_e ocs_hw_set_dump_location(ocs_hw_t *, uint32_t, ocs_dma_t *, uint8_t); typedef void (*ocs_get_port_protocol_cb_t)(int32_t status, ocs_hw_port_protocol_e port_protocol, void *arg); extern ocs_hw_rtn_e ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func, ocs_get_port_protocol_cb_t mgmt_cb, void* ul_arg); typedef void (*ocs_set_port_protocol_cb_t)(int32_t status, void *arg); extern ocs_hw_rtn_e ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e profile, uint32_t pci_func, ocs_set_port_protocol_cb_t mgmt_cb, void* ul_arg); typedef void (*ocs_get_profile_list_cb_t)(int32_t status, ocs_hw_profile_list_t*, void *arg); extern ocs_hw_rtn_e ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t mgmt_cb, void *arg); typedef void (*ocs_get_active_profile_cb_t)(int32_t status, uint32_t active_profile, void *arg); extern ocs_hw_rtn_e ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t mgmt_cb, void *arg); typedef void (*ocs_set_active_profile_cb_t)(int32_t status, void *arg); extern ocs_hw_rtn_e ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t mgmt_cb, uint32_t profile_id, void *arg); typedef void (*ocs_get_nvparms_cb_t)(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *arg); extern ocs_hw_rtn_e ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t mgmt_cb, void *arg); typedef void (*ocs_set_nvparms_cb_t)(int32_t status, void *arg); extern ocs_hw_rtn_e ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t mgmt_cb, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *arg); extern int32_t ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec); extern void ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq); extern void ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid); extern void ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid); extern int32_t hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe); typedef void (*ocs_hw_dump_clear_cb_t)(int32_t status, void *arg); extern ocs_hw_rtn_e ocs_hw_dump_clear(ocs_hw_t *, ocs_hw_dump_clear_cb_t, void *); extern uint8_t ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io); extern uint8_t ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri); extern ocs_hw_io_t * ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t indicator); extern uint32_t ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri); extern ocs_hw_rtn_e ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri); extern int32_t ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io); +extern ocs_hw_rtn_e ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts); +extern uint32_t ocs_hw_get_config_persistent_topology(ocs_hw_t *hw); typedef struct { /* structure elements used by HW */ ocs_hw_t
*hw; /**< pointer to HW */ hw_wq_callback_t *wqcb; /**< WQ callback object, request tag */ ocs_hw_wqe_t wqe; /**< WQE buffer object (may be queued on WQ pending list) */ void (*callback)(int32_t status, void *arg); /**< final callback function */ void *arg; /**< final callback argument */ /* General purpose elements */ ocs_hw_sequence_t *seq; ocs_dma_t payload; /**< a payload DMA buffer */ } ocs_hw_send_frame_context_t; #define OCS_HW_OBJECT_G5 0xfeaa0001 #define OCS_HW_OBJECT_G6 0xfeaa0003 #define OCS_FILE_TYPE_GROUP 0xf7 #define OCS_FILE_ID_GROUP 0xa2 struct ocs_hw_grp_hdr { uint32_t size; uint32_t magic_number; uint32_t word2; uint8_t rev_name[128]; uint8_t date[12]; uint8_t revision[32]; }; ocs_hw_rtn_e ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload, ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg); /* RQ completion handlers for RQ pair mode */ extern int32_t ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe); extern ocs_hw_rtn_e ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq); extern int32_t ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe); extern int32_t ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe); extern ocs_hw_rtn_e ocs_hw_rqpair_init(ocs_hw_t *hw); extern ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers); extern uint8_t ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf); extern ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io); extern void ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io); extern void ocs_hw_rqpair_teardown(ocs_hw_t *hw); extern ocs_hw_rtn_e ocs_hw_rx_allocate(ocs_hw_t *hw); extern ocs_hw_rtn_e ocs_hw_rx_post(ocs_hw_t *hw); extern void ocs_hw_rx_free(ocs_hw_t *hw); extern void ocs_hw_unsol_process_bounce(void *arg); typedef int32_t (*ocs_hw_async_cb_t)(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg); extern int32_t ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg); static inline void ocs_hw_sequence_copy(ocs_hw_sequence_t *dst, ocs_hw_sequence_t *src) { /* Copy the src to dst, then zero out the linked list link */ *dst = *src; ocs_memset(&dst->link, 0, sizeof(dst->link)); } static inline ocs_hw_rtn_e ocs_hw_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq) { /* Only RQ pair mode is supported */ return ocs_hw_rqpair_sequence_free(hw, seq); } /* HW WQ request tag API */ extern ocs_hw_rtn_e ocs_hw_reqtag_init(ocs_hw_t *hw); extern hw_wq_callback_t *ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg); extern void ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb); extern hw_wq_callback_t *ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index); extern void ocs_hw_reqtag_reset(ocs_hw_t *hw); extern uint32_t ocs_hw_dif_blocksize(ocs_hw_dif_info_t *dif_info); extern int32_t ocs_hw_dif_mem_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem); extern int32_t ocs_hw_dif_wire_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem); extern uint32_t ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn);
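/**
 * @brief Example: WQ request tag allocation (illustrative only).
 *
 * A minimal sketch of pairing ocs_hw_reqtag_alloc() with
 * ocs_hw_reqtag_free() using the API declared above; the completion
 * handler name is hypothetical.
 *
 * @code
 * static void
 * example_wqe_done(void *arg, uint8_t *cqe, int32_t status)
 * {
 *	// 'arg' is the pointer registered at allocation time
 * }
 *
 * hw_wq_callback_t *wqcb;
 *
 * wqcb = ocs_hw_reqtag_alloc(hw, example_wqe_done, io);
 * if (wqcb == NULL) {
 *	return OCS_HW_RTN_NO_RESOURCES;
 * }
 * // ... place wqcb->instance_index in the WQE as the request tag ...
 * ocs_hw_reqtag_free(hw, wqcb);
 * @endcode
 */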
self->name : "unknown";}), ocs_thread_getcpu()); #else #define CPUTRACE(...) #endif /* Two levels of macro needed due to expansion */ #define HW_FWREV(a,b,c,d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) | ((uint64_t)(c) << 16) | ((uint64_t)(d))) #define HW_FWREV_1(x) HW_FWREV(x) #define OCS_FW_VER_STR2(a,b,c,d) #a "." #b "." #c "." #d #define OCS_FW_VER_STR(x) OCS_FW_VER_STR2(x) #define OCS_MIN_FW_VER_LANCER 10,4,255,0 #define OCS_MIN_FW_VER_SKYHAWK 10,4,255,0 extern void ocs_hw_workaround_setup(struct ocs_hw_s *hw); /** * @brief Defines the number of the RQ buffers for each RQ */ #ifndef OCS_HW_RQ_NUM_HDR #define OCS_HW_RQ_NUM_HDR 1024 #endif #ifndef OCS_HW_RQ_NUM_PAYLOAD #define OCS_HW_RQ_NUM_PAYLOAD 1024 #endif /** * @brief Defines the size of the RQ buffers used for each RQ */ #ifndef OCS_HW_RQ_SIZE_HDR #define OCS_HW_RQ_SIZE_HDR 128 #endif #ifndef OCS_HW_RQ_SIZE_PAYLOAD #define OCS_HW_RQ_SIZE_PAYLOAD 1024 #endif /* * @brief Define the maximum number of multi-receive queues */ #ifndef OCS_HW_MAX_MRQS #define OCS_HW_MAX_MRQS 8 #endif /* * @brief Define count of when to set the WQEC bit in a submitted * WQE, causing a consummed/released completion to be posted. */ #ifndef OCS_HW_WQEC_SET_COUNT #define OCS_HW_WQEC_SET_COUNT 32 #endif /* * @brief Send frame timeout in seconds */ #ifndef OCS_HW_SEND_FRAME_TIMEOUT #define OCS_HW_SEND_FRAME_TIMEOUT 10 #endif /* * @brief FDT Transfer Hint value, reads greater than this value * will be segmented to implement fairness. A value of zero disables * the feature. */ #ifndef OCS_HW_FDT_XFER_HINT #define OCS_HW_FDT_XFER_HINT 8192 #endif #endif /* !_OCS_HW_H */ diff --git a/sys/dev/ocs_fc/ocs_ioctl.c b/sys/dev/ocs_fc/ocs_ioctl.c index 46dd353edfd8..b8cc98b89bef 100644 --- a/sys/dev/ocs_fc/ocs_ioctl.c +++ b/sys/dev/ocs_fc/ocs_ioctl.c @@ -1,1226 +1,1226 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
#endif /* !_OCS_HW_H */ diff --git a/sys/dev/ocs_fc/ocs_ioctl.c b/sys/dev/ocs_fc/ocs_ioctl.c index 46dd353edfd8..b8cc98b89bef 100644 --- a/sys/dev/ocs_fc/ocs_ioctl.c +++ b/sys/dev/ocs_fc/ocs_ioctl.c @@ -1,1226 +1,1226 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include "ocs.h" #include "ocs_utils.h" /* kernel headers for cdev/ioctl, sysctl and firmware(9) support (reconstructed; the original bracketed names were lost in extraction) */ #include <sys/ioccom.h> #include <sys/types.h> #include <sys/malloc.h> #include <sys/conf.h> #include <sys/sysctl.h> #include <sys/linker.h> #include <sys/firmware.h> static d_open_t ocs_open; static d_close_t ocs_close; static d_ioctl_t ocs_ioctl; static struct cdevsw ocs_cdevsw = { .d_version = D_VERSION, .d_open = ocs_open, .d_close = ocs_close, .d_ioctl = ocs_ioctl, .d_name = "ocs_fc" }; int ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status); static int ocs_open(struct cdev *cdev, int flags, int fmt, struct thread *td) { return 0; } static int ocs_close(struct cdev *cdev, int flag, int fmt, struct thread *td) { return 0; } static int32_t __ocs_ioctl_mbox_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { struct ocs_softc *ocs = arg; /* wait for the ioctl to sleep before calling wakeup */ mtx_lock(&ocs->dbg_lock); mtx_unlock(&ocs->dbg_lock); wakeup(arg); return 0; } static int ocs_process_sli_config (ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd, ocs_dma_t *dma) { sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)mcmd->payload; if (sli_config->emb) { sli4_req_hdr_t *req = (sli4_req_hdr_t *)sli_config->payload.embed; switch (req->opcode) { case SLI4_OPC_COMMON_READ_OBJECT: if (mcmd->out_bytes) { sli4_req_common_read_object_t *rdobj = (sli4_req_common_read_object_t *)sli_config->payload.embed; if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) { device_printf(ocs->dev, "%s: COMMON_READ_OBJECT - %lld allocation failed\n", __func__, (unsigned long long)mcmd->out_bytes); return ENXIO; } memset(dma->virt, 0, mcmd->out_bytes); rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64; rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes; rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys); rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys); } break; case SLI4_OPC_COMMON_WRITE_OBJECT: { sli4_req_common_write_object_t *wrobj = (sli4_req_common_write_object_t *)sli_config->payload.embed; if (ocs_dma_alloc(ocs, dma, wrobj->desired_write_length, 4096)) { device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - %d allocation failed\n", __func__, wrobj->desired_write_length); return ENXIO; } /* setup the descriptor */ wrobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64; wrobj->host_buffer_descriptor[0].buffer_length = wrobj->desired_write_length; wrobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys); wrobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys); /* copy the data into the DMA buffer */ copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes); } break; case SLI4_OPC_COMMON_DELETE_OBJECT: break; case SLI4_OPC_COMMON_READ_OBJECT_LIST: if (mcmd->out_bytes) { sli4_req_common_read_object_list_t *rdobj = (sli4_req_common_read_object_list_t *)sli_config->payload.embed; if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) { device_printf(ocs->dev, "%s: COMMON_READ_OBJECT_LIST - %lld allocation failed\n", __func__,(unsigned long long) mcmd->out_bytes); return ENXIO; } memset(dma->virt, 0, mcmd->out_bytes); rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64; rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes; rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys); rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys); } break; case SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA: break; default: device_printf(ocs->dev, "%s: in=%p
(%lld)\n", __func__, (void *)(uintptr_t)mcmd->in_addr, (unsigned long long)mcmd->in_bytes, (void *)(uintptr_t)mcmd->out_addr, (unsigned long long)mcmd->out_bytes); device_printf(ocs->dev, "%s: unknown (opc=%#x)\n", __func__, req->opcode); hexdump(mcmd, mcmd->size, NULL, 0); break; } } else { uint32_t max_bytes = max(mcmd->in_bytes, mcmd->out_bytes); if (ocs_dma_alloc(ocs, dma, max_bytes, 4096)) { device_printf(ocs->dev, "%s: non-embedded - %u allocation failed\n", __func__, max_bytes); return ENXIO; } copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes); sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys); sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys); sli_config->payload.mem.length = max_bytes; } return 0; } static int ocs_process_mbx_ioctl(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd) { ocs_dma_t dma = { 0 }; if ((ELXU_BSD_MAGIC != mcmd->magic) || (sizeof(ocs_ioctl_elxu_mbox_t) != mcmd->size)) { device_printf(ocs->dev, "%s: malformed command m=%08x s=%08x\n", __func__, mcmd->magic, mcmd->size); return EINVAL; } switch(((sli4_mbox_command_header_t *)mcmd->payload)->command) { case SLI4_MBOX_COMMAND_SLI_CONFIG: if (ENXIO == ocs_process_sli_config(ocs, mcmd, &dma)) return ENXIO; break; case SLI4_MBOX_COMMAND_READ_REV: case SLI4_MBOX_COMMAND_READ_STATUS: case SLI4_MBOX_COMMAND_READ_LNK_STAT: break; default: device_printf(ocs->dev, "command %d\n",((sli4_mbox_command_header_t *)mcmd->payload)->command); device_printf(ocs->dev, "%s, command not support\n", __func__); goto no_support; break; } /* * The dbg_lock usage here insures the command completion code * (__ocs_ioctl_mbox_cb), which calls wakeup(), does not run until * after first calling msleep() * * 1. ioctl grabs dbg_lock * 2. ioctl issues command * if the command completes before msleep(), the * command completion code (__ocs_ioctl_mbox_cb) will spin * on dbg_lock before calling wakeup() * 3. ioctl calls msleep which releases dbg_lock before sleeping * and reacquires it before waking * 4. command completion handler acquires the dbg_lock, immediately * releases it, and calls wakeup * 5. msleep returns, re-acquiring the lock * 6. ioctl code releases the lock */ mtx_lock(&ocs->dbg_lock); if (ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT, __ocs_ioctl_mbox_cb, ocs)) { device_printf(ocs->dev, "%s: command- %x failed\n", __func__, ((sli4_mbox_command_header_t *)mcmd->payload)->command); } msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0); mtx_unlock(&ocs->dbg_lock); if( SLI4_MBOX_COMMAND_SLI_CONFIG == ((sli4_mbox_command_header_t *)mcmd->payload)->command && mcmd->out_bytes && dma.virt) { copyout(dma.virt, (void *)(uintptr_t)mcmd->out_addr, mcmd->out_bytes); } no_support: ocs_dma_free(ocs, &dma); return 0; } /** * @brief perform requested Elx CoreDump helper function * * The Elx CoreDump facility used for BE3 diagnostics uses the OCS_IOCTL_CMD_ECD_HELPER * ioctl function to execute requested "help" functions * * @param ocs pointer to ocs structure * @param req pointer to helper function request * * @return returns 0 for success, a negative error code value for failure. 
/** * @brief perform requested Elx CoreDump helper function * * The Elx CoreDump facility used for BE3 diagnostics uses the OCS_IOCTL_CMD_ECD_HELPER * ioctl function to execute requested "help" functions * * @param ocs pointer to ocs structure * @param req pointer to helper function request * * @return returns 0 for success, a negative error code value for failure. */ static int ocs_process_ecd_helper (ocs_t *ocs, ocs_ioctl_ecd_helper_t *req) { int32_t rc = 0; uint8_t v8; uint16_t v16; uint32_t v32; /* Check the BAR read/write commands for valid bar */ switch(req->cmd) { case OCS_ECD_HELPER_BAR_READ8: case OCS_ECD_HELPER_BAR_READ16: case OCS_ECD_HELPER_BAR_READ32: case OCS_ECD_HELPER_BAR_WRITE8: case OCS_ECD_HELPER_BAR_WRITE16: case OCS_ECD_HELPER_BAR_WRITE32: if (req->bar >= PCI_MAX_BAR) { device_printf(ocs->dev, "Error: bar %d out of range\n", req->bar); return -EFAULT; } if (ocs->reg[req->bar].res == NULL) { device_printf(ocs->dev, "Error: bar %d not defined\n", req->bar); return -EFAULT; } break; default: break; } switch(req->cmd) { case OCS_ECD_HELPER_CFG_READ8: v8 = ocs_config_read8(ocs, req->offset); req->data = v8; break; case OCS_ECD_HELPER_CFG_READ16: v16 = ocs_config_read16(ocs, req->offset); req->data = v16; break; case OCS_ECD_HELPER_CFG_READ32: v32 = ocs_config_read32(ocs, req->offset); req->data = v32; break; case OCS_ECD_HELPER_CFG_WRITE8: ocs_config_write8(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_CFG_WRITE16: ocs_config_write16(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_CFG_WRITE32: ocs_config_write32(ocs, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_READ8: req->data = ocs_reg_read8(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_READ16: req->data = ocs_reg_read16(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_READ32: req->data = ocs_reg_read32(ocs, req->bar, req->offset); break; case OCS_ECD_HELPER_BAR_WRITE8: ocs_reg_write8(ocs, req->bar, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_WRITE16: ocs_reg_write16(ocs, req->bar, req->offset, req->data); break; case OCS_ECD_HELPER_BAR_WRITE32: ocs_reg_write32(ocs, req->bar, req->offset, req->data); break; default: device_printf(ocs->dev, "Invalid helper command=%d\n", req->cmd); break; } return rc; } static int ocs_ioctl(struct cdev *cdev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int status = 0; struct ocs_softc *ocs = cdev->si_drv1; device_t dev = ocs->dev; switch (cmd) { case OCS_IOCTL_CMD_ELXU_MBOX: { /* "copyin" done by kernel; thus, just dereference addr */ ocs_ioctl_elxu_mbox_t *mcmd = (void *)addr; status = ocs_process_mbx_ioctl(ocs, mcmd); break; } case OCS_IOCTL_CMD_ECD_HELPER: { /* "copyin" done by kernel; thus, just dereference addr */ ocs_ioctl_ecd_helper_t *req = (void *)addr; status = ocs_process_ecd_helper(ocs, req); break; } case OCS_IOCTL_CMD_VPORT: { int32_t rc = 0; ocs_ioctl_vport_t *req = (ocs_ioctl_vport_t*) addr; ocs_domain_t *domain; domain = ocs_domain_get_instance(ocs, req->domain_index); if (domain == NULL) { device_printf(ocs->dev, "domain [%d] not found\n", req->domain_index); return -EFAULT; } if (req->req_create) { rc = ocs_sport_vport_new(domain, req->wwpn, req->wwnn, UINT32_MAX, req->enable_ini, req->enable_tgt, NULL, NULL, TRUE); } else { rc = ocs_sport_vport_del(ocs, domain, req->wwpn, req->wwnn); } return rc; } case OCS_IOCTL_CMD_GET_DDUMP: { ocs_ioctl_ddump_t *req = (ocs_ioctl_ddump_t*) addr; ocs_textbuf_t textbuf; int x; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { device_printf(ocs->dev, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } switch (req->args.action) { case OCS_IOCTL_DDUMP_GET: case OCS_IOCTL_DDUMP_GET_SAVED: { uint32_t remaining; uint32_t written; uint32_t idx; int32_t n; ocs_textbuf_t *ptbuf = NULL; uint32_t flags = 0; if (req->args.action == OCS_IOCTL_DDUMP_GET_SAVED) { if
(ocs_textbuf_initialized(&ocs->ddump_saved)) { ptbuf = &ocs->ddump_saved; } } else { if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } /* translate IOCTL ddump flags to ddump flags */ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_WQES) { flags |= OCS_DDUMP_FLAGS_WQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_CQES) { flags |= OCS_DDUMP_FLAGS_CQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_MQES) { flags |= OCS_DDUMP_FLAGS_MQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_RQES) { flags |= OCS_DDUMP_FLAGS_RQES; } if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_EQES) { flags |= OCS_DDUMP_FLAGS_EQES; } /* Try 3 times to get the dump */ for(x=0; x<3; x++) { if (ocs_ddump(ocs, &textbuf, flags, req->args.q_entries) != 0) { ocs_textbuf_reset(&textbuf); } else { /* Success */ x = 0; break; } } if (x != 0) { /* Retries failed */ ocs_log_test(ocs, "ocs_ddump failed\n"); } else { ptbuf = &textbuf; } } written = 0; if (ptbuf != NULL) { /* Process each textbuf segment */ remaining = req->user_buffer_len; for (idx = 0; remaining; idx++) { n = ocs_textbuf_ext_get_written(ptbuf, idx); if (n < 0) { break; } if ((uint32_t)n >= remaining) { n = (int32_t)remaining; } if (ocs_copy_to_user(req->user_buffer + written, ocs_textbuf_ext_get_buffer(ptbuf, idx), n)) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } written += n; remaining -= (uint32_t)n; } } req->bytes_written = written; if (ptbuf == &textbuf) { ocs_textbuf_free(ocs, &textbuf); } break; } case OCS_IOCTL_DDUMP_CLR_SAVED: ocs_clear_saved_ddump(ocs); break; default: ocs_log_err(ocs, "Error: invalid ddump action %d\n", req->args.action); break; } break; } case OCS_IOCTL_CMD_DRIVER_INFO: { ocs_ioctl_driver_info_t *req = (ocs_ioctl_driver_info_t*)addr; ocs_memset(req, 0, sizeof(*req)); req->pci_vendor = ocs->pci_vendor; req->pci_device = ocs->pci_device; ocs_strncpy(req->businfo, ocs->businfo, sizeof(req->businfo)); req->sli_intf = ocs_config_read32(ocs, SLI4_INTF_REG); ocs_strncpy(req->desc, device_get_desc(dev), sizeof(req->desc)); ocs_strncpy(req->fw_rev, ocs->fwrev, sizeof(req->fw_rev)); if (ocs->domain && ocs->domain->sport) { *((uint64_t*)req->hw_addr.fc.wwnn) = ocs_htobe64(ocs->domain->sport->wwnn); *((uint64_t*)req->hw_addr.fc.wwpn) = ocs_htobe64(ocs->domain->sport->wwpn); } ocs_strncpy(req->serialnum, ocs->serialnum, sizeof(req->serialnum)); break; } case OCS_IOCTL_CMD_MGMT_LIST: { ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr; ocs_textbuf_t textbuf; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get_list(ocs, &textbuf); if (ocs_textbuf_get_written(&textbuf)) { if (ocs_copy_to_user(req->user_buffer, ocs_textbuf_get_buffer(&textbuf), ocs_textbuf_get_written(&textbuf))) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } } req->bytes_written = ocs_textbuf_get_written(&textbuf); ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_GET_ALL: { ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr; ocs_textbuf_t textbuf; int32_t n; uint32_t idx; uint32_t copied = 0; /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get_all(ocs, &textbuf); for (idx = 0; (n = ocs_textbuf_ext_get_written(&textbuf, idx)) > 0; idx++) { if (ocs_copy_to_user(req->user_buffer + copied,
ocs_textbuf_ext_get_buffer(&textbuf, idx), ocs_textbuf_ext_get_written(&textbuf, idx))) { ocs_log_err(ocs, "Error: ocs_copy_to_user failed\n"); } copied += n; } req->bytes_written = copied; ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_GET: { ocs_ioctl_cmd_get_t* req = (ocs_ioctl_cmd_get_t*)addr; ocs_textbuf_t textbuf; char name[OCS_MGMT_MAX_NAME]; /* Copy the name value in from user space */ if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) { ocs_log_test(ocs, "ocs_copy_from_user failed\n"); ocs_ioctl_free(ocs, req, sizeof(ocs_ioctl_cmd_get_t)); return -EFAULT; } /* Build a text buffer */ if (ocs_textbuf_alloc(ocs, &textbuf, req->value_length)) { ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n"); return -EFAULT; } ocs_mgmt_get(ocs, name, &textbuf); if (ocs_textbuf_get_written(&textbuf)) { if (ocs_copy_to_user(req->value, ocs_textbuf_get_buffer(&textbuf), ocs_textbuf_get_written(&textbuf))) { ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__); } } req->value_length = ocs_textbuf_get_written(&textbuf); ocs_textbuf_free(ocs, &textbuf); break; } case OCS_IOCTL_CMD_MGMT_SET: { char name[OCS_MGMT_MAX_NAME]; char value[OCS_MGMT_MAX_VALUE]; ocs_ioctl_cmd_set_t* req = (ocs_ioctl_cmd_set_t*)addr; /* Copy the name in from user space */ if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) { ocs_log_test(ocs, "Error: copy from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } /* Copy the value in from user space */ if (ocs_copy_from_user(value, req->value, OCS_MGMT_MAX_VALUE)) { ocs_log_test(ocs, "Error: copy from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } - req->result = ocs_mgmt_set(ocs, req->name, req->value); + req->result = ocs_mgmt_set(ocs, name, value); break; } case OCS_IOCTL_CMD_MGMT_EXEC: { ocs_ioctl_action_t* req = (ocs_ioctl_action_t*) addr; char action_name[OCS_MGMT_MAX_NAME]; if (ocs_copy_from_user(action_name, req->name, sizeof(action_name))) { ocs_log_test(ocs, "Error: copy req.name from user failed\n"); ocs_ioctl_free(ocs, req, sizeof(*req)); return -EFAULT; } req->result = ocs_mgmt_exec(ocs, action_name, req->arg_in, req->arg_in_length, req->arg_out, req->arg_out_length); break; } default: ocs_log_test(ocs, "Error: unknown cmd %#lx\n", cmd); status = -ENOTTY; break; } return status; } static void ocs_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg) { ocs_mgmt_fw_write_result_t *result = arg; result->status = status; result->actual_xfer = actual_write_length; result->change_status = change_status; ocs_sem_v(&(result->semaphore)); } int ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status) { int rc = 0; uint32_t bytes_left; uint32_t xfer_size; uint32_t offset; ocs_dma_t dma; int last = 0; ocs_mgmt_fw_write_result_t result; ocs_sem_init(&(result.semaphore), 0, "fw_write"); bytes_left = buf_len; offset = 0; if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) { ocs_log_err(ocs, "ocs_firmware_write: malloc failed\n"); return -ENOMEM; } while (bytes_left > 0) { if (bytes_left > FW_WRITE_BUFSIZE) { xfer_size = FW_WRITE_BUFSIZE; } else { xfer_size = bytes_left; } ocs_memcpy(dma.virt, buf + offset, xfer_size); if (bytes_left == xfer_size) { last = 1; } ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_fw_write_cb, &result); if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { rc = -ENXIO; break; } if (result.actual_xfer == 0 || result.status != 0) { rc = -EFAULT; break; } if
(last) { *change_status = result.change_status; } bytes_left -= result.actual_xfer; offset += result.actual_xfer; } ocs_dma_free(ocs, &dma); return rc; } static int ocs_sys_fwupgrade(SYSCTL_HANDLER_ARGS) { char file_name[256] = {0}; char fw_change_status; uint32_t rc = 1; ocs_t *ocs = (ocs_t *)arg1; const struct firmware *fw; const struct ocs_hw_grp_hdr *fw_image; rc = sysctl_handle_string(oidp, file_name, sizeof(file_name), req); if (rc || !req->newptr) return rc; fw = firmware_get(file_name); if (fw == NULL) { device_printf(ocs->dev, "Unable to get Firmware. " "Make sure %s is copied to /boot/modules\n", file_name); return ENOENT; } fw_image = (const struct ocs_hw_grp_hdr *)fw->data; /* Check if firmware provided is compatible with this particular * Adapter or not */ if ((ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G5) && (ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G6)) { device_printf(ocs->dev, "Invalid FW image found Magic: 0x%x Size: %zu\n", ocs_be32toh(fw_image->magic_number), fw->datasize); rc = -1; goto exit; } if (!strncmp(ocs->fw_version, fw_image->revision, strnlen(fw_image->revision, 16))) { device_printf(ocs->dev, "No update required. " "Firmware is already up to date.\n"); rc = 0; goto exit; } device_printf(ocs->dev, "Upgrading Firmware from %s to %s\n", ocs->fw_version, fw_image->revision); rc = ocs_firmware_write(ocs, fw->data, fw->datasize, &fw_change_status); if (rc) { ocs_log_err(ocs, "Firmware update failed with status = %d\n", rc); } else { ocs_log_info(ocs, "Firmware updated successfully\n"); switch (fw_change_status) { case 0x00: device_printf(ocs->dev, "No reset needed, new firmware is active.\n"); break; case 0x01: device_printf(ocs->dev, "A physical device reset (host reboot) is " "needed to activate the new firmware\n"); break; case 0x02: case 0x03: device_printf(ocs->dev, "Firmware is resetting to activate the new " "firmware; host reboot is needed\n"); break; default: ocs_log_warn(ocs, "Unexpected value change_status: %d\n", fw_change_status); break; } } exit: /* Release firmware */ firmware_put(fw, FIRMWARE_UNLOAD); return rc; } static int ocs_sysctl_wwnn(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; char old[64]; char new[64]; uint64_t *wwnn = NULL; ocs_xport_t *xport = ocs->xport; if (xport->req_wwnn) { wwnn = &xport->req_wwnn; memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx", (unsigned long long) *wwnn); } else { wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx", (unsigned long long) ocs_htobe64(*wwnn)); } /* Read wwnn */ if (!req->newptr) { return (sysctl_handle_string(oidp, old, sizeof(old), req)); } /* Configure port wwnn */ rc = sysctl_handle_string(oidp, new, sizeof(new), req); if (rc) return (rc); if (strncmp(old, new, strlen(old)) == 0) { return 0; } return (set_req_wwnn(ocs, NULL, new)); } static int ocs_sysctl_wwpn(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; char old[64]; char new[64]; uint64_t *wwpn = NULL; ocs_xport_t *xport = ocs->xport; if (xport->req_wwpn) { wwpn = &xport->req_wwpn; memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx", (unsigned long long) *wwpn); } else { wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT); memset(old, 0, sizeof(old)); snprintf(old, sizeof(old), "0x%llx", (unsigned long long) ocs_htobe64(*wwpn)); } /* Read wwpn */ if (!req->newptr) { return (sysctl_handle_string(oidp, old, sizeof(old), req)); } /* Configure port wwpn */ rc = sysctl_handle_string(oidp, new, sizeof(new),
req); if (rc) return (rc); if (strncmp(old, new, strlen(old)) == 0) { return 0; } return (set_req_wwpn(ocs, NULL, new)); } static int ocs_sysctl_current_topology(SYSCTL_HANDLER_ARGS) { ocs_t *ocs = oidp->oid_arg1; uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_TOPOLOGY, &value); return (sysctl_handle_int(oidp, &value, 0, req)); } static int ocs_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { ocs_t *ocs = oidp->oid_arg1; uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &value); return (sysctl_handle_int(oidp, &value, 0, req)); } static int ocs_sysctl_config_topology(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; uint32_t old_value; uint32_t new_value; char buf[64]; ocs_hw_get(&ocs->hw, OCS_HW_CONFIG_TOPOLOGY, &old_value); /* Read topology */ if (!req->newptr) { return (sysctl_handle_int(oidp, &old_value, 0, req)); } /* Configure topology */ rc = sysctl_handle_int(oidp, &new_value, 0, req); if (rc) return (rc); if (new_value == old_value) { return 0; } snprintf(buf, sizeof(buf), "%d", new_value); rc = set_configured_topology(ocs, NULL, buf); return rc; } static int ocs_sysctl_config_speed(SYSCTL_HANDLER_ARGS) { uint32_t rc = 1; ocs_t *ocs = oidp->oid_arg1; uint32_t old_value; uint32_t new_value; char buf[64]; ocs_hw_get(&ocs->hw, OCS_HW_LINK_CONFIG_SPEED, &old_value); /* Read speed */ if (!req->newptr) { return (sysctl_handle_int(oidp, &old_value, 0, req)); } /* Configure speed */ rc = sysctl_handle_int(oidp, &new_value, 0, req); if (rc) return (rc); if (new_value == old_value) { return 0; } snprintf(buf, sizeof(buf), "%d", new_value); rc = set_configured_speed(ocs, NULL, buf); return rc; } static int ocs_sysctl_fcid(SYSCTL_HANDLER_ARGS) { ocs_t *ocs = oidp->oid_arg1; char buf[64]; memset(buf, 0, sizeof(buf)); if (ocs->domain && ocs->domain->attached) { snprintf(buf, sizeof(buf), "0x%06x", ocs->domain->sport->fc_id); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int ocs_sysctl_port_state(SYSCTL_HANDLER_ARGS) { char new[256] = {0}; uint32_t rc = 1; ocs_xport_stats_t old; ocs_t *ocs = (ocs_t *)arg1; ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &old); /* Read port state */ if (!req->newptr) { snprintf(new, sizeof(new), "%s", (old.value == OCS_XPORT_PORT_OFFLINE) ?
"offline" : "online"); return (sysctl_handle_string(oidp, new, sizeof(new), req)); } /*Configure port state*/ rc = sysctl_handle_string(oidp, new, sizeof(new), req); if (rc) return (rc); if (ocs_strcasecmp(new, "offline") == 0) { if (old.value == OCS_XPORT_PORT_OFFLINE) { return (0); } ocs_log_debug(ocs, "Setting port to %s\n", new); rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc != 0) { ocs_log_err(ocs, "Setting port to offline failed\n"); } } else if (ocs_strcasecmp(new, "online") == 0) { if (old.value == OCS_XPORT_PORT_ONLINE) { return (0); } ocs_log_debug(ocs, "Setting port to %s\n", new); rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc != 0) { ocs_log_err(ocs, "Setting port to online failed\n"); } } else { ocs_log_err(ocs, "Unsupported link state %s\n", new); rc = 1; } return (rc); } static int ocs_sysctl_vport_wwpn(SYSCTL_HANDLER_ARGS) { ocs_fcport *fcp = oidp->oid_arg1; char str_wwpn[64]; memset(str_wwpn, 0, sizeof(str_wwpn)); snprintf(str_wwpn, sizeof(str_wwpn), "0x%llx", (unsigned long long)fcp->vport->wwpn); return (sysctl_handle_string(oidp, str_wwpn, sizeof(str_wwpn), req)); } static int ocs_sysctl_vport_wwnn(SYSCTL_HANDLER_ARGS) { ocs_fcport *fcp = oidp->oid_arg1; char str_wwnn[64]; memset(str_wwnn, 0, sizeof(str_wwnn)); snprintf(str_wwnn, sizeof(str_wwnn), "0x%llx", (unsigned long long)fcp->vport->wwnn); return (sysctl_handle_string(oidp, str_wwnn, sizeof(str_wwnn), req)); } /** * @brief Initialize sysctl * * Initialize sysctl so elxsdkutil can query device information. * * @param ocs pointer to ocs * @return void */ static void ocs_sysctl_init(ocs_t *ocs) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(ocs->dev); struct sysctl_oid *tree = device_get_sysctl_tree(ocs->dev); struct sysctl_oid *vtree; const char *str = NULL; char name[16]; uint32_t rev, if_type, family, i; ocs_fcport *fcp = NULL; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "devid", CTLFLAG_RD, NULL, pci_get_devid(ocs->dev), "Device ID"); memset(ocs->modeldesc, 0, sizeof(ocs->modeldesc)); if (0 == pci_get_vpd_ident(ocs->dev, &str)) { snprintf(ocs->modeldesc, sizeof(ocs->modeldesc), "%s", str); } SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "modeldesc", CTLFLAG_RD, ocs->modeldesc, 0, "Model Description"); memset(ocs->serialnum, 0, sizeof(ocs->serialnum)); if (0 == pci_get_vpd_readonly(ocs->dev, "SN", &str)) { snprintf(ocs->serialnum, sizeof(ocs->serialnum), "%s", str); } SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sn", CTLFLAG_RD, ocs->serialnum, 0, "Serial Number"); ocs_hw_get(&ocs->hw, OCS_HW_SLI_REV, &rev); ocs_hw_get(&ocs->hw, OCS_HW_IF_TYPE, &if_type); ocs_hw_get(&ocs->hw, OCS_HW_SLI_FAMILY, &family); memset(ocs->fwrev, 0, sizeof(ocs->fwrev)); snprintf(ocs->fwrev, sizeof(ocs->fwrev), "%s, sli-%d:%d:%x", (char *)ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV), rev, if_type, family); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fwrev", CTLFLAG_RD, ocs->fwrev, 0, "Firmware Revision"); memset(ocs->sli_intf, 0, sizeof(ocs->sli_intf)); snprintf(ocs->sli_intf, sizeof(ocs->sli_intf), "%08x", ocs_config_read32(ocs, SLI4_INTF_REG)); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sli_intf", CTLFLAG_RD, ocs->sli_intf, 0, "SLI Interface"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fw_upgrade", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, (void *)ocs, 0, ocs_sys_fwupgrade, "A", "Firmware grp file"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, ocs, 0, 
ocs_sysctl_wwnn, "A", "World Wide Node Name, wwnn should be in the format 0x"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_wwpn, "A", "World Wide Port Name, wwpn should be in the format 0x"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "current_topology", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_current_topology, "IU", "Current Topology, 1-NPort; 2-Loop; 3-None"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "current_speed", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_current_speed, "IU", "Current Speed"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "configured_topology", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_config_topology, "IU", "Configured Topology, 0-Auto; 1-NPort; 2-Loop"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "configured_speed", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_config_speed, "IU", "Configured Speed, 0-Auto, 2000, 4000, 8000, 16000, 32000"); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "businfo", CTLFLAG_RD, ocs->businfo, 0, "Bus Info"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fcid", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_fcid, "A", "Port FC ID"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, ocs, 0, ocs_sysctl_port_state, "A", "configured port state"); for (i = 0; i < ocs->num_vports; i++) { fcp = FCPORT(ocs, i+1); memset(name, 0, sizeof(name)); snprintf(name, sizeof(name), "vport%d", i); vtree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Virtual port"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO, "wwnn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, fcp, 0, ocs_sysctl_vport_wwnn, "A", "World Wide Node Name"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO, "wwpn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, fcp, 0, ocs_sysctl_vport_wwpn, "A", "World Wide Port Name"); } } /** * @brief Initialize the debug module * * Parse device hints (similar to Linux module parameters) here. To use, * run the command * kenv hint.ocs.U.P=V * from the command line replacing U with the unit # (0,1,...), * P with the parameter name (debug_mask), and V with the value */ void ocs_debug_attach(void *os) { struct ocs_softc *ocs = os; int error = 0; char *resname = NULL; int32_t unit = INT32_MAX; uint32_t ocs_debug_mask = 0; resname = "debug_mask"; if (0 == (error = resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), resname, &ocs_debug_mask))) { device_printf(ocs->dev, "setting %s to %010x\n", resname, ocs_debug_mask); ocs_debug_enable(ocs_debug_mask); } unit = device_get_unit(ocs->dev); ocs->cdev = make_dev(&ocs_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "ocs%d", unit); if (ocs->cdev) { ocs->cdev->si_drv1 = ocs; } /* initialize sysctl interface */ ocs_sysctl_init(ocs); mtx_init(&ocs->dbg_lock, "ocs_dbg_lock", NULL, MTX_DEF); } /** * @brief Free the debug module */ void ocs_debug_detach(void *os) { struct ocs_softc *ocs = os; mtx_destroy(&ocs->dbg_lock); if (ocs->cdev) { ocs->cdev->si_drv1 = NULL; destroy_dev(ocs->cdev); } } diff --git a/sys/dev/ocs_fc/ocs_mgmt.c b/sys/dev/ocs_fc/ocs_mgmt.c index ff0e028caea2..f9db394c4f20 100644 --- a/sys/dev/ocs_fc/ocs_mgmt.c +++ b/sys/dev/ocs_fc/ocs_mgmt.c @@ -1,2761 +1,2766 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. 
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * The ocs_mgmt top level functions for Fibre Channel. */ /** * @defgroup mgmt Management Functions */ #include "ocs.h" #include "ocs_mgmt.h" #include "ocs_gendump.h" #include "ocs_vpd.h" #define SFP_PAGE_SIZE 128 /* Executables*/ static int ocs_mgmt_firmware_write(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_firmware_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_function_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg); static int ocs_mgmt_force_assert(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); /* Getters */ static void get_nodes_count(ocs_t *, char *, ocs_textbuf_t*); static void get_desc(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_ipl(ocs_t *, char *, ocs_textbuf_t*); static void get_wwnn(ocs_t *, char *, ocs_textbuf_t*); static void get_wwpn(ocs_t *, char *, ocs_textbuf_t*); static void get_fcid(ocs_t *, char *, ocs_textbuf_t *); static void get_sn(ocs_t *, char *, ocs_textbuf_t*); static void get_pn(ocs_t *, char *, ocs_textbuf_t*); static void get_sli4_intf_reg(ocs_t *, char *, ocs_textbuf_t*); static void get_phy_port_num(ocs_t *, char *, ocs_textbuf_t*); static void get_asic_id(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_device(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_device(ocs_t *, char *, ocs_textbuf_t*); static void get_businfo(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a0(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a2(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev1(ocs_t 
*, char *, ocs_textbuf_t*); static void get_hw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev3(ocs_t *, char *, ocs_textbuf_t*); static void get_debug_mq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_cq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_wq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_eq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_logmask(ocs_t*, char*, ocs_textbuf_t*); static void get_current_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_current_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_current_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_linkcfg(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwnn(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwpn(ocs_t*, char*, ocs_textbuf_t*); static void get_nodedb_mask(ocs_t*, char*, ocs_textbuf_t*); static void get_profile_list(ocs_t*, char*, ocs_textbuf_t*); static void get_active_profile(ocs_t*, char*, ocs_textbuf_t*); static void get_port_protocol(ocs_t*, char*, ocs_textbuf_t*); static void get_driver_version(ocs_t*, char*, ocs_textbuf_t*); static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); /* Setters */ static int set_debug_mq_dump(ocs_t*, char*, char*); static int set_debug_cq_dump(ocs_t*, char*, char*); static int set_debug_wq_dump(ocs_t*, char*, char*); static int set_debug_eq_dump(ocs_t*, char*, char*); static int set_logmask(ocs_t*, char*, char*); static int set_configured_link_state(ocs_t*, char*, char*); static int set_linkcfg(ocs_t*, char*, char*); static int set_nodedb_mask(ocs_t*, char*, char*); static int set_port_protocol(ocs_t*, char*, char*); static int set_active_profile(ocs_t*, char*, char*); static int set_tgt_rscn_delay(ocs_t*, char*, char*); static int set_tgt_rscn_period(ocs_t*, char*, char*); static int set_inject_drop_cmd(ocs_t*, char*, char*); static int set_inject_free_drop_cmd(ocs_t*, char*, char*); static int set_inject_drop_data(ocs_t*, char*, char*); static int set_inject_drop_resp(ocs_t*, char*, char*); static int set_cmd_err_inject(ocs_t*, char*, char*); static int set_cmd_delay_value(ocs_t*, char*, char*); static int set_nv_wwn(ocs_t*, char*, char*); static int set_loglevel(ocs_t*, char*, char*); static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg); ocs_mgmt_table_entry_t mgmt_table[] = { {"nodes_count", 
get_nodes_count, NULL, NULL}, {"desc", get_desc, NULL, NULL}, {"fw_rev", get_fw_rev, NULL, NULL}, {"fw_rev2", get_fw_rev2, NULL, NULL}, {"ipl", get_ipl, NULL, NULL}, {"hw_rev1", get_hw_rev1, NULL, NULL}, {"hw_rev2", get_hw_rev2, NULL, NULL}, {"hw_rev3", get_hw_rev3, NULL, NULL}, {"wwnn", get_wwnn, NULL, NULL}, {"wwpn", get_wwpn, NULL, NULL}, {"fc_id", get_fcid, NULL, NULL}, {"sn", get_sn, NULL, NULL}, {"pn", get_pn, NULL, NULL}, {"sli4_intf_reg", get_sli4_intf_reg, NULL, NULL}, {"phy_port_num", get_phy_port_num, NULL, NULL}, {"asic_id_reg", get_asic_id, NULL, NULL}, {"pci_vendor", get_pci_vendor, NULL, NULL}, {"pci_device", get_pci_device, NULL, NULL}, {"pci_subsystem_vendor", get_pci_subsystem_vendor, NULL, NULL}, {"pci_subsystem_device", get_pci_subsystem_device, NULL, NULL}, {"businfo", get_businfo, NULL, NULL}, {"sfp_a0", get_sfp_a0, NULL, NULL}, {"sfp_a2", get_sfp_a2, NULL, NULL}, {"profile_list", get_profile_list, NULL, NULL}, {"driver_version", get_driver_version, NULL, NULL}, {"current_speed", get_current_speed, NULL, NULL}, {"current_topology", get_current_topology, NULL, NULL}, {"current_link_state", get_current_link_state, NULL, NULL}, {"chip_type", get_chip_type, NULL, NULL}, {"configured_speed", get_configured_speed, set_configured_speed, NULL}, {"configured_topology", get_configured_topology, set_configured_topology, NULL}, {"configured_link_state", get_configured_link_state, set_configured_link_state, NULL}, {"debug_mq_dump", get_debug_mq_dump, set_debug_mq_dump, NULL}, {"debug_cq_dump", get_debug_cq_dump, set_debug_cq_dump, NULL}, {"debug_wq_dump", get_debug_wq_dump, set_debug_wq_dump, NULL}, {"debug_eq_dump", get_debug_eq_dump, set_debug_eq_dump, NULL}, {"logmask", get_logmask, set_logmask, NULL}, {"loglevel", get_loglevel, set_loglevel, NULL}, {"linkcfg", get_linkcfg, set_linkcfg, NULL}, {"requested_wwnn", get_req_wwnn, set_req_wwnn, NULL}, {"requested_wwpn", get_req_wwpn, set_req_wwpn, NULL}, {"nodedb_mask", get_nodedb_mask, set_nodedb_mask, NULL}, {"port_protocol", get_port_protocol, set_port_protocol, NULL}, {"active_profile", get_active_profile, set_active_profile, NULL}, {"firmware_write", NULL, NULL, ocs_mgmt_firmware_write}, {"firmware_reset", NULL, NULL, ocs_mgmt_firmware_reset}, {"function_reset", NULL, NULL, ocs_mgmt_function_reset}, {"force_assert", NULL, NULL, ocs_mgmt_force_assert}, {"tgt_rscn_delay", get_tgt_rscn_delay, set_tgt_rscn_delay, NULL}, {"tgt_rscn_period", get_tgt_rscn_period, set_tgt_rscn_period, NULL}, {"inject_drop_cmd", get_inject_drop_cmd, set_inject_drop_cmd, NULL}, {"inject_free_drop_cmd", get_inject_free_drop_cmd, set_inject_free_drop_cmd, NULL}, {"inject_drop_data", get_inject_drop_data, set_inject_drop_data, NULL}, {"inject_drop_resp", get_inject_drop_resp, set_inject_drop_resp, NULL}, {"cmd_err_inject", get_cmd_err_inject, set_cmd_err_inject, NULL}, {"cmd_delay_value", get_cmd_delay_value, set_cmd_delay_value, NULL}, {"nv_wwpn", get_nv_wwpn, NULL, NULL}, {"nv_wwnn", get_nv_wwnn, NULL, NULL}, {"nv_wwn", NULL, set_nv_wwn, NULL}, {"node_abort_cnt", get_node_abort_cnt, NULL, NULL}, }; /** * @ingroup mgmt * @brief Get a list of options supported by the driver. * * @par Description * This is the top level "get list" handler for the driver. It * performs the following: * - Adds entries to the textbuf for any actions supported by this level in the driver. * - Calls a back-end function to add any actions supported by the back-end. * - Calls a function on each child (domain) to recursively add supported actions. 
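 *
 * @par Example
 * An illustrative (hypothetical) caller, built only from the textbuf helpers
 * used elsewhere in this file; the 4096-byte buffer size is an arbitrary choice:
 * @code
 * ocs_textbuf_t tb;
 * if (ocs_textbuf_alloc(ocs, &tb, 4096) == 0) {
 *         ocs_mgmt_get_list(ocs, &tb);
 *         ocs_log_info(ocs, "%s\n", ocs_textbuf_get_buffer(&tb));
 *         ocs_textbuf_free(ocs, &tb);
 * }
 * @endcode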
 * * @param ocs Pointer to the ocs structure. * @param textbuf Pointer to an ocs_textbuf, which is used to accumulate the results. * * @return None. */ void ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; uint32_t i; int access; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { access = 0; if (mgmt_table[i].get_handler) { access |= MGMT_MODE_RD; } if (mgmt_table[i].set_handler) { access |= MGMT_MODE_WR; } if (mgmt_table[i].action_handler) { access |= MGMT_MODE_EX; } ocs_mgmt_emit_property_name(textbuf, access, mgmt_table[i].name); } if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_list_handler)) { ocs->mgmt_functions->get_list_handler(textbuf, ocs); } if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_list_handler)) { ocs->tgt_mgmt_functions->get_list_handler(textbuf, &(ocs->tgt_ocs)); } /* Have each of my children add their actions */ if (ocs_device_lock_try(ocs) == TRUE) { /* If we get here then we are holding the device lock */ ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_list_handler)) { domain->mgmt_functions->get_list_handler(textbuf, domain); } } ocs_device_unlock(ocs); } ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); } /** * @ingroup mgmt * @brief Return the value of a management item. * * @par Description * This is the top level "get" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches a parameter that is known at this level, * writes the value into textbuf. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). * - If the request has not been fulfilled by the back-end, * passes the request to each of the children (domains) to * have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be answered * when a response has been written into textbuf, indicated by textbuf->buffer_written * being non-zero. * * @param ocs Pointer to the ocs structure. * @param name Name of the status item to be retrieved. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return Returns 0 if the value was found and returned, or -1 if an error occurred. */ int ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; char qualifier[6]; int retval = -1; uint32_t i; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); snprintf(qualifier, sizeof(qualifier), "/ocs"); /* See if the name starts with my qualifier. If not then this request isn't for me */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) + 1; for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].get_handler) { mgmt_table[i].get_handler(ocs, name, textbuf); ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); return 0; } } } if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_handler)) { retval = ocs->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, ocs); } if (retval != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_handler)) { retval = ocs->tgt_mgmt_functions->get_handler(textbuf, qualifier, (char*)name, &(ocs->tgt_ocs)); } } if (retval != 0) { /* The driver didn't handle it, pass it to each domain */ ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_handler)) { retval = domain->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, domain); } if (retval == 0) { break; } } ocs_device_unlock(ocs); } } ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); return retval; } /** * @ingroup mgmt * @brief Set the value of a mgmt item. * * @par Description * This is the top level "set" handler for the driver.
It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches a parameter that is known at this level, * calls the correct function to change the configuration. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). * - If the request has not been fulfilled by the back-end, passes the request to each of the * children (domains) to have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be handled * if the function returns 0. * * @param ocs Pointer to the ocs structure. * @param name Name of the property to be changed. * @param value Requested new value of the property. * * @return Returns 0 if the configuration value was updated, or -1 otherwise. */ int ocs_mgmt_set(ocs_t *ocs, char *name, char *value) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) + 1; /* See if it's a value I can set */ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].set_handler) { return mgmt_table[i].set_handler(ocs, name, value); } } } if ((ocs->mgmt_functions) && (ocs->mgmt_functions->set_handler)) { result = ocs->mgmt_functions->set_handler(qualifier, name, (char *)value, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->set_handler)) { result = ocs->tgt_mgmt_functions->set_handler(qualifier, name, (char *)value, &(ocs->tgt_ocs)); } } /* If I didn't know how to set this config value pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->set_handler)) { result = domain->mgmt_functions->set_handler(qualifier, name, (char*)value, domain); } if (result == 0) { break; } } ocs_device_unlock(ocs); } } return result; } /** * @ingroup mgmt * @brief Perform a management action. * * @par Description * This is the top level "exec" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches an action that is known at this level, * calls the correct function to perform the action. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). * - If the request has not been fulfilled by the back-end, passes the request to each of the * children (domains) to have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be handled * if the function returns 0. * * @param ocs Pointer to the ocs structure. * @param action Name of the action to be performed. * @param arg_in Pointer to an argument being passed to the action. * @param arg_in_length Length of the argument pointed to by @c arg_in. * @param arg_out Pointer to a buffer in which the action returns its output. * @param arg_out_length Length of the argument pointed to by @c arg_out. * * @return Returns 0 if the action was completed, or -1 otherwise.
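 *
 * @par Example
 * Hypothetical invocation: the action name is fully qualified, so a request
 * for "/ocs/function_reset" is dispatched through mgmt_table to
 * ocs_mgmt_function_reset():
 * @code
 * int rc = ocs_mgmt_exec(ocs, "/ocs/function_reset", NULL, 0, NULL, 0);
 * @endcode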
 * * */ int ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = action + strlen(qualifier) + 1; /* See if it's an action I can perform */ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].action_handler) { return mgmt_table[i].action_handler(ocs, action, arg_in, arg_in_length, arg_out, arg_out_length); } } } if ((ocs->mgmt_functions) && (ocs->mgmt_functions->exec_handler)) { result = ocs->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->exec_handler)) { result = ocs->tgt_mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, &(ocs->tgt_ocs)); } } /* If I didn't know how to do this action pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->exec_handler)) { result = domain->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, domain); } if (result == 0) { break; } } ocs_device_unlock(ocs); } } return result; } void ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; uint32_t i; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { if (mgmt_table[i].get_handler) { mgmt_table[i].get_handler(ocs, mgmt_table[i].name, textbuf); } } if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_all_handler)) { ocs->mgmt_functions->get_all_handler(textbuf, ocs); } if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_all_handler)) { ocs->tgt_mgmt_functions->get_all_handler(textbuf, &(ocs->tgt_ocs)); } ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_all_handler)) { domain->mgmt_functions->get_all_handler(textbuf, domain); } } ocs_device_unlock(ocs); ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); } static int32_t ocs_mgmt_firmware_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; int index = 0; uint8_t bus, dev, func; ocs_t *other_ocs; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_log_debug(ocs, "Resetting port\n"); if (ocs_hw_reset(&ocs->hw, OCS_HW_RESET_FIRMWARE)) { ocs_log_test(ocs, "failed to reset port\n"); rc = -1; } else { ocs_log_debug(ocs, "successfully reset port\n"); /* now reset all functions on the same device */ while ((other_ocs = ocs_get_instance(index++)) != NULL) { uint8_t other_bus, other_dev, other_func; ocs_get_bus_dev_func(other_ocs, &other_bus, &other_dev, &other_func); if ((bus == other_bus) && (dev == other_dev)) { if (other_ocs->hw.state != OCS_HW_STATE_UNINITIALIZED) { other_ocs->hw.state = OCS_HW_STATE_QUEUES_ALLOCATED; } ocs_device_detach(other_ocs); if (ocs_device_attach(other_ocs)) { ocs_log_err(other_ocs, "device %d attach failed\n", index); rc = -1; } } } } return rc; } static int32_t ocs_mgmt_function_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int32_t rc; ocs_device_detach(ocs); rc = ocs_device_attach(ocs); return rc; } static int32_t ocs_mgmt_firmware_write(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; uint32_t bytes_left; uint32_t xfer_size; uint32_t offset; uint8_t *userp; ocs_dma_t dma; int last = 0; ocs_mgmt_fw_write_result_t result; uint32_t
change_status = 0; char status_str[80]; ocs_sem_init(&(result.semaphore), 0, "fw_write"); bytes_left = buf_len; offset = 0; userp = (uint8_t *)buf; if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) { ocs_log_err(ocs, "ocs_mgmt_firmware_write: malloc failed"); return -ENOMEM; } while (bytes_left > 0) { if (bytes_left > FW_WRITE_BUFSIZE) { xfer_size = FW_WRITE_BUFSIZE; } else { xfer_size = bytes_left; } /* Copy xfer_size bytes from user space to kernel buffer */ if (ocs_copy_from_user(dma.virt, userp, xfer_size)) { rc = -EFAULT; break; } /* See if this is the last block */ if (bytes_left == xfer_size) { last = 1; } /* Send the HW command */ ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_mgmt_fw_write_cb, &result); /* Wait for semaphore to be signaled when the command completes * TODO: Should there be a timeout on this? If so, how long? */ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { ocs_log_err(ocs, "ocs_sem_p failed\n"); rc = -ENXIO; break; } if (result.actual_xfer == 0) { ocs_log_test(ocs, "actual_write_length is %d\n", result.actual_xfer); rc = -EFAULT; break; } /* Check status */ if (result.status != 0) { ocs_log_test(ocs, "write returned status %d\n", result.status); rc = -EFAULT; break; } if (last) { change_status = result.change_status; } bytes_left -= result.actual_xfer; offset += result.actual_xfer; userp += result.actual_xfer; } /* Create string with status and copy to userland */ if ((arg_out_length > 0) && (arg_out != NULL)) { if (arg_out_length > sizeof(status_str)) { arg_out_length = sizeof(status_str); } ocs_memset(status_str, 0, sizeof(status_str)); ocs_snprintf(status_str, arg_out_length, "%d", change_status); if (ocs_copy_to_user(arg_out, status_str, arg_out_length)) { ocs_log_test(ocs, "copy to user failed for change_status\n"); } } ocs_dma_free(ocs, &dma); return rc; } static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg) { ocs_mgmt_fw_write_result_t *result = arg; result->status = status; result->actual_xfer = actual_write_length; result->change_status = change_status; ocs_sem_v(&(result->semaphore)); } typedef struct ocs_mgmt_sfp_result { ocs_sem_t semaphore; ocs_lock_t cb_lock; int32_t running; int32_t status; uint32_t bytes_read; uint32_t page_data[32]; } ocs_mgmt_sfp_result_t; static void ocs_mgmt_sfp_cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg) { ocs_mgmt_sfp_result_t *result = arg; ocs_t *ocs = os; ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 2) { /* get_sfp() has timed out */ ocs_unlock(&(result->cb_lock)); ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return; } result->status = status; result->bytes_read = bytes_read; ocs_memcpy(&result->page_data, data, SFP_PAGE_SIZE); ocs_sem_v(&(result->semaphore)); ocs_unlock(&(result->cb_lock)); } static int32_t ocs_mgmt_get_sfp(ocs_t *ocs, uint16_t page, void *buf, uint32_t buf_len) { int rc = 0; ocs_mgmt_sfp_result_t *result = ocs_malloc(ocs, sizeof(ocs_mgmt_sfp_result_t), OCS_M_ZERO | OCS_M_NOWAIT); ocs_sem_init(&(result->semaphore), 0, "get_sfp"); ocs_lock_init(ocs, &(result->cb_lock), "get_sfp"); /* Send the HW command */ ocs_hw_get_sfp(&ocs->hw, page, ocs_mgmt_sfp_cb, result); /* Wait for semaphore to be signaled when the command completes */ if (ocs_sem_p(&(result->semaphore), 5 * 1000 * 1000) != 0) { /* Timed out, callback will free memory */ ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 1) { ocs_log_err(ocs, "ocs_sem_p 
failed\n"); ocs_unlock(&(result->cb_lock)); return (-ENXIO); } /* sfp_cb() has already executed, proceed as normal */ ocs_unlock(&(result->cb_lock)); } /* Check status */ if (result->status != 0) { ocs_log_test(ocs, "read_transceiver_data returned status %d\n", result->status); rc = -EFAULT; } if (rc == 0) { rc = (result->bytes_read > buf_len ? buf_len : result->bytes_read); /* Copy the results back to the supplied buffer */ ocs_memcpy(buf, result->page_data, rc); } ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return rc; } static int32_t ocs_mgmt_force_assert(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { ocs_assert(FALSE, 0); } static void get_nodes_count(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "nodes_count", "%d", xport->nodes_count); } static void get_driver_version(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "driver_version", ocs->driver_version); } static void get_desc(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "desc", ocs->desc); } static void get_fw_rev(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV)); } static void get_fw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev2", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV2)); } static void get_ipl(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "ipl", ocs_hw_get_ptr(&ocs->hw, OCS_HW_IPL)); } static void get_hw_rev1(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV1, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev1", "%u", value); } static void get_hw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV2, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev2", "%u", value); } static void get_hw_rev3(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV3, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev3", "%u", value); } static void get_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint64_t *wwnn; wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%llx", (unsigned long long)ocs_htobe64(*wwnn)); } static void get_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint64_t *wwpn; wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%llx", (unsigned long long)ocs_htobe64(*wwpn)); } static void get_fcid(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { if (ocs->domain && ocs->domain->attached) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", ocs->domain->sport->fc_id); } else { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "UNKNOWN"); } } static void get_sn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_SERIALNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sn", sn_buf); } } static void get_pn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = 
ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PARTNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", sn_buf); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", ocs->model); } } static void get_sli4_intf_reg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "sli4_intf_reg", "0x%04x", ocs_config_read32(ocs, SLI4_INTF_REG)); } static void get_phy_port_num(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char *phy_port = NULL; phy_port = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PORTNUM); if (phy_port) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", phy_port); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", "unknown"); } } static void get_asic_id(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "asic_id_reg", "0x%04x", ocs_config_read32(ocs, SLI4_ASIC_ID_REG)); } static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t family; uint32_t asic_id; uint32_t asic_gen_num; uint32_t asic_rev_num; uint32_t rev_id; char result_buf[80]; char tmp_buf[80]; family = (ocs_config_read32(ocs, SLI4_INTF_REG) & 0x00000f00) >> 8; asic_id = ocs_config_read32(ocs, SLI4_ASIC_ID_REG); asic_rev_num = asic_id & 0xff; asic_gen_num = (asic_id & 0xff00) >> 8; rev_id = ocs_config_read32(ocs, SLI4_PCI_CLASS_REVISION) & 0xff; switch(family) { case 0x00: /* BE2 */ ocs_strncpy(result_buf, "BE2 A", sizeof(result_buf)); ocs_snprintf(tmp_buf, 2, "%d", rev_id); strcat(result_buf, tmp_buf); break; case 0x01: /* BE3 */ ocs_strncpy(result_buf, "BE3", sizeof(result_buf)); if (rev_id >= 0x10) { strcat(result_buf, "-R"); } ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x02: /* Skyhawk A0 */ ocs_strncpy(result_buf, "Skyhawk A0", sizeof(result_buf)); break; case 0x0a: /* Lancer A0 */ ocs_strncpy(result_buf, "Lancer A", sizeof(result_buf)); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x0b: /* Lancer B0 or D0 */ ocs_strncpy(result_buf, "Lancer", sizeof(result_buf)); ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x0c: ocs_strncpy(result_buf, "Lancer G6", sizeof(result_buf)); break; case 0x0f: /* Refer to ASIC_ID */ switch(asic_gen_num) { case 0x00: ocs_strncpy(result_buf, "BE2", sizeof(result_buf)); break; case 0x03: ocs_strncpy(result_buf, "BE3-R", sizeof(result_buf)); break; case 0x04: ocs_strncpy(result_buf, "Skyhawk-R", sizeof(result_buf)); break; case 0x05: ocs_strncpy(result_buf, "Corsair", sizeof(result_buf)); break; case 0x0b: ocs_strncpy(result_buf, "Lancer", sizeof(result_buf)); break; case 0x0c: ocs_strncpy(result_buf, "LancerG6", sizeof(result_buf)); break; default: ocs_strncpy(result_buf, "Unknown", sizeof(result_buf)); } if (ocs_strcmp(result_buf, "Unknown") != 0) { ocs_snprintf(tmp_buf, 3, " %c", ((asic_rev_num & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", asic_rev_num & 0x0f); strcat(result_buf, tmp_buf); } break; default: ocs_strncpy(result_buf, "Unknown", sizeof(result_buf)); } ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "chip_type", result_buf); } static void get_pci_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { 
ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_vendor", "0x%04x", ocs->pci_vendor); } static void get_pci_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_device", "0x%04x", ocs->pci_device); } static void get_pci_subsystem_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_vendor", "0x%04x", ocs->pci_subsystem_vendor); } static void get_pci_subsystem_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_device", "0x%04x", ocs->pci_subsystem_device); } static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_delay", "%ld", (unsigned long)ocs->tgt_rscn_delay_msec / 1000); } static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_period", "%ld", (unsigned long)ocs->tgt_rscn_period_msec / 1000); } static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_cmd", "%d", (ocs->err_injection == INJECT_DROP_CMD ? 1:0)); } static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_free_drop_cmd", "%d", (ocs->err_injection == INJECT_FREE_DROPPED ? 1:0)); } static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_data", "%d", (ocs->err_injection == INJECT_DROP_DATA ? 1:0)); } static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_resp", "%d", (ocs->err_injection == INJECT_DROP_RESP ? 
1:0)); } static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_err_inject", "0x%02x", ocs->cmd_err_inject); } static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_delay_value", "%ld", (unsigned long)ocs->delay_value_msec); } static void get_businfo(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "businfo", ocs->businfo); } static void get_sfp_a0(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *page_data; char *buf; int i; int32_t bytes_read; page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (page_data == NULL) { return; } buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT); if (buf == NULL) { ocs_free(ocs, page_data, SFP_PAGE_SIZE); return; } bytes_read = ocs_mgmt_get_sfp(ocs, 0xa0, page_data, SFP_PAGE_SIZE); if (bytes_read <= 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", "(unknown)"); } else { char *d = buf; uint8_t *s = page_data; int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1; int bytes_added; for (i = 0; i < bytes_read; i++) { bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s); ++s; d += bytes_added; buffer_remaining -= bytes_added; } *d = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", buf); } ocs_free(ocs, page_data, SFP_PAGE_SIZE); ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1); } static void get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *page_data; char *buf; int i; int32_t bytes_read; page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (page_data == NULL) { return; } buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT); if (buf == NULL) { ocs_free(ocs, page_data, SFP_PAGE_SIZE); return; } bytes_read = ocs_mgmt_get_sfp(ocs, 0xa2, page_data, SFP_PAGE_SIZE); if (bytes_read <= 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", "(unknown)"); } else { char *d = buf; uint8_t *s = page_data; int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1; int bytes_added; for (i=0; i < bytes_read; i++) { bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s); ++s; d += bytes_added; buffer_remaining -= bytes_added; } *d = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", buf); } ocs_free(ocs, page_data, SFP_PAGE_SIZE); ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1); } static void get_debug_mq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_mq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)); } static void get_debug_cq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_cq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_CQ_DUMP)); } static void get_debug_wq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_wq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_WQ_DUMP)); } static void get_debug_eq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_eq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_EQ_DUMP)); } static void get_logmask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "logmask", "0x%02x", ocs->logmask); } static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "loglevel", "%d", loglevel); } static void 
get_current_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_speed", "%d", value); } static void get_configured_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_LINK_CONFIG_SPEED, &value); if (value == 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_speed", "auto"); } else { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_speed", "%d", value); } } static void get_current_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_TOPOLOGY, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_topology", "%d", value); } static void get_configured_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_CONFIG_TOPOLOGY, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_topology", "%d", value); } static void get_current_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_stats_t value; if (ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value) == 0) { if (value.value == OCS_XPORT_PORT_ONLINE) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "online"); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "offline"); } } } static void get_configured_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_stats_t value; if (ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &value) == 0) { if (value.value == OCS_XPORT_PORT_ONLINE) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "online"); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "offline"); } } } /** * @brief HW link config enum to mgmt string value mapping. * * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the mgmt string that is passed in by the mgmt application * (elxsdkutil). */ typedef struct ocs_mgmt_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; const char *mgmt_str; } ocs_mgmt_linkcfg_map_t; static ocs_mgmt_linkcfg_map_t mgmt_linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, OCS_CONFIG_LINKCFG_4X10G}, {OCS_HW_LINKCFG_1X40G, OCS_CONFIG_LINKCFG_1X40G}, {OCS_HW_LINKCFG_2X16G, OCS_CONFIG_LINKCFG_2X16G}, {OCS_HW_LINKCFG_4X8G, OCS_CONFIG_LINKCFG_4X8G}, {OCS_HW_LINKCFG_4X1G, OCS_CONFIG_LINKCFG_4X1G}, {OCS_HW_LINKCFG_2X10G, OCS_CONFIG_LINKCFG_2X10G}, {OCS_HW_LINKCFG_2X10G_2X8G, OCS_CONFIG_LINKCFG_2X10G_2X8G}}; /** * @brief Get the HW linkcfg enum from the mgmt config string. * * @param mgmt_str mgmt string value. * * @return Returns the HW linkcfg enum corresponding to @c mgmt_str. */ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_mgmt(const char *mgmt_str) { uint32_t i; for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) { if (ocs_strncmp(mgmt_linkcfg_map[i].mgmt_str, mgmt_str, ocs_strlen(mgmt_str)) == 0) { return mgmt_linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Get the mgmt string value from the HW linkcfg enum. * * @param linkcfg HW linkcfg enum. * * @return Returns the mgmt string value corresponding to the given HW linkcfg.
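 *
 * @par Example
 * Given mgmt_linkcfg_map above, OCS_HW_LINKCFG_4X10G yields the
 * OCS_CONFIG_LINKCFG_4X10G string; any enum value absent from the map falls
 * back to OCS_CONFIG_LINKCFG_UNKNOWN.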
 */ static const char * ocs_mgmt_from_hw_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) { if (mgmt_linkcfg_map[i].linkcfg == linkcfg) { return mgmt_linkcfg_map[i].mgmt_str; } } return OCS_CONFIG_LINKCFG_UNKNOWN; } /** * @brief Link configuration callback argument */ typedef struct ocs_mgmt_linkcfg_arg_s { ocs_sem_t semaphore; int32_t status; ocs_hw_linkcfg_e linkcfg; } ocs_mgmt_linkcfg_arg_t; /** * @brief Get linkcfg config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_linkcfg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { const char *linkcfg_str = NULL; uint32_t value; ocs_hw_linkcfg_e linkcfg; ocs_hw_get(&ocs->hw, OCS_HW_LINKCFG, &value); linkcfg = (ocs_hw_linkcfg_e)value; linkcfg_str = ocs_mgmt_from_hw_linkcfg(linkcfg); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "linkcfg", linkcfg_str); } /** * @brief Get requested WWNN config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_req_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwnn", "0x%llx", (unsigned long long)xport->req_wwnn); } /** * @brief Get requested WWPN config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_req_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwpn", "0x%llx", (unsigned long long)xport->req_wwpn); } /** * @brief Get requested nodedb_mask config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_nodedb_mask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "nodedb_mask", "0x%08x", ocs->nodedb_mask); } /** * @brief Set requested WWNN value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the WWNN is set. * * @return Returns 0 on success. */ int set_req_wwnn(ocs_t *ocs, char *name, char *value) { int rc; uint64_t wwnn; if (ocs_strcasecmp(value, "default") == 0) { wwnn = 0; } else if (parse_wwn(value, &wwnn) != 0) { ocs_log_test(ocs, "Invalid WWNN: %s\n", value); return 1; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWNN_SET, wwnn); if (rc) { ocs_log_test(ocs, "OCS_XPORT_WWNN_SET failed: %d\n", rc); return rc; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_test(ocs, "port offline failed: %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_test(ocs, "port online failed: %d\n", rc); } return rc; } /** * @brief Set requested WWPN value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the WWPN is set. * * @return Returns 0 on success.
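 *
 * @par Example
 * Hypothetical calls; the WWPN shown is an arbitrary example value, and
 * "default" clears the override so the factory WWPN is used again:
 * @code
 * rc = set_req_wwpn(ocs, NULL, "0x10000000c9000001");
 * rc = set_req_wwpn(ocs, NULL, "default");
 * @endcode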
*/ int set_req_wwpn(ocs_t *ocs, char *name, char *value) { int rc; uint64_t wwpn; if (ocs_strcasecmp(value, "default") == 0) { wwpn = 0; } else if (parse_wwn(value, &wwpn) != 0) { ocs_log_test(ocs, "Invalid WWPN: %s\n", value); return 1; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWPN_SET, wwpn); if(rc) { ocs_log_test(ocs, "OCS_XPORT_WWPN_SET failed: %d\n", rc); return rc; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_test(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_test(ocs, "port online failed : %d\n", rc); } return rc; } /** * @brief Set node debug mask value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the nodedb_mask is set. * * @return Returns 0 on success. */ static int set_nodedb_mask(ocs_t *ocs, char *name, char *value) { ocs->nodedb_mask = ocs_strtoul(value, 0, 0); return 0; } /** * @brief Set linkcfg config value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the linkcfg is set. * * @return Returns 0 on success. */ static int set_linkcfg(ocs_t *ocs, char *name, char *value) { ocs_hw_linkcfg_e linkcfg; ocs_mgmt_linkcfg_arg_t cb_arg; ocs_hw_rtn_e status; ocs_sem_init(&cb_arg.semaphore, 0, "mgmt_linkcfg"); /* translate mgmt linkcfg string to HW linkcfg enum */ linkcfg = ocs_hw_linkcfg_from_mgmt(value); /* set HW linkcfg */ status = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SET_LINK_CONFIG, (uintptr_t)linkcfg, ocs_mgmt_linkcfg_cb, &cb_arg); if (status) { ocs_log_test(ocs, "ocs_hw_set_linkcfg failed\n"); return -1; } if (ocs_sem_p(&cb_arg.semaphore, OCS_SEM_FOREVER)) { ocs_log_err(ocs, "ocs_sem_p failed\n"); return -1; } if (cb_arg.status) { ocs_log_test(ocs, "failed to set linkcfg from HW status=%d\n", cb_arg.status); return -1; } return 0; } /** * @brief Linkcfg callback * * @param status Result of the linkcfg get/set operation. * @param value Resulting linkcfg value. * @param arg Callback argument. * * @return None. 
*/ static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg) { ocs_mgmt_linkcfg_arg_t *cb_arg = (ocs_mgmt_linkcfg_arg_t *)arg; cb_arg->status = status; cb_arg->linkcfg = (ocs_hw_linkcfg_e)value; ocs_sem_v(&cb_arg->semaphore); } static int set_debug_mq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_MQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_MQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_cq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_CQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_CQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_wq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_WQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_WQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_eq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_EQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_EQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_logmask(ocs_t *ocs, char *name, char *value) { ocs->logmask = ocs_strtoul(value, NULL, 0); return 0; } static int set_loglevel(ocs_t *ocs, char *name, char *value) { loglevel = ocs_strtoul(value, NULL, 0); return 0; } int set_configured_speed(ocs_t *ocs, char *name, char *value) { int result = 0; ocs_hw_rtn_e hw_rc; int xport_rc; uint32_t spd; spd = ocs_strtoul(value, NULL, 0); if ((spd != 0) && (spd != 2000) && (spd != 4000) && (spd != 8000) && (spd != 16000) && (spd != 32000)) { ocs_log_test(ocs, "unsupported speed %d\n", spd); return 1; } ocs_log_debug(ocs, "Taking port offline\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Port offline failed\n"); result = 1; } else { ocs_log_debug(ocs, "Setting port to speed %d\n", spd); hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, spd); if (hw_rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(ocs, "Speed set failed\n"); result = 1; } /* If we failed to set the speed we still want to try to bring * the port back online */ ocs_log_debug(ocs, "Bringing port online\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { result = 1; } } return result; } int set_configured_topology(ocs_t *ocs, char *name, char *value) { int result = 0; ocs_hw_rtn_e hw_rc; int xport_rc; uint32_t topo; topo = ocs_strtoul(value, NULL, 0); if (topo >= OCS_HW_TOPOLOGY_NONE) { return 1; } ocs_log_debug(ocs, "Taking port offline\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Port offline failed\n"); result = 1; } else { ocs_log_debug(ocs, "Setting port to topology %d\n", topo); hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topo); if (hw_rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(ocs, "Topology set failed\n"); result = 1; + } else { + // Set the persistent topology before port is online + hw_rc = ocs_hw_set_persistent_topology(&ocs->hw, topo, OCS_CMD_NOWAIT); + if (hw_rc != OCS_HW_RTN_SUCCESS) 
+ ocs_log_err(ocs, "Set persistent topology feature failed: %d\n", hw_rc); } /* If we failed to set the topology we still want to try to bring * the port back online */ ocs_log_debug(ocs, "Bringing port online\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { result = 1; } } return result; } static int set_configured_link_state(ocs_t *ocs, char *name, char *value) { int result = 0; int xport_rc; if (ocs_strcasecmp(value, "offline") == 0) { ocs_log_debug(ocs, "Setting port to %s\n", value); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Setting port to offline failed\n"); result = -1; } } else if (ocs_strcasecmp(value, "online") == 0) { ocs_log_debug(ocs, "Setting port to %s\n", value); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Setting port to online failed\n"); result = -1; } } else { ocs_log_test(ocs, "Unsupported link state \"%s\"\n", value); result = -1; } return result; } typedef struct ocs_mgmt_get_port_protocol_result { ocs_sem_t semaphore; int32_t status; ocs_hw_port_protocol_e port_protocol; } ocs_mgmt_get_port_protocol_result_t; static void ocs_mgmt_get_port_protocol_cb(int32_t status, ocs_hw_port_protocol_e port_protocol, void *arg) { ocs_mgmt_get_port_protocol_result_t *result = arg; result->status = status; result->port_protocol = port_protocol; ocs_sem_v(&(result->semaphore)); } static void get_port_protocol(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_get_port_protocol_result_t result; uint8_t bus; uint8_t dev; uint8_t func; ocs_sem_init(&(result.semaphore), 0, "get_port_protocol"); ocs_get_bus_dev_func(ocs, &bus, &dev, &func); if(ocs_hw_get_port_protocol(&ocs->hw, func, ocs_mgmt_get_port_protocol_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { switch (result.port_protocol) { case OCS_HW_PORT_PROTOCOL_ISCSI: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "iSCSI"); break; case OCS_HW_PORT_PROTOCOL_FCOE: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FCoE"); break; case OCS_HW_PORT_PROTOCOL_FC: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FC"); break; case OCS_HW_PORT_PROTOCOL_OTHER: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Other"); break; } } else { ocs_log_test(ocs, "getting port profile status 0x%x\n", result.status); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Unknown"); } } } typedef struct ocs_mgmt_set_port_protocol_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_port_protocol_result_t; static void ocs_mgmt_set_port_protocol_cb(int32_t status, void *arg) { ocs_mgmt_get_port_protocol_result_t *result = arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set port protocol * @par Description * This is a management action handler to set the current * port protocol. Input value should be one of iSCSI, * FC, or FCoE. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param value The value to be assigned * * @return Returns 0 on success, non-zero on failure. 
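 *
 * @par Example
 * An illustrative invocation; the accepted strings are exactly those
 * parsed below ("iscsi", "fc", or "fcoe", case-insensitive). The call
 * is synchronous: it blocks on a semaphore until the
 * ocs_hw_set_port_protocol() completion callback runs:
 * @code
 * int32_t rc = set_port_protocol(ocs, "port_protocol", "fc");
 * @endcode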
*/ static int32_t set_port_protocol(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_port_protocol_result_t result; int32_t rc = 0; ocs_hw_port_protocol_e new_protocol; uint8_t bus; uint8_t dev; uint8_t func; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_sem_init(&(result.semaphore), 0, "set_port_protocol"); if (ocs_strcasecmp(value, "iscsi") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; } else if (ocs_strcasecmp(value, "fc") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FC; } else if (ocs_strcasecmp(value, "fcoe") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FCOE; } else { return -1; } rc = ocs_hw_set_port_protocol(&ocs->hw, new_protocol, func, ocs_mgmt_set_port_protocol_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_profile_list_result_s { ocs_sem_t semaphore; int32_t status; ocs_hw_profile_list_t *list; } ocs_mgmt_get_profile_list_result_t; static void ocs_mgmt_get_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; result->list = list; ocs_sem_v(&(result->semaphore)); } /** * @brief Get list of profiles * @par Description * This is a management action handler to get the list of * profiles supported by the SLI port. Although the spec says * that all SLI platforms support this, only Skyhawk actually * has a useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_get_profile_list_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_profile_list"); if(ocs_hw_get_profile_list(&ocs->hw, ocs_mgmt_get_profile_list_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. 
*/ #define MAX_LINE_SIZE 520 #define BUFFER_SIZE MAX_LINE_SIZE*40 char *result_buf; char result_line[MAX_LINE_SIZE]; uint32_t bytes_left; uint32_t i; result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO); bytes_left = BUFFER_SIZE; for (i=0; i<result.list->num_descriptors; i++) { sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id, result.list->descriptors[i].profile_description); if (strlen(result_line) < bytes_left) { strcat(result_buf, result_line); bytes_left -= strlen(result_line); } } ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "profile_list", result_buf); ocs_free(ocs, result_buf, BUFFER_SIZE); ocs_free(ocs, result.list, sizeof(ocs_hw_profile_list_t)); } else { ocs_log_test(ocs, "getting profile list status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_get_active_profile_result { ocs_sem_t semaphore; int32_t status; uint32_t active_profile_id; } ocs_mgmt_get_active_profile_result_t; static void ocs_mgmt_get_active_profile_cb(int32_t status, uint32_t active_profile, void *ul_arg) { ocs_mgmt_get_active_profile_result_t *result = ul_arg; result->status = status; result->active_profile_id = active_profile; ocs_sem_v(&(result->semaphore)); } #define MAX_PROFILE_LENGTH 5 /** * @brief Get active profile * @par Description * This is a management action handler to get the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_active_profile(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[MAX_PROFILE_LENGTH]; ocs_mgmt_get_active_profile_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_active_profile"); if(ocs_hw_get_active_profile(&ocs->hw, ocs_mgmt_get_active_profile_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. */ sprintf(result_string, "0x%02x", result.active_profile_id); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "active_profile", result_string); } else { ocs_log_test(ocs, "getting active profile status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_set_active_profile_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_active_profile_result_t; static void ocs_mgmt_set_active_profile_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set active profile * @par Description * This is a management action handler to set the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param value Requested new value of the property. * * @return Returns 0 on success, non-zero on failure.
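 *
 * @par Example
 * An illustrative invocation; the profile ID is a made-up value, and
 * valid IDs can be read back through the profile_list property:
 * @code
 * int32_t rc = set_active_profile(ocs, "active_profile", "0x03");
 * @endcode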
*/ static int32_t set_active_profile(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_active_profile_result_t result; int32_t rc = 0; int32_t new_profile; new_profile = ocs_strtoul(value, NULL, 0); ocs_sem_init(&(result.semaphore), 0, "set_active_profile"); rc = ocs_hw_set_active_profile(&ocs->hw, ocs_mgmt_set_active_profile_cb, new_profile, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_nvparms_result { ocs_sem_t semaphore; int32_t status; uint8_t wwpn[8]; uint8_t wwnn[8]; uint8_t hard_alpa; uint32_t preferred_d_id; } ocs_mgmt_get_nvparms_result_t; static void ocs_mgmt_get_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *ul_arg) { ocs_mgmt_get_nvparms_result_t *result = ul_arg; result->status = status; ocs_memcpy(result->wwpn, wwpn, sizeof(result->wwpn)); ocs_memcpy(result->wwnn, wwnn, sizeof(result->wwnn)); result->hard_alpa = hard_alpa; result->preferred_d_id = preferred_d_id; ocs_sem_v(&(result->semaphore)); } /** * @brief Get wwpn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwpn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. Copy wwpn from result struct to result string */ sprintf(result_string, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwpn[0], result.wwpn[1], result.wwpn[2], result.wwpn[3], result.wwpn[4], result.wwpn[5], result.wwpn[6], result.wwpn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwpn", result_string); } else { ocs_log_test(ocs, "getting wwpn status 0x%x\n", result.status); } } } /** * @brief Get wwnn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwnn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. 
Copy wwnn from result struct to result string */ ocs_snprintf(result_string, sizeof(result_string), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwnn[0], result.wwnn[1], result.wwnn[2], result.wwnn[3], result.wwnn[4], result.wwnn[5], result.wwnn[6], result.wwnn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwnn", result_string); } else { ocs_log_test(ocs, "getting wwnn status 0x%x\n", result.status); } } } /** * @brief Get accumulated node abort counts * @par Description Get the sum of all nodes' abort counts. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return None. */ static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t abort_counts = 0; ocs_domain_t *domain; ocs_sport_t *sport; ocs_node_t *node; if (ocs_device_lock_try(ocs) != TRUE) { /* Didn't get the lock */ return; } /* Here the Device lock is held */ ocs_list_foreach(&ocs->domain_list, domain) { if (ocs_domain_lock_try(domain) != TRUE) { /* Didn't get the lock */ ocs_device_unlock(ocs); return; } /* Here the Domain lock is held */ ocs_list_foreach(&domain->sport_list, sport) { if (ocs_sport_lock_try(sport) != TRUE) { /* Didn't get the lock */ ocs_domain_unlock(domain); ocs_device_unlock(ocs); return; } /* Here the sport lock is held */ ocs_list_foreach(&sport->node_list, node) { abort_counts += node->abort_cnt; } ocs_sport_unlock(sport); } ocs_domain_unlock(domain); } ocs_device_unlock(ocs); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "node_abort_cnt", "%d" , abort_counts); } typedef struct ocs_mgmt_set_nvparms_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_nvparms_result_t; static void ocs_mgmt_set_nvparms_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set wwn * @par Description Sets the Non-volatile worldwide names, * if provided. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param wwn_p Requested new WWN values. * * @return Returns 0 on success, non-zero on failure.
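 *
 * @par Example
 * The value is parsed below as "wwpn@wwnn", each name given as eight
 * colon-separated hex bytes, with "NA" keeping the currently active
 * value; the names here are made-up examples:
 * @code
 * set_nv_wwn(ocs, "nv_wwn", "10:00:00:00:c9:11:22:33@20:00:00:00:c9:11:22:33");
 * set_nv_wwn(ocs, "nv_wwn", "NA@20:00:00:00:c9:11:22:33");
 * @endcode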
*/ static int32_t set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p) { ocs_mgmt_get_nvparms_result_t result; uint8_t new_wwpn[8]; uint8_t new_wwnn[8]; char *wwpn_p = NULL; char *wwnn_p = NULL; int32_t rc = -1; int wwpn = 0; int wwnn = 0; int i; /* This is a read-modify-write operation, so first we have to read * the current values */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn1"); rc = ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "getting nvparms status 0x%x\n", result.status); return -1; } } /* wwn_p contains wwpn_p@wwnn_p values */ if (wwn_p != NULL) { wwpn_p = ocs_strsep(&wwn_p, "@"); wwnn_p = wwn_p; } if (wwpn_p != NULL) { wwpn = ocs_strcmp(wwpn_p, "NA"); } if (wwnn_p != NULL) { wwnn = ocs_strcmp(wwnn_p, "NA"); } /* Parse the new WWPN */ if ((wwpn_p != NULL) && (wwpn != 0)) { if (ocs_sscanf(wwpn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwpn[0]), &(new_wwpn[1]), &(new_wwpn[2]), &(new_wwpn[3]), &(new_wwpn[4]), &(new_wwpn[5]), &(new_wwpn[6]), &(new_wwpn[7])) != 8) { ocs_log_test(ocs, "can't parse WWPN %s\n", wwpn_p); return -1; } } /* Parse the new WWNN */ if ((wwnn_p != NULL) && (wwnn != 0 )) { if (ocs_sscanf(wwnn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwnn[0]), &(new_wwnn[1]), &(new_wwnn[2]), &(new_wwnn[3]), &(new_wwnn[4]), &(new_wwnn[5]), &(new_wwnn[6]), &(new_wwnn[7])) != 8) { ocs_log_test(ocs, "can't parse WWNN %s\n", wwnn_p); return -1; } } for (i = 0; i < 8; i++) { /* Use active wwpn, if new one is not provided */ if (wwpn == 0) { new_wwpn[i] = result.wwpn[i]; } /* Use active wwnn, if new one is not provided */ if (wwnn == 0) { new_wwnn[i] = result.wwnn[i]; } } /* Modify the nv_wwnn and nv_wwpn, then write it back */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn2"); rc = ocs_hw_set_nvparms(&ocs->hw, ocs_mgmt_set_nvparms_cb, new_wwpn, new_wwnn, result.hard_alpa, result.preferred_d_id, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "setting wwn status 0x%x\n", result.status); return -1; } } return rc; } static int set_tgt_rscn_delay(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_delay_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_tgt_rscn_period(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_period_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_CMD); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_free_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_FREE_DROPPED); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_data(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? 
NO_ERR_INJECT : INJECT_DROP_DATA); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_resp(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_RESP); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_cmd_err_inject(ocs_t *ocs, char *name, char *value) { ocs->cmd_err_inject = ocs_strtoul(value, NULL, 0); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_cmd_delay_value(ocs_t *ocs, char *name, char *value) { ocs->delay_value_msec = ocs_strtoul(value, NULL, 0); ocs->err_injection = (ocs->delay_value_msec == 0 ? NO_ERR_INJECT : INJECT_DELAY_CMD); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } /** * @brief parse a WWN from a string into a 64-bit value * * Given a pointer to a string, parse the string into a 64-bit * WWN value. The format of the string must be "0x" followed by * 16 hexadecimal digits (for example 0x0123456789abcdef), as parsed * by the ocs_sscanf() call below. * * @param wwn_in pointer to the string to be parsed * @param wwn_out pointer to uint64_t in which to put the parsed result * * @return 0 if successful, non-zero if the WWN is malformed and couldn't be parsed */ int parse_wwn(char *wwn_in, uint64_t *wwn_out) { uint8_t byte0; uint8_t byte1; uint8_t byte2; uint8_t byte3; uint8_t byte4; uint8_t byte5; uint8_t byte6; uint8_t byte7; int rc; rc = ocs_sscanf(wwn_in, "0x%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx", &byte0, &byte1, &byte2, &byte3, &byte4, &byte5, &byte6, &byte7); if (rc == 8) { *wwn_out = ((uint64_t)byte0 << 56) | ((uint64_t)byte1 << 48) | ((uint64_t)byte2 << 40) | ((uint64_t)byte3 << 32) | ((uint64_t)byte4 << 24) | ((uint64_t)byte5 << 16) | ((uint64_t)byte6 << 8) | ((uint64_t)byte7); return 0; } else { return 1; } } static char *mode_string(int mode); /** * @ingroup mgmt * @brief Generate the beginning of a numbered section in a management XML document. * * @par Description * This function begins a section. The XML information is appended to * the textbuf. This form of the function is used for sections that might have * multiple instances, such as a node or a SLI Port (sport). The index number * is appended to the name. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ extern void ocs_mgmt_start_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "<%s instance=\"%d\">\n", name, index); } /** * @ingroup mgmt * @brief Generate the beginning of an unnumbered section in a management XML document. * * @par Description * This function begins a section. The XML information is appended to * the textbuf. This form of the function is used for sections that have * a single instance only. Therefore, no index number is needed. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None. */ extern void ocs_mgmt_start_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "<%s>\n", name); } /** * @ingroup mgmt * @brief Generate the end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None.
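 *
 * @par Example
 * An illustrative pairing with ocs_mgmt_start_unnumbered_section();
 * the "stats" section name is a made-up example. Together the calls
 * emit an XML element wrapping whatever properties are written in
 * between:
 * @code
 * ocs_mgmt_start_unnumbered_section(textbuf, "stats");
 * ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "node_abort_cnt", "%d", 0);
 * ocs_mgmt_end_unnumbered_section(textbuf, "stats");
 * @endcode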
*/ void ocs_mgmt_end_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "</%s>\n", name); } /** * @ingroup mgmt * @brief Generate the indexed end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ void ocs_mgmt_end_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "</%s>\n", name); } /** * @ingroup mgmt * @brief Generate a property, with no value, in a management XML document. * * @par Description * This function generates a property name. The XML information is appended to * the textbuf. This form of the function is used by the list functions * when the property name only (and not the current value) is given. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * * @return None. */ void ocs_mgmt_emit_property_name(ocs_textbuf_t *textbuf, int mode, const char *name) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\"/>\n", name, mode_string(mode)); } /** * @ingroup mgmt * @brief Generate a property with a string value in a management XML document. * * @par Description * This function generates a property name and a string value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Value of the property. * * @return None. */ void ocs_mgmt_emit_string(ocs_textbuf_t *textbuf, int mode, const char *name, const char *value) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s</%s>\n", name, mode_string(mode), value, name); } /** * @ingroup mgmt * @brief Generate a property with an integer value in a management XML document. * * @par Description * This function generates a property name and an integer value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param fmt A printf format for formatting the integer value. * * @return none */ void ocs_mgmt_emit_int(ocs_textbuf_t *textbuf, int mode, const char *name, const char *fmt, ...) { va_list ap; char valuebuf[64]; va_start(ap, fmt); ocs_vsnprintf(valuebuf, sizeof(valuebuf), fmt, ap); va_end(ap); ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s</%s>\n", name, mode_string(mode), valuebuf, name); } /** * @ingroup mgmt * @brief Generate a property with a boolean value in a management XML document. * * @par Description * This function generates a property name and a boolean value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Boolean value to be added to the textbuf. * * @return None. */ void ocs_mgmt_emit_boolean(ocs_textbuf_t *textbuf, int mode, const char *name, int value) { char *valuebuf = value ?
"true" : "false"; ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), valuebuf, name); } static char *mode_string(int mode) { static char mode_str[4]; mode_str[0] = '\0'; if (mode & MGMT_MODE_RD) { strcat(mode_str, "r"); } if (mode & MGMT_MODE_WR) { strcat(mode_str, "w"); } if (mode & MGMT_MODE_EX) { strcat(mode_str, "x"); } return mode_str; } diff --git a/sys/dev/ocs_fc/ocs_xport.c b/sys/dev/ocs_fc/ocs_xport.c index 6b5ff6fa5b48..6170413a3d89 100644 --- a/sys/dev/ocs_fc/ocs_xport.c +++ b/sys/dev/ocs_fc/ocs_xport.c @@ -1,1098 +1,1135 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * FC transport API * */ #include "ocs.h" #include "ocs_device.h" static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg); static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg); /** * @brief Post node event callback argument. */ typedef struct { ocs_sem_t sem; ocs_node_t *node; ocs_sm_event_t evt; void *context; } ocs_xport_post_node_event_t; /** * @brief Allocate a transport object. * * @par Description * A transport object is allocated, and associated with a device instance. * * @param ocs Pointer to device instance. * * @return Returns the pointer to the allocated transport object, or NULL if failed. */ ocs_xport_t * ocs_xport_alloc(ocs_t *ocs) { ocs_xport_t *xport; ocs_assert(ocs, NULL); xport = ocs_malloc(ocs, sizeof(*xport), OCS_M_ZERO); if (xport != NULL) { xport->ocs = ocs; } return xport; } /** * @brief Create the RQ threads and the circular buffers used to pass sequences. * * @par Description * Creates the circular buffers and the servicing threads for RQ processing. * * @param xport Pointer to transport object * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static void ocs_xport_rq_threads_teardown(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t i; if (xport->num_rq_threads == 0 || xport->rq_thread_info == NULL) { return; } /* Abort any threads */ for (i = 0; i < xport->num_rq_threads; i++) { if (xport->rq_thread_info[i].thread_started) { ocs_thread_terminate(&xport->rq_thread_info[i].thread); /* wait for the thread to exit */ ocs_log_debug(ocs, "wait for thread %d to exit\n", i); while (xport->rq_thread_info[i].thread_started) { ocs_udelay(10000); } ocs_log_debug(ocs, "thread %d exited\n", i); } if (xport->rq_thread_info[i].seq_cbuf != NULL) { ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf); xport->rq_thread_info[i].seq_cbuf = NULL; } } } /** * @brief Create the RQ threads and the circular buffers used to pass sequences. * * @par Description * Creates the circular buffers and the servicing threads for RQ processing. * * @param xport Pointer to transport object. * @param num_rq_threads Number of RQ processing threads that the * driver creates. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t ocs_xport_rq_threads_create(ocs_xport_t *xport, uint32_t num_rq_threads) { ocs_t *ocs = xport->ocs; int32_t rc = 0; uint32_t i; xport->num_rq_threads = num_rq_threads; ocs_log_debug(ocs, "number of RQ threads %d\n", num_rq_threads); if (num_rq_threads == 0) { return 0; } /* Allocate the space for the thread objects */ xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M_ZERO); if (xport->rq_thread_info == NULL) { ocs_log_err(ocs, "memory allocation failure\n"); return -1; } /* Create the circular buffers and threads. */ for (i = 0; i < num_rq_threads; i++) { xport->rq_thread_info[i].ocs = ocs; xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR); if (xport->rq_thread_info[i].seq_cbuf == NULL) { goto ocs_xport_rq_threads_create_error; } ocs_snprintf(xport->rq_thread_info[i].thread_name, sizeof(xport->rq_thread_info[i].thread_name), "ocs_unsol_rq:%d:%d", ocs->instance_index, i); rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread, xport->rq_thread_info[i].thread_name, &xport->rq_thread_info[i], OCS_THREAD_RUN); if (rc) { ocs_log_err(ocs, "ocs_thread_create failed: %d\n", rc); goto ocs_xport_rq_threads_create_error; } xport->rq_thread_info[i].thread_started = TRUE; } return 0; ocs_xport_rq_threads_create_error: ocs_xport_rq_threads_teardown(xport); return -1; } /** * @brief Do as much allocation as possible, but do not initialize the device. * * @par Description * Performs the functions required to get a device ready to run. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure.
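 *
 * @par Example
 * An illustrative, simplified bring-up ordering under the driver's
 * usual assumptions (error handling and the ocs->xport bookkeeping
 * performed by the real attach path are omitted):
 * @code
 * ocs_xport_t *xport = ocs_xport_alloc(ocs);
 * if ((xport != NULL) && (ocs_xport_attach(xport) == 0) &&
 *     (ocs_xport_initialize(xport) == 0)) {
 *         ocs_xport_control(xport, OCS_XPORT_PORT_ONLINE);
 * }
 * @endcode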
*/ int32_t ocs_xport_attach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t max_sgl; uint32_t n_sgl; uint32_t i; uint32_t value; uint32_t max_remote_nodes; /* booleans used for cleanup if initialization fails */ uint8_t io_pool_created = FALSE; uint8_t node_pool_created = FALSE; uint8_t rq_threads_created = FALSE; ocs_list_init(&ocs->domain_list, ocs_domain_t, link); for (i = 0; i < SLI4_MAX_FCFI; i++) { xport->fcfi[i].hold_frames = 1; ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i); ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link); } rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version); if (rc) { ocs_log_test(ocs, "can't set OCS_HW_WAR_VERSION\n"); return -1; } rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC); if (rc) { ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc); return -1; } else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) { ocs_log_debug(ocs, "stopping after ocs_hw_setup\n"); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce); ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce); ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy); ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta); ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value); ocs_log_debug(ocs, "RQ Selection Policy: %d\n", value); ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def); ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; n_sgl = MIN(OCS_FC_MAX_SGL, max_sgl); /* EVT: For chained SGL testing */ if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) { n_sgl = 4; } /* Note: number of SGLs must be set for ocs_node_create_pool */ if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc); return -1; } else { ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl); } ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, &max_remote_nodes); if (!ocs->max_remote_nodes) ocs->max_remote_nodes = max_remote_nodes; rc = ocs_node_create_pool(ocs, ocs->max_remote_nodes); if (rc) { ocs_log_err(ocs, "Can't allocate node pool\n"); goto ocs_xport_attach_cleanup; } else { node_pool_created = TRUE; } /* EVT: if testing chained SGLs allocate OCS_FC_MAX_SGL SGE's in the IO */ xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios, (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl); if (xport->io_pool == NULL) { ocs_log_err(ocs, "Can't allocate IO pool\n"); goto ocs_xport_attach_cleanup; } else { io_pool_created = TRUE; } /* * setup the RQ processing threads */ if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) { ocs_log_err(ocs, "failure creating RQ threads\n"); goto ocs_xport_attach_cleanup; } rq_threads_created = TRUE; return 0; ocs_xport_attach_cleanup: if (io_pool_created) { ocs_io_pool_free(xport->io_pool); } if (node_pool_created) { ocs_node_free_pool(ocs); } return -1; } /** * @brief Determines how to setup auto Xfer ready. * * @par Description * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. 
*/ static int32_t ocs_xport_initialize_auto_xfer_ready(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t auto_xfer_rdy; char prop_buf[32]; uint32_t ramdisc_blocksize = 512; uint8_t p_type = 0; ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &auto_xfer_rdy); if (!auto_xfer_rdy) { ocs->auto_xfer_rdy_size = 0; ocs_log_test(ocs, "Cannot enable auto xfer rdy for this port\n"); return 0; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } /* * Determine if we are doing protection in the backend. We are looking * at the modules parameters here. The backend cannot allow a format * command to change the protection mode when using this feature, * otherwise the firmware will not do the proper thing. */ if (ocs_get_property("p_type", prop_buf, sizeof(prop_buf)) == 0) { p_type = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) { ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("external_dif", prop_buf, sizeof(prop_buf)) == 0) { if(ocs_strlen(prop_buf)) { if (p_type == 0) { p_type = 1; } } } if (p_type != 0) { if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, ramdisc_blocksize)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, p_type)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc); return -1; } } ocs_log_debug(ocs, "Auto xfer rdy is enabled, p_type=%d, blksize=%d\n", p_type, ramdisc_blocksize); return 0; } +/** + * @brief Initialize link topology config + * + * @par Description + * Topology can be fetched from the mod-param or from Persistent Topology (PT). + * a. The mod-param value is used when the value is 1 (P2P) or 2 (LOOP). + * b. PT is used if the mod-param is not provided (i.e., the default value of AUTO). + * Also, if the mod-param is used, PT is updated to match. + * + * @param ocs Pointer to ocs + * + * @return Returns 0 on success, or a non-zero value on failure. + */ +static int +ocs_topology_setup(ocs_t *ocs) +{ + uint32_t topology; + + if (ocs->topology == OCS_HW_TOPOLOGY_AUTO) { + topology = ocs_hw_get_config_persistent_topology(&ocs->hw); + } else { + topology = ocs->topology; + /* Ignore failure here. The link will come up either in auto mode + * if PT is not supported, or with the last saved PT value. */ + ocs_hw_set_persistent_topology(&ocs->hw, topology, OCS_CMD_POLL); + } + + return ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topology); +} + /** * @brief Initializes the device. * * @par Description * Performs the functions required to make a device functional. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure.
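 *
 * @par Example
 * An illustrative direct call to the persistent-topology helper used by
 * ocs_topology_setup() above. OCS_CMD_POLL is the variant seen there
 * (presumably blocking until the mailbox command completes), while the
 * mgmt path uses OCS_CMD_NOWAIT:
 * @code
 * if (ocs_hw_set_persistent_topology(&ocs->hw, ocs->topology,
 *     OCS_CMD_POLL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(ocs, "persistent topology update failed\n");
 * }
 * @endcode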
*/ int32_t ocs_xport_initialize(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t i; uint32_t max_hw_io; uint32_t max_sgl; uint32_t hlm; uint32_t rq_limit; uint32_t dif_capable; uint8_t dif_separate = 0; char prop_buf[32]; /* booleans used for cleanup if initialization fails */ uint8_t ini_device_set = FALSE; uint8_t tgt_device_set = FALSE; uint8_t hw_initialized = FALSE; ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; if (ocs->enable_hlm) { ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm); if (!hlm) { ocs->enable_hlm = FALSE; ocs_log_err(ocs, "Cannot enable high login mode for this port\n"); } else { ocs_log_debug(ocs, "High login mode is enabled\n"); if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) { ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc); return -1; } } } /* validate the auto xfer_rdy size */ if (ocs->auto_xfer_rdy_size > 0 && (ocs->auto_xfer_rdy_size < 2048 || ocs->auto_xfer_rdy_size > 65536)) { ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n"); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs->auto_xfer_rdy_size > 0) { if (ocs_xport_initialize_auto_xfer_ready(xport)) { ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc); return -1; } if (ocs->esoc){ ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE); } } if (ocs->explicit_buffer_list) { /* Are pre-registered SGL's required? */ ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i); if (i == TRUE) { ocs_log_err(ocs, "Explicit Buffer List not supported on this device, not enabled\n"); } else { ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE); } } + /* Setup persistent topology based on topology mod-param value */ + rc = ocs_topology_setup(ocs); + if (rc) { + ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc); + return -1; + } + if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT); if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc); return -1; } /* currently only Lancer supports setting the CRC seed value */ if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) { if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc); return -1; } } /* Set the Dif mode */ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) { if (dif_capable) { if (ocs_get_property("dif_separate", prop_buf, sizeof(prop_buf)) == 0) { dif_separate = ocs_strtoul(prop_buf, 0, 0); } if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE, (dif_separate == 0 ?
OCS_HW_DIF_MODE_INLINE : OCS_HW_DIF_MODE_SEPARATE)))) { ocs_log_err(ocs, "Requested DIF MODE not supported\n"); } } } if (ocs->target_io_timer_sec) { ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec); ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, TRUE); } ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs); ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV); /* Initialize vport list */ ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link); ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index); ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link); ocs_atomic_init(&xport->io_active_count, 0); ocs_atomic_init(&xport->io_pending_count, 0); ocs_atomic_init(&xport->io_total_free, 0); ocs_atomic_init(&xport->io_total_pending, 0); ocs_atomic_init(&xport->io_alloc_failed_count, 0); ocs_atomic_init(&xport->io_pending_recursing, 0); ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs)); rc = ocs_hw_init(&ocs->hw); if (rc) { ocs_log_err(ocs, "ocs_hw_init failure\n"); goto ocs_xport_init_cleanup; } else { hw_initialized = TRUE; } rq_limit = max_hw_io/2; if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc); } if (ocs->config_tgt) { rc = ocs_scsi_tgt_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize target\n"); goto ocs_xport_init_cleanup; } else { tgt_device_set = TRUE; } } if (ocs->enable_ini) { rc = ocs_scsi_ini_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize initiator\n"); goto ocs_xport_init_cleanup; } else { ini_device_set = TRUE; } } /* Add vports */ if (ocs->num_vports != 0) { uint32_t max_vports; ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports); if (ocs->num_vports < max_vports) { ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports); for (i = 0; i < ocs->num_vports; i++) { ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL); } } else { ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1); goto ocs_xport_init_cleanup; } } return 0; ocs_xport_init_cleanup: if (ini_device_set) { ocs_scsi_ini_del_device(ocs); } if (tgt_device_set) { ocs_scsi_tgt_del_device(ocs); } if (hw_initialized) { /* ocs_hw_teardown can only execute after ocs_hw_init */ ocs_hw_teardown(&ocs->hw); } return -1; } /** * @brief Detaches the transport from the device. * * @par Description * Performs the functions required to shut down a device. * * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. */ int32_t ocs_xport_detach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; /* free resources associated with target-server and initiator-client */ if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); if (ocs->enable_ini) { ocs_scsi_ini_del_device(ocs); /*Shutdown FC Statistics timer*/ if (ocs_timer_pending(&ocs->xport->stats_timer)) ocs_del_timer(&ocs->xport->stats_timer); } ocs_hw_teardown(&ocs->hw); return 0; } /** * @brief domain list empty callback * * @par Description * Function is invoked when the device domain list goes empty. 
By convention * @c arg points to an ocs_sem_t instance, that is incremented. * * @param ocs Pointer to device object. * @param arg Pointer to semaphore instance. * * @return None. */ static void ocs_xport_domain_list_empty_cb(ocs_t *ocs, void *arg) { ocs_sem_t *sem = arg; ocs_assert(ocs); ocs_assert(sem); ocs_sem_v(sem); } /** * @brief post node event callback * * @par Description * This function is called from the mailbox completion interrupt context to post an * event to a node object. By doing this in the interrupt context, it has * the benefit of only posting events in the interrupt context, deferring the need to * create a per event node lock. * * @param hw Pointer to HW structure. * @param status Completion status for mailbox command. * @param mqe Mailbox queue completion entry. * @param arg Callback argument. * * @return Returns 0 on success, a negative error code value on failure. */ static int32_t ocs_xport_post_node_event_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_xport_post_node_event_t *payload = arg; if (payload != NULL) { ocs_node_post_event(payload->node, payload->evt, payload->context); ocs_sem_v(&payload->sem); } return 0; } /** * @brief Initiate force free. * * @par Description * Perform force free of OCS. * * @param xport Pointer to transport object. * * @return None. */ static void ocs_xport_force_free(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; ocs_domain_t *domain; ocs_domain_t *next; ocs_log_debug(ocs, "reset required, do force shutdown\n"); ocs_device_lock(ocs); ocs_list_foreach_safe(&ocs->domain_list, domain, next) { ocs_domain_force_free(domain); } ocs_device_unlock(ocs); } /** * @brief Perform transport attach function. * * @par Description * Perform the attach function, which for the FC transport makes a HW call * to bring up the link. * * @param xport pointer to transport object. * @param cmd command to execute. * * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_ONLINE) * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_OFFLINE) * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_SHUTDOWN) * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_POST_NODE_EVENT, ocs_node_t *node, ocs_sm_event_t, void *context) * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...) { uint32_t rc = 0; ocs_t *ocs = NULL; va_list argp; ocs_assert(xport, -1); ocs_assert(xport->ocs, -1); ocs = xport->ocs; switch (cmd) { case OCS_XPORT_PORT_ONLINE: { /* Bring the port on-line */ rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL); if (rc) { ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc); } else { xport->configured_link_state = cmd; } break; } case OCS_XPORT_PORT_OFFLINE: { if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) { ocs_log_err(ocs, "port shutdown failed\n"); } else { xport->configured_link_state = cmd; } break; } case OCS_XPORT_SHUTDOWN: { ocs_sem_t sem; uint32_t reset_required; /* if a PHYSDEV reset was performed (e.g. hw dump), will affect * all PCI functions; orderly shutdown won't work, just force free */ /* TODO: need to poll this regularly... 
*/ if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) { reset_required = 0; } if (reset_required) { ocs_log_debug(ocs, "reset required, do force shutdown\n"); ocs_xport_force_free(xport); break; } ocs_sem_init(&sem, 0, "domain_list_sem"); ocs_register_domain_list_empty_cb(ocs, ocs_xport_domain_list_empty_cb, &sem); if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) { ocs_log_debug(ocs, "port shutdown failed, do force shutdown\n"); ocs_xport_force_free(xport); } else { ocs_log_debug(ocs, "Waiting %d seconds for domain shutdown.\n", (OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC/1000000)); rc = ocs_sem_p(&sem, OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC); if (rc) { ocs_log_debug(ocs, "Note: Domain shutdown timed out\n"); ocs_xport_force_free(xport); } } ocs_register_domain_list_empty_cb(ocs, NULL, NULL); /* Free up any saved virtual ports */ ocs_vport_del_all(ocs); break; } /* * POST_NODE_EVENT: post an event to a node object * * This transport function is used to post an event to a node object. It does * this by submitting a NOP mailbox command to defer execution to the * interrupt context (thereby enforcing the serialized execution of event posting * to the node state machine instances) * * A counting semaphore is used to make the call synchronous (we wait until * the callback increments the semaphore before returning (or times out) */ case OCS_XPORT_POST_NODE_EVENT: { ocs_node_t *node; ocs_sm_event_t evt; void *context; ocs_xport_post_node_event_t payload; ocs_t *ocs; ocs_hw_t *hw; /* Retrieve arguments */ va_start(argp, cmd); node = va_arg(argp, ocs_node_t*); evt = va_arg(argp, ocs_sm_event_t); context = va_arg(argp, void *); va_end(argp); ocs_assert(node, -1); ocs_assert(node->ocs, -1); ocs = node->ocs; hw = &ocs->hw; /* if node's state machine is disabled, don't bother continuing */ if (!node->sm.current_state) { ocs_log_test(ocs, "node %p state machine disabled\n", node); return -1; } /* Setup payload */ ocs_memset(&payload, 0, sizeof(payload)); ocs_sem_init(&payload.sem, 0, "xport_post_node_Event"); payload.node = node; payload.evt = evt; payload.context = context; if (ocs_hw_async_call(hw, ocs_xport_post_node_event_cb, &payload)) { ocs_log_test(ocs, "ocs_hw_async_call failed\n"); rc = -1; break; } /* Wait for completion */ if (ocs_sem_p(&payload.sem, OCS_SEM_FOREVER)) { ocs_log_test(ocs, "POST_NODE_EVENT: sem wait failed\n"); rc = -1; } break; } /* * Set wwnn for the port. This will be used instead of the default provided by FW. */ case OCS_XPORT_WWNN_SET: { uint64_t wwnn; /* Retrieve arguments */ va_start(argp, cmd); wwnn = va_arg(argp, uint64_t); va_end(argp); ocs_log_debug(ocs, " WWNN %016" PRIx64 "\n", wwnn); xport->req_wwnn = wwnn; break; } /* * Set wwpn for the port. This will be used instead of the default provided by FW. */ case OCS_XPORT_WWPN_SET: { uint64_t wwpn; /* Retrieve arguments */ va_start(argp, cmd); wwpn = va_arg(argp, uint64_t); va_end(argp); ocs_log_debug(ocs, " WWPN %016" PRIx64 "\n", wwpn); xport->req_wwpn = wwpn; break; } default: break; } return rc; } /** * @brief Return status on a link. * * @par Description * Returns status information about a link. * * @param xport Pointer to transport object. * @param cmd Command to execute. * @param result Pointer to result value. 
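 *
 * @par Example
 * An illustrative speed query using the OCS_XPORT_LINK_SPEED form
 * listed below (the speed is reported in MB/s):
 * @code
 * ocs_xport_stats_t result;
 * if (ocs_xport_status(xport, OCS_XPORT_LINK_SPEED, &result) == 0) {
 *         ocs_log_debug(xport->ocs, "link speed %d MB/s\n", result.value);
 * }
 * @endcode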
* * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_PORT_STATUS) * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_SPEED, ocs_xport_stats_t *result) * return link speed in MB/sec * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, ocs_xport_stats_t *result) * [in] *result is speed to check in MB/s * returns 1 if supported, 0 if not * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STATISTICS, ocs_xport_stats_t *result) * return link/host port stats * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STAT_RESET, ocs_xport_stats_t *result) * resets link/host stats * * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result) { uint32_t rc = 0; ocs_t *ocs = NULL; ocs_xport_stats_t value; ocs_hw_rtn_e hw_rc; ocs_assert(xport, -1); ocs_assert(xport->ocs, -1); ocs = xport->ocs; switch (cmd) { case OCS_XPORT_CONFIG_PORT_STATUS: ocs_assert(result, -1); if (xport->configured_link_state == 0) { /* Initial state is offline. configured_link_state is */ /* set to online explicitly when port is brought online. */ xport->configured_link_state = OCS_XPORT_PORT_OFFLINE; } result->value = xport->configured_link_state; break; case OCS_XPORT_PORT_STATUS: ocs_assert(result, -1); /* Determine port status based on link speed. */ hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value); if (hw_rc == OCS_HW_RTN_SUCCESS) { if (value.value == 0) { result->value = 0; } else { result->value = 1; } rc = 0; } else { rc = -1; } break; case OCS_XPORT_LINK_SPEED: { uint32_t speed; ocs_assert(result, -1); result->value = 0; rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed); if (rc == 0) { result->value = speed; } break; } case OCS_XPORT_IS_SUPPORTED_LINK_SPEED: { uint32_t speed; uint32_t link_module_type; ocs_assert(result, -1); speed = result->value; rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type); if (rc == 0) { switch(speed) { case 1000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_1GB) != 0; break; case 2000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_2GB) != 0; break; case 4000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_4GB) != 0; break; case 8000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_8GB) != 0; break; case 10000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_10GB) != 0; break; case 16000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_16GB) != 0; break; case 32000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_32GB) != 0; break; default: rc = 0; break; } } else { rc = 0; } break; } case OCS_XPORT_LINK_STATISTICS: ocs_device_lock(ocs); ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t)); ocs_device_unlock(ocs); break; case OCS_XPORT_LINK_STAT_RESET: { /* Create a semaphore to synchronize the stat reset process. */ ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset"); /* First reset the link stats */ if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) { ocs_log_err(ocs, "%s: Failed to reset link statistics\n", __func__); break; } /* Wait for semaphore to be signaled when the command completes */ /* TODO: Should there be a timeout on this? If so, how long? 
*/ if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_test(ocs, "ocs_sem_p failed\n"); rc = -ENXIO; break; } /* Next reset the host stats */ if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1, ocs_xport_host_stats_cb, result)) != 0) { ocs_log_err(ocs, "%s: Failed to reset host statistics\n", __func__); break; } /* Wait for semaphore to be signaled when the command completes */ if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_test(ocs, "ocs_sem_p failed\n"); rc = -ENXIO; break; } break; } case OCS_XPORT_IS_QUIESCED: ocs_device_lock(ocs); result->value = ocs_list_empty(&ocs->domain_list); ocs_device_unlock(ocs); break; default: rc = -1; break; } return rc; } static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg) { ocs_xport_stats_t *result = arg; result->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter; result->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter; result->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter; result->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter; result->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter; ocs_sem_v(&(result->stats.semaphore)); } static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg) { ocs_xport_stats_t *result = arg; result->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter; result->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter; result->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter; result->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter; ocs_sem_v(&(result->stats.semaphore)); } /** * @brief Free a transport object. * * @par Description * The transport object is freed. * * @param xport Pointer to transport object. * * @return None. */ void ocs_xport_free(ocs_xport_t *xport) { ocs_t *ocs; uint32_t i; if (xport) { ocs = xport->ocs; ocs_io_pool_free(xport->io_pool); ocs_node_free_pool(ocs); if(mtx_initialized(&xport->io_pending_lock.lock)) ocs_lock_free(&xport->io_pending_lock); for (i = 0; i < SLI4_MAX_FCFI; i++) { ocs_lock_free(&xport->fcfi[i].pend_frames_lock); } ocs_xport_rq_threads_teardown(xport); ocs_free(ocs, xport, sizeof(*xport)); } } diff --git a/sys/dev/ocs_fc/sli4.c b/sys/dev/ocs_fc/sli4.c index b40c4cfa2946..e4ce20c9beac 100644 --- a/sys/dev/ocs_fc/sli4.c +++ b/sys/dev/ocs_fc/sli4.c @@ -1,8735 +1,8789 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. 
Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @defgroup sli SLI-4 Base APIs */ /** * @file * All common (i.e. transport-independent) SLI-4 functions are implemented * in this file. */ #include "sli4.h" #if defined(OCS_INCLUDE_DEBUG) #include "ocs_utils.h" #endif #define SLI4_BMBX_DELAY_US 1000 /* 1 ms */ #define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */ static int32_t sli_fw_init(sli4_t *); static int32_t sli_fw_term(sli4_t *); static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian); static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t); static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t); static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *); static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t); const uint8_t sli4_fw_initialize[] = { 0xff, 0x12, 0x34, 0xff, 0xff, 0x56, 0x78, 0xff, }; const uint8_t sli4_fw_deinitialize[] = { 0xff, 0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd, 0xff, }; typedef struct { uint32_t rev_id; uint32_t family; /* generation */ sli4_asic_type_e type; sli4_asic_rev_e rev; } sli4_asic_entry_t; sli4_asic_entry_t sli4_asic_table[] = { { 0x00, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A0}, { 0x01, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A1}, { 0x02, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A2}, { 0x00, 4, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0}, { 0x00, 2, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0}, { 0x10, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_B0}, { 0x10, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B0}, { 0x11, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B1}, { 0x0, 0x0a, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0}, { 0x10, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_B0}, { 0x30, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_D0}, { 0x3, 0x0b, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3}, { 0x0, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A0}, { 0x1, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A1}, { 0x3, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3}, { 0x1, 0x0d, SLI4_ASIC_TYPE_LANCERG7,SLI4_ASIC_REV_A1}, { 0x10, 0x0d, SLI4_ASIC_TYPE_LANCERG7,SLI4_ASIC_REV_B0}, { 0x00, 0x05, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_REV_A0}, }; /* * @brief Convert queue type enum (SLI_QTYPE_*) into a string. */ const char *SLI_QNAME[] = { "Event Queue", "Completion Queue", "Mailbox Queue", "Work Queue", "Receive Queue", "Undefined" }; /** * @brief Define the mapping of registers to their BAR and offset. * * @par Description * Although SLI-4 specification defines a common set of registers, their locations * (both BAR and offset) depend on the interface type. 
This array maps a register * enum to an array of BAR/offset pairs indexed by the interface type. For * example, to access the bootstrap mailbox register on an interface type 0 * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset. * * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off) * indicates an invalid mapping. */ const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = { /* SLI4_REG_BMBX */ { { 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } , { 0, SLI4_BMBX_REG }, }, /* SLI4_REG_EQCQ_DOORBELL */ { { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 1, SLI4_IF6_EQ_DOORBELL_REG } }, // SLI4_REG_CQ_DOORBELL { { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 1, SLI4_IF6_CQ_DOORBELL_REG } }, /* SLI4_REG_FCOE_RQ_DOORBELL */ { { 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 1, SLI4_IF6_RQ_DOORBELL_REG } }, /* SLI4_REG_IO_WQ_DOORBELL */ { { 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 1, SLI4_IF6_WQ_DOORBELL_REG } }, /* SLI4_REG_MQ_DOORBELL */ { { 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 1, SLI4_IF6_MQ_DOORBELL_REG } }, /* SLI4_REG_PHYSDEV_CONTROL */ { { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PHSDEV_CONTROL_REG_236 }, { 0, SLI4_PHSDEV_CONTROL_REG_236 }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PHSDEV_CONTROL_REG_236 } }, /* SLI4_REG_SLIPORT_CONTROL */ { { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_CONTROL_REG }, }, /* SLI4_REG_SLIPORT_ERROR1 */ { { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR1 }, }, /* SLI4_REG_SLIPORT_ERROR2 */ { { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR2 }, }, /* SLI4_REG_SLIPORT_SEMAPHORE */ { { 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 }, { 0, SLI4_PORT_SEMAPHORE_REG_236 }, { 0, SLI4_PORT_SEMAPHORE_REG_236 }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PORT_SEMAPHORE_REG_236 }, }, /* SLI4_REG_SLIPORT_STATUS */ { { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PORT_STATUS_REG_236 }, { 0, SLI4_PORT_STATUS_REG_236 }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PORT_STATUS_REG_236 }, }, /* SLI4_REG_UERR_MASK_HI */ { { 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, /* 
SLI4_REG_UERR_MASK_LO */ { { 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, /* SLI4_REG_UERR_STATUS_HI */ { { 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, /* SLI4_REG_UERR_STATUS_LO */ { { 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, /* SLI4_REG_SW_UE_CSR1 */ { { 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, /* SLI4_REG_SW_UE_CSR2 */ { { 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } }, }; /** * @brief Read the given SLI register. * * @param sli Pointer to the SLI context. * @param reg Register name enum. * * @return Returns the register value. */ uint32_t sli_reg_read(sli4_t *sli, sli4_regname_e reg) { const sli4_reg_t *r = &(regmap[reg][sli->if_type]); if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) { ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type); return UINT32_MAX; } return ocs_reg_read32(sli->os, r->rset, r->off); } /** * @brief Write the value to the given SLI register. * * @param sli Pointer to the SLI context. * @param reg Register name enum. * @param val Value to write. * * @return None. */ void sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val) { const sli4_reg_t *r = &(regmap[reg][sli->if_type]); if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) { ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type); return; } ocs_reg_write32(sli->os, r->rset, r->off, val); } /** * @brief Check if the SLI_INTF register is valid. * * @param val 32-bit SLI_INTF register value. * * @return Returns 0 on success, or a non-zero value on failure. */ static uint8_t sli_intf_valid_check(uint32_t val) { return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID; } /** * @brief Retrieve the SLI revision level. * * @param val 32-bit SLI_INTF register value. * * @return Returns the SLI revision level. */ static uint8_t sli_intf_sli_revision(uint32_t val) { return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK); } static uint8_t sli_intf_sli_family(uint32_t val) { return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK); } /** * @brief Retrieve the SLI interface type. * * @param val 32-bit SLI_INTF register value. * * @return Returns the SLI interface type. */ static uint8_t sli_intf_if_type(uint32_t val) { return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK); } /** * @brief Retrieve PCI revision ID. * * @param val 32-bit PCI CLASS_REVISION register value. * * @return Returns the PCI revision ID. 
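 *
 * @par Example
 * A worked sketch of the extraction, assuming the revision ID sits in the
 * low byte of the register (SLI4_PCI_REV_ID_SHIFT of 0 and a mask of 0xff):
 * @code
 * uint32_t class_rev = 0x0c030010;          // illustrative register value
 * uint8_t rev = sli_pci_rev_id(class_rev);  // (0x0c030010 >> 0) & 0xff = 0x10
 * @endcode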
*/ static uint8_t sli_pci_rev_id(uint32_t val) { return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK); } /** * @brief retrieve SLI ASIC generation * * @param val 32-bit SLI_ASIC_ID register value * * @return SLI ASIC generation */ static uint8_t sli_asic_gen(uint32_t val) { return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK); } /** * @brief Wait for the bootstrap mailbox to report "ready". * * @param sli4 SLI context pointer. * @param msec Number of milliseconds to wait. * * @return Returns 0 if BMBX is ready, or non-zero otherwise (i.e. time out occurred). */ static int32_t sli_bmbx_wait(sli4_t *sli4, uint32_t msec) { uint32_t val = 0; do { ocs_udelay(SLI4_BMBX_DELAY_US); val = sli_reg_read(sli4, SLI4_REG_BMBX); msec--; } while(msec && !(val & SLI4_BMBX_RDY)); return(!(val & SLI4_BMBX_RDY)); } /** * @brief Write bootstrap mailbox. * * @param sli4 SLI context pointer. * * @return Returns 0 if command succeeded, or non-zero otherwise. */ static int32_t sli_bmbx_write(sli4_t *sli4) { uint32_t val = 0; /* write buffer location to bootstrap mailbox register */ ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE); val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys); sli_reg_write(sli4, SLI4_REG_BMBX, val); if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n"); return -1; } val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys); sli_reg_write(sli4, SLI4_REG_BMBX, val); /* wait for SLI Port to set ready bit */ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/); } #if defined(OCS_INCLUDE_DEBUG) /** * @ingroup sli * @brief Dump BMBX mailbox command. * * @par Description * Convenience function for dumping BMBX mailbox commands. Takes * into account which mailbox command is given since SLI_CONFIG * commands are special. * * @b Note: This function takes advantage of * the one-command-at-a-time nature of the BMBX to be able to * display non-embedded SLI_CONFIG commands. This will not work * for mailbox commands on the MQ. Luckily, all current non-emb * mailbox commands go through the BMBX. * * @param sli4 SLI context pointer. * @param mbx Pointer to mailbox command to dump. * @param prefix Prefix for dump label. * * @return None. 
*/ static void sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix) { uint32_t size = 0; char label[64]; uint32_t i; /* Mailbox diagnostic logging */ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx; if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) { return; } if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) { sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr; sli4_req_hdr_t *sli_config_hdr; if (sli_config->emb) { ocs_snprintf(label, sizeof(label), "%s (emb)", prefix); /* if embedded, dump entire command */ sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed; size = sizeof(*sli_config) - sizeof(sli_config->payload) + sli_config_hdr->request_length + (4*sizeof(uint32_t)); ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label, (uint8_t *)sli4->bmbx.virt, size); } else { sli4_sli_config_pmd_t *pmd; ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix); /* if non-embedded, break up into two parts: SLI_CONFIG hdr and the payload(s) */ size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count); ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label, (uint8_t *)sli4->bmbx.virt, size); /* as sanity check, make sure first PMD matches what was saved */ pmd = &sli_config->payload.mem; if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) && (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) { for (i = 0; i < sli_config->pmd_count; i++, pmd++) { sli_config_hdr = sli4->bmbx_non_emb_pmd->virt; ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])", prefix, i); ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label, (uint8_t *)sli4->bmbx_non_emb_pmd->virt, sli_config_hdr->request_length + (4*sizeof(uint32_t))); } } else { ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n", pmd->address_high, pmd->address_low, ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys), ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys)); } } } else { /* not an SLI_CONFIG command, just display first 64 bytes, like we do for MQEs */ size = 64; ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix, (uint8_t *)mbx, size); } } #endif /** * @ingroup sli * @brief Submit a command to the bootstrap mailbox and check the status. * * @param sli4 SLI context pointer. * * @return Returns 0 on success, or a non-zero value on failure. 
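 *
 * @par Example
 * A minimal sketch of the submit path, assuming the SLI context and its
 * bootstrap mailbox DMA buffer are already initialized and "vpd_dma" is an
 * assumed pre-allocated ocs_dma_t for the VPD payload:
 * @code
 * // Build a READ_REV command directly in the bootstrap mailbox buffer,
 * // then submit it and poll the completion written after the mailbox.
 * if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &vpd_dma)) {
 *         if (sli_bmbx_command(sli4)) {
 *                 ocs_log_err(sli4->os, "READ_REV failed\n");
 *         }
 * }
 * @endcode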
*/ int32_t sli_bmbx_command(sli4_t *sli4) { void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE; #if defined(OCS_INCLUDE_DEBUG) sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd"); #endif if (sli_fw_error_status(sli4) > 0) { ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox " "command rejected status=%#x error1=%#x error2=%#x\n", sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS), sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1), sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2)); return -1; } if (sli_bmbx_write(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n", (void*)sli4->bmbx.phys, sli_reg_read(sli4, SLI4_REG_BMBX)); return -1; } /* check completion queue entry status */ ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD); if (((sli4_mcqe_t *)cqe)->val) { #if defined(OCS_INCLUDE_DEBUG) sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl"); ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t)); #endif return sli_cqe_mq(cqe); } else { ocs_log_err(sli4->os, "invalid or wrong type\n"); return -1; } } /**************************************************************************** * Messages */ /** * @ingroup sli * @brief Write a CONFIG_LINK command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ int32_t sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size) { sli4_cmd_config_link_t *config_link = buf; ocs_memset(buf, 0, size); config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK; /* Port interprets zero in a field as "use default value" */ return sizeof(sli4_cmd_config_link_t); } /** * @ingroup sli * @brief Write a DOWN_LINK command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ int32_t sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size) { sli4_mbox_command_header_t *hdr = buf; ocs_memset(buf, 0, size); hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK; /* Port interprets zero in a field as "use default value" */ return sizeof(sli4_mbox_command_header_t); } /** * @ingroup sli * @brief Write a DUMP Type 4 command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param wki The well known item ID. * * @return Returns the number of bytes written. */ int32_t sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki) { sli4_cmd_dump4_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP; cmd->type = 4; cmd->wki_selection = wki; return sizeof(sli4_cmd_dump4_t); } /** * @ingroup sli * @brief Write a COMMON_READ_TRANSCEIVER_DATA command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2). * @param dma DMA structure from which the data will be copied. * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. 
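 *
 * @par Example
 * An illustrative request for SFP page 0xa0 using the non-embedded form,
 * where "sfp_dma" is an assumed pre-allocated ocs_dma_t that carries the
 * request and, on completion, the response:
 * @code
 * sli_cmd_common_read_transceiver_data(sli4, buf, SLI4_BMBX_SIZE,
 *                                      0xa0, &sfp_dma);
 * @endcode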
*/ int32_t sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num, ocs_dma_t *dma) { sli4_req_common_read_transceiver_data_t *req = NULL; uint32_t sli_config_off = 0; uint32_t payload_size; if (dma == NULL) { /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t), sizeof(sli4_res_common_read_transceiver_data_t)); } else { payload_size = dma->size; } if (sli4->port_type == SLI4_PORT_TYPE_FC) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma); } if (dma == NULL) { req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off); } else { req = (sli4_req_common_read_transceiver_data_t *)dma->virt; ocs_memset(req, 0, dma->size); } req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->page_number = page_num; req->port = sli4->physical_port; return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t)); } /** * @ingroup sli * @brief Write a READ_LINK_STAT command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param req_ext_counters If TRUE, then the extended counters will be requested. * @param clear_overflow_flags If TRUE, then overflow flags will be cleared. * @param clear_all_counters If TRUE, the counters will be cleared. * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size, uint8_t req_ext_counters, uint8_t clear_overflow_flags, uint8_t clear_all_counters) { sli4_cmd_read_link_stats_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT; cmd->rec = req_ext_counters; cmd->clrc = clear_all_counters; cmd->clof = clear_overflow_flags; return sizeof(sli4_cmd_read_link_stats_t); } /** * @ingroup sli * @brief Write a READ_STATUS command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param clear_counters If TRUE, the counters will be cleared. * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size, uint8_t clear_counters) { sli4_cmd_read_status_t *cmd = buf; ocs_memset(buf, 0, size); cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS; cmd->cc = clear_counters; return sizeof(sli4_cmd_read_status_t); } /** * @brief Write a FW_DEINITIALIZE command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size) { ocs_memset(buf, 0, size); ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize)); return sizeof(sli4_fw_deinitialize); } /** * @brief Write a FW_INITIALIZE command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. 
*/ static int32_t sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size) { ocs_memset(buf, 0, size); ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize)); return sizeof(sli4_fw_initialize); } +/** + * @ingroup sli + * @brief Update INIT_LINK flags with the SLI config topology. + * + * @param sli4 SLI context pointer. + * @param init_link Pointer to the INIT_LINK command. + * + * @return Returns 0 on success, or -1 on failure. + */ +static int32_t +sli4_set_link_flags_config_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link) +{ + switch (sli4->config.topology) { + case SLI4_READ_CFG_TOPO_FC: + /* Attempt P2P but fail over to FC-AL */ + init_link->link_flags.enable_topology_failover = TRUE; + init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER; + break; + case SLI4_READ_CFG_TOPO_FC_AL: + init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY; + return (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code)); + + case SLI4_READ_CFG_TOPO_FC_DA: + init_link->link_flags.topology = FC_TOPOLOGY_P2P; + break; + default: + ocs_log_err(sli4->os, "unsupported topology %#x\n", sli4->config.topology); + return -1; + } + + return 0; +} + +/** + * @ingroup sli + * @brief Update INIT_LINK flags with the persistent topology. + * The persistent topology is stored in a form that matches link_flags, + * so it can be assigned directly. + * + * @param sli4 SLI context pointer. + * @param init_link Pointer to the INIT_LINK command. + * + * @return Returns 0 on success, or -1 on failure. + */ +static int32_t +sli4_set_link_flags_persistent_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link) +{ + if ((sli4->config.pt == SLI4_INIT_LINK_F_FCAL_ONLY) && + (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code))) + return -1; + + init_link->link_flags.enable_topology_failover = sli4->config.tf; + init_link->link_flags.topology = sli4->config.pt; + + return 0; +} + /** * @ingroup sli * @brief Write an INIT_LINK command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param speed Link speed. * @param reset_alpa For native FC, this is the selective reset AL_PA. * * @return Returns the number of bytes written.
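 *
 * @par Example
 * A minimal bring-up sketch (illustrative only; "buf" is assumed to be a
 * SLI4_BMBX_SIZE scratch buffer and sli4->config has been populated by
 * READ_CONFIG, so the topology helpers above can pick the link flags):
 * @code
 * // Request a fixed 16G link; 0 means no selective reset AL_PA.
 * // A return of 0 indicates the command could not be built.
 * if (sli_cmd_init_link(sli4, buf, SLI4_BMBX_SIZE, FC_LINK_SPEED_16G, 0)) {
 *         // submit buf, e.g. through the bootstrap mailbox
 * }
 * @endcode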
*/ int32_t sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa) { sli4_cmd_init_link_t *init_link = buf; + int32_t rc = 0; ocs_memset(buf, 0, size); init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK; /* Most fields only have meaning for FC links */ if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) { init_link->selective_reset_al_pa = reset_alpa; init_link->link_flags.loopback = FALSE; init_link->link_speed_selection_code = speed; switch (speed) { case FC_LINK_SPEED_1G: case FC_LINK_SPEED_2G: case FC_LINK_SPEED_4G: case FC_LINK_SPEED_8G: case FC_LINK_SPEED_16G: case FC_LINK_SPEED_32G: init_link->link_flags.fixed_speed = TRUE; break; case FC_LINK_SPEED_10G: ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed); return 0; } - switch (sli4->config.topology) { - case SLI4_READ_CFG_TOPO_FC: - /* Attempt P2P but failover to FC-AL */ - init_link->link_flags.enable_topology_failover = TRUE; - - if (sli_get_asic_type(sli4) == SLI4_ASIC_TYPE_LANCER) - init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_FAIL_OVER; - else - init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER; - - break; - case SLI4_READ_CFG_TOPO_FC_AL: - init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY; - if ((init_link->link_speed_selection_code == FC_LINK_SPEED_16G) || - (init_link->link_speed_selection_code == FC_LINK_SPEED_32G)) { - ocs_log_test(sli4->os, "unsupported FC-AL speed %d\n", speed); - return 0; - } - break; - case SLI4_READ_CFG_TOPO_FC_DA: - init_link->link_flags.topology = FC_TOPOLOGY_P2P; - break; - default: - ocs_log_test(sli4->os, "unsupported topology %#x\n", sli4->config.topology); - return 0; - } - init_link->link_flags.unfair = FALSE; init_link->link_flags.skip_lirp_lilp = FALSE; init_link->link_flags.gen_loop_validity_check = FALSE; init_link->link_flags.skip_lisa = FALSE; init_link->link_flags.select_hightest_al_pa = FALSE; + + /* Update the topology in the link flags for link bring-up */ + ocs_log_info(sli4->os, "bring up link with topology: %d, PTV: %d, TF: %d, PT: %d\n", + sli4->config.topology, sli4->config.ptv, sli4->config.tf, sli4->config.pt); + if (sli4->config.ptv) + rc = sli4_set_link_flags_persistent_topo(sli4, init_link); + else + rc = sli4_set_link_flags_config_topo(sli4, init_link); + } - return sizeof(sli4_cmd_init_link_t); + return rc ? 0 : sizeof(sli4_cmd_init_link_t); } /** * @ingroup sli * @brief Write an INIT_VFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param vfi VFI * @param fcfi FCFI * @param vpi VPI (Set to -1 if unused.) * * @return Returns the number of bytes written. */ int32_t sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi, uint16_t fcfi, uint16_t vpi) { sli4_cmd_init_vfi_t *init_vfi = buf; ocs_memset(buf, 0, size); init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI; init_vfi->vfi = vfi; init_vfi->fcfi = fcfi; /* * If the VPI is valid, initialize it at the same time as * the VFI */ if (0xffff != vpi) { init_vfi->vp = TRUE; init_vfi->vpi = vpi; } return sizeof(sli4_cmd_init_vfi_t); } /** * @ingroup sli * @brief Write an INIT_VPI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param vpi VPI allocated. * @param vfi VFI associated with this VPI. * * @return Returns the number of bytes written.
*/ int32_t sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi) { sli4_cmd_init_vpi_t *init_vpi = buf; ocs_memset(buf, 0, size); init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI; init_vpi->vpi = vpi; init_vpi->vfi = vfi; return sizeof(sli4_cmd_init_vpi_t); } /** * @ingroup sli * @brief Write a POST_XRI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param xri_base Starting XRI value for range of XRI given to SLI Port. * @param xri_count Number of XRIs provided to the SLI Port. * * @return Returns the number of bytes written. */ int32_t sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count) { sli4_cmd_post_xri_t *post_xri = buf; ocs_memset(buf, 0, size); post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI; post_xri->xri_base = xri_base; post_xri->xri_count = xri_count; if (sli4->config.auto_xfer_rdy == 0) { post_xri->enx = TRUE; post_xri->val = TRUE; } return sizeof(sli4_cmd_post_xri_t); } /** * @ingroup sli * @brief Write a RELEASE_XRI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param num_xri The number of XRIs to be released. * * @return Returns the number of bytes written. */ int32_t sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri) { sli4_cmd_release_xri_t *release_xri = buf; ocs_memset(buf, 0, size); release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI; release_xri->xri_count = num_xri; return sizeof(sli4_cmd_release_xri_t); } /** * @brief Write a READ_CONFIG command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes * * @return Returns the number of bytes written. */ static int32_t sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size) { sli4_cmd_read_config_t *read_config = buf; ocs_memset(buf, 0, size); read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG; return sizeof(sli4_cmd_read_config_t); } /** * @brief Write a READ_NVPARMS command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size) { sli4_cmd_read_nvparms_t *read_nvparms = buf; ocs_memset(buf, 0, size); read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS; return sizeof(sli4_cmd_read_nvparms_t); } /** * @brief Write a WRITE_NVPARMS command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param wwpn WWPN to write - pointer to array of 8 uint8_t. * @param wwnn WWNN to write - pointer to array of 8 uint8_t. * @param hard_alpa Hard ALPA to write. * @param preferred_d_id Preferred D_ID to write. * * @return Returns the number of bytes written. 
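 *
 * @par Example
 * An illustrative update of the non-volatile WWNs (the byte arrays are
 * made-up values; a hard ALPA and preferred D_ID of 0 leave those
 * settings at their defaults):
 * @code
 * uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 * uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 * sli_cmd_write_nvparms(sli4, buf, SLI4_BMBX_SIZE, wwpn, wwnn, 0, 0);
 * @endcode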
*/ int32_t sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id) { sli4_cmd_write_nvparms_t *write_nvparms = buf; ocs_memset(buf, 0, size); write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS; ocs_memcpy(write_nvparms->wwpn, wwpn, 8); ocs_memcpy(write_nvparms->wwnn, wwnn, 8); write_nvparms->hard_alpa = hard_alpa; write_nvparms->preferred_d_id = preferred_d_id; return sizeof(sli4_cmd_write_nvparms_t); } /** * @brief Write a READ_REV command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param vpd Pointer to the buffer. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd) { sli4_cmd_read_rev_t *read_rev = buf; ocs_memset(buf, 0, size); read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV; if (vpd && vpd->size) { read_rev->vpd = TRUE; read_rev->available_length = vpd->size; read_rev->physical_address_low = ocs_addr32_lo(vpd->phys); read_rev->physical_address_high = ocs_addr32_hi(vpd->phys); } return sizeof(sli4_cmd_read_rev_t); } /** * @ingroup sli * @brief Write a READ_SPARM64 command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param dma DMA buffer for the service parameters. * @param vpi VPI used to determine the WWN. * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t vpi) { sli4_cmd_read_sparm64_t *read_sparm64 = buf; ocs_memset(buf, 0, size); if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) { ocs_log_test(sli4->os, "special VPI not supported!!!\n"); return -1; } if (!dma || !dma->phys) { ocs_log_test(sli4->os, "bad DMA buffer\n"); return -1; } read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64; read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64; read_sparm64->bde_64.buffer_length = dma->size; read_sparm64->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys); read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys); read_sparm64->vpi = vpi; return sizeof(sli4_cmd_read_sparm64_t); } /** * @ingroup sli * @brief Write a READ_TOPOLOGY command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param dma DMA buffer for loop map (optional). * * @return Returns the number of bytes written. */ int32_t sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) { sli4_cmd_read_topology_t *read_topo = buf; ocs_memset(buf, 0, size); read_topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY; if (dma && dma->size) { if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) { ocs_log_test(sli4->os, "loop map buffer too small %jd\n", dma->size); return 0; } ocs_memset(dma->virt, 0, dma->size); read_topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64; read_topo->bde_loop_map.buffer_length = dma->size; read_topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys); read_topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys); } return sizeof(sli4_cmd_read_topology_t); } /** * @ingroup sli * @brief Write a REG_FCFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. 
* @param size Buffer size, in bytes. * @param index FCF index returned by READ_FCF_TABLE. * @param rq_cfg RQ_ID/R_CTL/TYPE routing information. * @param vlan_id VLAN ID tag. * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id) { sli4_cmd_reg_fcfi_t *reg_fcfi = buf; uint32_t i; ocs_memset(buf, 0, size); reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI; reg_fcfi->fcf_index = index; for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { switch (i) { case 0: reg_fcfi->rq_id_0 = rq_cfg[0].rq_id; break; case 1: reg_fcfi->rq_id_1 = rq_cfg[1].rq_id; break; case 2: reg_fcfi->rq_id_2 = rq_cfg[2].rq_id; break; case 3: reg_fcfi->rq_id_3 = rq_cfg[3].rq_id; break; } reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask; reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match; } if (vlan_id) { reg_fcfi->vv = TRUE; reg_fcfi->vlan_tag = vlan_id; } return sizeof(sli4_cmd_reg_fcfi_t); } /** * @brief Write a REG_FCFI_MRQ command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param mode SLI4_CMD_REG_FCFI_SET_FCFI_MODE to register the FCF index and VLAN; otherwise, the MRQ configuration mode. * @param fcf_index FCF index returned by READ_FCF_TABLE. * @param vlan_id VLAN ID tag. * @param rq_selection_policy RQ selection policy. * @param mrq_bit_mask Bitmask of the RQ filters configured as MRQ pairs. * @param num_mrqs Number of MRQ pairs. * @param rq_cfg RQ_ID/R_CTL/TYPE routing information. * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode, uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy, uint8_t mrq_bit_mask, uint16_t num_mrqs, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]) { sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf; uint32_t i; ocs_memset(buf, 0, size); reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ; if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) { reg_fcfi_mrq->fcf_index = fcf_index; if (vlan_id) { reg_fcfi_mrq->vv = TRUE; reg_fcfi_mrq->vlan_tag = vlan_id; } goto done; } reg_fcfi_mrq->mode = mode; for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask; reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match; switch (i) { case 3: reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id; break; case 2: reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id; break; case 1: reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id; break; case 0: reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id; break; } } reg_fcfi_mrq->rq_selection_policy = rq_selection_policy; reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask; reg_fcfi_mrq->num_mrq_pairs = num_mrqs; done: return sizeof(sli4_cmd_reg_fcfi_mrq_t); } /** * @ingroup sli * @brief Write a REG_RPI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param nport_id Remote F/N_Port_ID. * @param rpi Previously-allocated Remote Port Indicator. * @param vpi Previously-allocated Virtual Port Indicator. * @param dma DMA buffer that contains the remote port's service parameters.
* @param update Boolean indicating an update to an existing RPI (TRUE) * or a new registration (FALSE). * @param enable_t10_pi Boolean indicating whether to enable T10 PI for this RPI. * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi, uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi) { sli4_cmd_reg_rpi_t *reg_rpi = buf; ocs_memset(buf, 0, size); reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI; reg_rpi->rpi = rpi; reg_rpi->remote_n_port_id = nport_id; reg_rpi->upd = update; reg_rpi->etow = enable_t10_pi; reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64; reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN; reg_rpi->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys); reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys); reg_rpi->vpi = vpi; return sizeof(sli4_cmd_reg_rpi_t); } /** * @ingroup sli * @brief Write a REG_VFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param domain Pointer to the domain object. * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain) { sli4_cmd_reg_vfi_t *reg_vfi = buf; if (!sli4 || !buf || !domain) { return 0; } ocs_memset(buf, 0, size); reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI; reg_vfi->vfi = domain->indicator; reg_vfi->fcfi = domain->fcf_indicator; /* TODO contents of domain->dma only valid if topo == FABRIC */ reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64; reg_vfi->sparm.buffer_length = 0x70; reg_vfi->sparm.u.data.buffer_address_low = ocs_addr32_lo(domain->dma.phys); reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys); reg_vfi->e_d_tov = sli4->config.e_d_tov; reg_vfi->r_a_tov = sli4->config.r_a_tov; reg_vfi->vp = TRUE; reg_vfi->vpi = domain->sport->indicator; ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn)); reg_vfi->local_n_port_id = domain->sport->fc_id; return sizeof(sli4_cmd_reg_vfi_t); } /** * @ingroup sli * @brief Write a REG_VPI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param sport Pointer to the SLI Port object. * @param update Boolean indicating whether to update the existing VPI (true) * or create a new VPI (false). * * @return Returns the number of bytes written. */ int32_t sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update) { sli4_cmd_reg_vpi_t *reg_vpi = buf; if (!sli4 || !buf || !sport) { return 0; } ocs_memset(buf, 0, size); reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI; reg_vpi->local_n_port_id = sport->fc_id; reg_vpi->upd = update != 0; ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn)); reg_vpi->vpi = sport->indicator; reg_vpi->vfi = sport->domain->indicator; return sizeof(sli4_cmd_reg_vpi_t); } /** * @brief Write a REQUEST_FEATURES command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param mask Features to request. * @param query Use feature query mode (does not change FW). * * @return Returns the number of bytes written.
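 *
 * @par Example
 * A sketch of query mode, which reports firmware support without changing
 * its state (a zero mask is assumed sufficient here, since the response
 * carries the supported feature set):
 * @code
 * sli4_features_t mask;
 * mask.dword = 0;      // no features requested; just query support
 * sli_cmd_request_features(sli4, buf, SLI4_BMBX_SIZE, mask, TRUE);
 * @endcode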
*/ static int32_t sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query) { sli4_cmd_request_features_t *features = buf; ocs_memset(buf, 0, size); features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES; if (query) { features->qry = TRUE; } features->command.dword = mask.dword; return sizeof(sli4_cmd_request_features_t); } /** * @ingroup sli * @brief Write a SLI_CONFIG command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param length Length in bytes of attached command. * @param dma DMA buffer for non-embedded commands. * * @return Returns the number of bytes written. */ int32_t sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma) { sli4_cmd_sli_config_t *sli_config = NULL; if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) { ocs_log_test(sli4->os, "length(%d) > payload(%ld)\n", length, sizeof(sli_config->payload.embed)); return -1; } sli_config = buf; ocs_memset(buf, 0, size); sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG; if (NULL == dma) { sli_config->emb = TRUE; sli_config->payload_length = length; } else { sli_config->emb = FALSE; sli_config->pmd_count = 1; sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys); sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys); sli_config->payload.mem.length = dma->size; sli_config->payload_length = dma->size; #if defined(OCS_INCLUDE_DEBUG) /* save pointer to DMA for BMBX dumping purposes */ sli4->bmbx_non_emb_pmd = dma; #endif } return offsetof(sli4_cmd_sli_config_t, payload.embed); } /** * @brief Initialize SLI Port control register. * * @param sli4 SLI context pointer. * @param endian Endian value to write. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian) { uint32_t iter; int32_t rc; rc = -1; /* Initialize port, endian */ sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP); for (iter = 0; iter < 3000; iter ++) { ocs_udelay(SLI4_INIT_PORT_DELAY_US); if (sli_fw_ready(sli4) == 1) { rc = 0; break; } } if (rc != 0) { ocs_log_crit(sli4->os, "port failed to become ready after initialization\n"); } return rc; } /** * @ingroup sli * @brief Write a UNREG_FCFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param indicator Indicator value. * * @return Returns the number of bytes written. */ int32_t sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator) { sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf; if (!sli4 || !buf) { return 0; } ocs_memset(buf, 0, size); unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI; unreg_fcfi->fcfi = indicator; return sizeof(sli4_cmd_unreg_fcfi_t); } /** * @ingroup sli * @brief Write an UNREG_RPI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param indicator Indicator value. * @param which Type of unregister, such as node, port, domain, or FCF. * @param fc_id FC address. * * @return Returns the number of bytes written. 
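 *
 * @par Example
 * An illustrative teardown of a single remote-port registration, where
 * "rpi" is an assumed indicator previously allocated for the node; passing
 * UINT32_MAX skips the destination N_Port_ID qualifier:
 * @code
 * sli_cmd_unreg_rpi(sli4, buf, SLI4_BMBX_SIZE, rpi,
 *                   SLI_RSRC_FCOE_RPI, UINT32_MAX);
 * @endcode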
*/ int32_t sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which, uint32_t fc_id) { sli4_cmd_unreg_rpi_t *unreg_rpi = buf; uint8_t index_indicator = 0; if (!sli4 || !buf) { return 0; } ocs_memset(buf, 0, size); unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI; switch (which) { case SLI_RSRC_FCOE_RPI: index_indicator = SLI4_UNREG_RPI_II_RPI; if (fc_id != UINT32_MAX) { unreg_rpi->dp = TRUE; unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff; } break; case SLI_RSRC_FCOE_VPI: index_indicator = SLI4_UNREG_RPI_II_VPI; break; case SLI_RSRC_FCOE_VFI: index_indicator = SLI4_UNREG_RPI_II_VFI; break; case SLI_RSRC_FCOE_FCFI: index_indicator = SLI4_UNREG_RPI_II_FCFI; break; default: ocs_log_test(sli4->os, "unknown type %#x\n", which); return 0; } unreg_rpi->ii = index_indicator; unreg_rpi->index = indicator; return sizeof(sli4_cmd_unreg_rpi_t); } /** * @ingroup sli * @brief Write an UNREG_VFI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param domain Pointer to the domain object. * @param which Type of unregister, such as domain, FCFI, or everything. * * @return Returns the number of bytes written. */ int32_t sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which) { sli4_cmd_unreg_vfi_t *unreg_vfi = buf; if (!sli4 || !buf || !domain) { return 0; } ocs_memset(buf, 0, size); unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI; switch (which) { case SLI4_UNREG_TYPE_DOMAIN: unreg_vfi->index = domain->indicator; break; case SLI4_UNREG_TYPE_FCF: unreg_vfi->index = domain->fcf_indicator; break; case SLI4_UNREG_TYPE_ALL: unreg_vfi->index = UINT16_MAX; break; default: return 0; } if (SLI4_UNREG_TYPE_DOMAIN != which) { unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI; } return sizeof(sli4_cmd_unreg_vfi_t); } /** * @ingroup sli * @brief Write an UNREG_VPI command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param indicator Indicator value. * @param which Type of unregister: port, domain, FCFI, or everything. * * @return Returns the number of bytes written. */ int32_t sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which) { sli4_cmd_unreg_vpi_t *unreg_vpi = buf; if (!sli4 || !buf) { return 0; } ocs_memset(buf, 0, size); unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI; unreg_vpi->index = indicator; switch (which) { case SLI4_UNREG_TYPE_PORT: unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI; break; case SLI4_UNREG_TYPE_DOMAIN: unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI; break; case SLI4_UNREG_TYPE_FCF: unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI; break; case SLI4_UNREG_TYPE_ALL: unreg_vpi->index = UINT16_MAX; /* override indicator */ unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI; break; default: return 0; } return sizeof(sli4_cmd_unreg_vpi_t); } /** * @ingroup sli * @brief Write a CONFIG_AUTO_XFER_RDY command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param max_burst_len If the write FCP_DL is less than this size, * the SLI port generates the auto XFER_RDY. * * @return Returns the number of bytes written.
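 *
 * @par Example
 * A minimal sketch that enables auto XFER_RDY generation for writes of up
 * to 64 KB (the burst length is an assumed policy choice, not a hardware
 * requirement):
 * @code
 * sli_cmd_config_auto_xfer_rdy(sli4, buf, SLI4_BMBX_SIZE, 65536);
 * @endcode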
*/ int32_t sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len) { sli4_cmd_config_auto_xfer_rdy_t *req = buf; if (!sli4 || !buf) { return 0; } ocs_memset(buf, 0, size); req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY; req->max_burst_len = max_burst_len; return sizeof(sli4_cmd_config_auto_xfer_rdy_t); } /** * @ingroup sli * @brief Write a CONFIG_AUTO_XFER_RDY_HP command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to the destination buffer. * @param size Buffer size, in bytes. * @param max_burst_len If the write FCP_DL is less than this size, * the SLI port generates the auto XFER_RDY. * @param esoc Enable start offset computation. * @param block_size Block size. * * @return Returns the number of bytes written. */ int32_t sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len, uint32_t esoc, uint32_t block_size ) { sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf; if (!sli4 || !buf) { return 0; } ocs_memset(buf, 0, size); req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP; req->max_burst_len = max_burst_len; req->esoc = esoc; req->block_size = block_size; return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t); } /** * @brief Write a COMMON_FUNCTION_RESET command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size) { sli4_req_common_function_reset_t *reset = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_function_reset_t), sizeof(sli4_res_common_function_reset_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } reset = (sli4_req_common_function_reset_t *)((uint8_t *)buf + sli_config_off); reset->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET; reset->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; return(sli_config_off + sizeof(sli4_req_common_function_reset_t)); } /** * @brief Write a COMMON_CREATE_CQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param qmem DMA memory for the queue. * @param eq_id Associated EQ_ID * @param ignored This parameter carries the ULP, which is used only for WQs and RQs. * * @note This creates a Version 0 message on BE3/Skyhawk and a Version 2 message on Lancer. * * @return Returns the number of bytes written.
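 *
 * @par Example
 * A sizing sketch for the Lancer path: the CQE count is derived from the
 * queue memory size, so 1024 entries of SLI4_CQE_BYTES each select page
 * size code 1 and SLI4_CQ_CNT_1024 ("cq_mem" is an assumed pre-allocated
 * ocs_dma_t of 1024 * SLI4_CQE_BYTES bytes):
 * @code
 * sli_cmd_common_create_cq(sli4, buf, SLI4_BMBX_SIZE, &cq_mem, eq_id, 0);
 * @endcode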
*/ static int32_t sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored) { sli4_req_common_create_cq_v0_t *cqv0 = NULL; sli4_req_common_create_cq_v2_t *cqv2 = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; uint32_t if_type = sli4->if_type; uint32_t page_bytes = 0; uint32_t num_pages = 0; uint32_t cmd_size = 0; uint32_t page_size = 0; uint32_t n_cqe = 0; /* First calculate number of pages and the mailbox cmd length */ switch (if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: page_bytes = SLI_PAGE_SIZE; num_pages = sli_page_count(qmem->size, page_bytes); cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages); break; case SLI4_IF_TYPE_LANCER_FC_ETH: case SLI4_IF_TYPE_LANCER_G7: n_cqe = qmem->size / SLI4_CQE_BYTES; switch (n_cqe) { case 256: case 512: case 1024: case 2048: page_size = 1; break; case 4096: page_size = 2; break; default: return 0; } page_bytes = page_size * SLI_PAGE_SIZE; num_pages = sli_page_count(qmem->size, page_bytes); cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages); break; default: ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type); return -1; } /* now that we have the mailbox command size, we can set SLI_CONFIG fields */ if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } switch (if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off); cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cqv0->hdr.version = 0; cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ cqv0->num_pages = num_pages; switch (cqv0->num_pages) { case 1: cqv0->cqecnt = SLI4_CQ_CNT_256; break; case 2: cqv0->cqecnt = SLI4_CQ_CNT_512; break; case 4: cqv0->cqecnt = SLI4_CQ_CNT_1024; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages); return -1; } cqv0->evt = TRUE; cqv0->valid = TRUE; /* TODO cq->nodelay = ???; */ /* TODO cq->clswm = ???; */ cqv0->arm = FALSE; cqv0->eq_id = eq_id; for (p = 0, addr = qmem->phys; p < cqv0->num_pages; p++, addr += page_bytes) { cqv0->page_physical_address[p].low = ocs_addr32_lo(addr); cqv0->page_physical_address[p].high = ocs_addr32_hi(addr); } break; case SLI4_IF_TYPE_LANCER_FC_ETH: case SLI4_IF_TYPE_LANCER_G7: { cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off); cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cqv2->hdr.version = 2; cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); if (if_type == SLI4_IF_TYPE_LANCER_G7) cqv2->autovalid = TRUE; cqv2->page_size = page_size; /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */ cqv2->num_pages = num_pages; if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) { return 0; } switch (cqv2->num_pages) { case 1: cqv2->cqecnt = SLI4_CQ_CNT_256; break; case 2: cqv2->cqecnt = SLI4_CQ_CNT_512; break; case 4: cqv2->cqecnt = SLI4_CQ_CNT_1024; break; case 8: cqv2->cqecnt = SLI4_CQ_CNT_LARGE; cqv2->cqe_count = n_cqe; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages); return -1; } cqv2->evt = TRUE; cqv2->valid = TRUE; /* TODO cq->nodelay = ???; */ /* TODO cq->clswm 
= ???; */ cqv2->arm = FALSE; cqv2->eq_id = eq_id; for (p = 0, addr = qmem->phys; p < cqv2->num_pages; p++, addr += page_bytes) { cqv2->page_physical_address[p].low = ocs_addr32_lo(addr); cqv2->page_physical_address[p].high = ocs_addr32_hi(addr); } } break; } return (sli_config_off + cmd_size); }
/** * @brief Write a COMMON_DESTROY_CQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param cq_id CQ ID * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. */
static int32_t sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id) { sli4_req_common_destroy_cq_t *cq = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_common_destroy_cq_t), sizeof(sli4_res_hdr_t)), NULL); } cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off); cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ; cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) - sizeof(sli4_req_hdr_t); cq->cq_id = cq_id; return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t)); }
/** * @brief Write a COMMON_MODIFY_EQ_DELAY command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param q Queue object array. * @param num_q Queue object array count. * @param shift Phase shift for staggering interrupts. * @param delay_mult Delay multiplier for limiting interrupt frequency. * * @return Returns the number of bytes written. */
static int32_t sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift, uint32_t delay_mult) { sli4_req_common_modify_eq_delay_t *modify_delay = NULL; uint32_t sli_config_off = 0; int i; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)), NULL); } modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off); modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY; modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) - sizeof(sli4_req_hdr_t); modify_delay->num_eq = num_q; for (i = 0; i < num_q; i++) { modify_delay->eq_delay_record[i].eq_id = q[i].id; modify_delay->eq_delay_record[i].phase = shift; modify_delay->eq_delay_record[i].delay_multiplier = delay_mult; } return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t)); }
/** * @brief Write a COMMON_CREATE_EQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param qmem DMA memory for the queue. * @param ignored1 Ignored (used for consistency among queue creation functions). * @param ignored2 Ignored (used for consistency among queue creation functions). * * @note Other queue creation routines use the last parameter to pass in * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP, * so these parameters are ignored. * * @note This creates a Version 0 message, except on Lancer G7 where a Version 2 message is generated. * * @return Returns the number of bytes written.
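 *
 * @par Example
 * The EQ depth follows from the size of the queue DMA region: entries are
 * 4 bytes (SLI4_EQE_SIZE_4), so one 4 KB page holds 1024 entries and two
 * pages hold 2048. A minimal sketch, assuming the caller has already set up
 * "buf" and "qmem" (the names are illustrative):
 * @code
 * // two 4 KB pages => eq->count == SLI4_EQ_CNT_2048
 * bytes = sli_cmd_common_create_eq(sli4, buf, size, qmem, 0, 0);
 * @endcode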
*/ static int32_t sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t ignored1, uint16_t ignored2) { sli4_req_common_create_eq_t *eq = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_create_eq_t), sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off); eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ; eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) - sizeof(sli4_req_hdr_t); if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) { eq->hdr.version = 2; eq->autovalid = TRUE; } /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ eq->num_pages = qmem->size / SLI_PAGE_SIZE; switch (eq->num_pages) { case 1: eq->eqesz = SLI4_EQE_SIZE_4; eq->count = SLI4_EQ_CNT_1024; break; case 2: eq->eqesz = SLI4_EQE_SIZE_4; eq->count = SLI4_EQ_CNT_2048; break; case 4: eq->eqesz = SLI4_EQE_SIZE_4; eq->count = SLI4_EQ_CNT_4096; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages); return -1; } eq->valid = TRUE; eq->arm = FALSE; eq->delay_multiplier = 32; for (p = 0, addr = qmem->phys; p < eq->num_pages; p++, addr += SLI_PAGE_SIZE) { eq->page_address[p].low = ocs_addr32_lo(addr); eq->page_address[p].high = ocs_addr32_hi(addr); } return(sli_config_off + sizeof(sli4_req_common_create_eq_t)); } /** * @brief Write a COMMON_DESTROY_EQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param eq_id Queue ID to destroy. * * @note Other queue creation routines use the last parameter to pass in * the associated Q_ID. EQ doesn't have an associated queue so this * parameter is ignored. * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id) { sli4_req_common_destroy_eq_t *eq = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_common_destroy_eq_t), sizeof(sli4_res_hdr_t)), NULL); } eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off); eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ; eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) - sizeof(sli4_req_hdr_t); eq->eq_id = eq_id; return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t)); } /** * @brief Write a LOWLEVEL_SET_WATCHDOG command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. 
* @param timeout watchdog timer timeout in seconds * * @return void */ void sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout) { sli4_req_lowlevel_set_watchdog_t *req = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_lowlevel_set_watchdog_t), sizeof(sli4_res_lowlevel_set_watchdog_t)), NULL); } req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off); req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG; req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL; req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) - sizeof(sli4_req_hdr_t); req->watchdog_timeout = timeout; return; } static int32_t sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) { sli4_req_hdr_t *hdr = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma); } if (dma == NULL) { return 0; } ocs_memset(dma->virt, 0, dma->size); hdr = dma->virt; hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = dma->size; return(sli_config_off + sizeof(sli4_req_hdr_t)); } /** * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param dma DMA structure from which the data will be copied. * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) { sli4_req_hdr_t *hdr = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma); } if (dma == NULL) { return 0; } ocs_memset(dma->virt, 0, dma->size); hdr = dma->virt; hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = dma->size; return(sli_config_off + sizeof(sli4_req_hdr_t)); } /** * @brief Write a COMMON_CREATE_MQ_EXT command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param qmem DMA memory for the queue. * @param cq_id Associated CQ_ID. * @param ignored This parameter carries the ULP which is only used for WQ and RQs * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. 
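 *
 * @par Example
 * The MQ ring size is derived the same way: mailbox entries are 256 bytes,
 * so one 4 KB page gives a 16-entry ring (SLI4_MQE_SIZE_16) and eight pages
 * give 128 entries (SLI4_MQE_SIZE_128). A minimal sketch with illustrative
 * names:
 * @code
 * // qmem spans 8 pages => mq->ring_size == SLI4_MQE_SIZE_128
 * bytes = sli_cmd_common_create_mq_ext(sli4, buf, size, qmem, cq_id, 0);
 * @endcode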
*/ static int32_t sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored) { sli4_req_common_create_mq_ext_t *mq = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_create_mq_ext_t), sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off); mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT; mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) - sizeof(sli4_req_hdr_t); /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */ mq->num_pages = qmem->size / SLI_PAGE_SIZE; switch (mq->num_pages) { case 1: mq->ring_size = SLI4_MQE_SIZE_16; break; case 2: mq->ring_size = SLI4_MQE_SIZE_32; break; case 4: mq->ring_size = SLI4_MQE_SIZE_64; break; case 8: mq->ring_size = SLI4_MQE_SIZE_128; break; default: ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages); return -1; } /* TODO break this down by sli4->config.topology */ mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE; if (sli4->config.mq_create_version) { mq->cq_id_v1 = cq_id; mq->hdr.version = 1; } else { mq->cq_id_v0 = cq_id; } mq->val = TRUE; for (p = 0, addr = qmem->phys; p < mq->num_pages; p++, addr += SLI_PAGE_SIZE) { mq->page_physical_address[p].low = ocs_addr32_lo(addr); mq->page_physical_address[p].high = ocs_addr32_hi(addr); } return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t)); } /** * @brief Write a COMMON_DESTROY_MQ command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param mq_id MQ ID * * @note This creates a Version 0 message. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id) { sli4_req_common_destroy_mq_t *mq = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_common_destroy_mq_t), sizeof(sli4_res_hdr_t)), NULL); } mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off); mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ; mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) - sizeof(sli4_req_hdr_t); mq->mq_id = mq_id; return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t)); } /** * @ingroup sli * @brief Write a COMMON_NOP command * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param context NOP context value (passed to response, except on FC/FCoE). * * @return Returns the number of bytes written. 
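 *
 * @par Example
 * A minimal sketch of issuing a NOP through the bootstrap mailbox, using
 * the same build-then-issue pattern as the rest of this file (the context
 * value is arbitrary):
 * @code
 * if (sli_cmd_common_nop(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 0x1)) {
 *         if (sli_bmbx_command(sli4)) {
 *                 ocs_log_err(sli4->os, "bootstrap mailbox write fail (NOP)\n");
 *         }
 * }
 * @endcode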
*/ int32_t sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context) { sli4_req_common_nop_t *nop = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, /* Payload length must accommodate both request and response */ max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)), NULL); } nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off); nop->hdr.opcode = SLI4_OPC_COMMON_NOP; nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; nop->hdr.request_length = 8; ocs_memcpy(&nop->context, &context, sizeof(context)); return(sli_config_off + sizeof(sli4_req_common_nop_t)); } /** * @ingroup sli * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI). * * @return Returns the number of bytes written. */ int32_t sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype) { sli4_req_common_get_resource_extent_info_t *extent = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_common_get_resource_extent_info_t), NULL); } extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off); extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO; extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; extent->hdr.request_length = 4; extent->resource_type = rtype; return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t)); } /** * @ingroup sli * @brief Write a COMMON_GET_SLI4_PARAMETERS command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * * @return Returns the number of bytes written. */ int32_t sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size) { sli4_req_hdr_t *hdr = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_res_common_get_sli4_parameters_t), NULL); } hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off); hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = 0x50; return(sli_config_off + sizeof(sli4_req_hdr_t)); } /** * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to destination buffer. * @param size Buffer size in bytes. 
* * @return Returns the number of bytes written */ static int32_t sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size) { sli4_req_common_query_fw_config_t *fw_config; uint32_t sli_config_off = 0; uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_query_fw_config_t), sizeof(sli4_res_common_query_fw_config_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off); fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG; fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); return sli_config_off + sizeof(sli4_req_common_query_fw_config_t); } /** * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer. * * @param sli4 SLI context pointer. * @param buf Virtual pointer to destination buffer. * @param size Buffer size in bytes. * * @note Function supports both version 0 and 1 forms of this command via * the IF_TYPE. * * @return Returns the number of bytes written. */ static int32_t sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size) { sli4_req_common_get_port_name_t *port_name; uint32_t sli_config_off = 0; uint32_t payload_size; uint8_t version = 0; uint8_t pt = 0; /* Select command version according to IF_TYPE */ switch (sli4->if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: case SLI4_IF_TYPE_BE3_SKH_VF: version = 0; break; case SLI4_IF_TYPE_LANCER_FC_ETH: case SLI4_IF_TYPE_LANCER_RDMA: case SLI4_IF_TYPE_LANCER_G7: version = 1; break; default: ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type); return 0; } /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_get_port_name_t), sizeof(sli4_res_common_get_port_name_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); pt = 1; } port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off); port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME; port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t)); port_name->hdr.version = version; /* Set the port type value (ethernet=0, FC=1) for V1 commands */ if (version == 1) { port_name->pt = pt; } return sli_config_off + port_name->hdr.request_length; } /** * @ingroup sli * @brief Write a COMMON_WRITE_OBJECT command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param noc True if the object should be written but not committed to flash. * @param eof True if this is the last write for this object. * @param desired_write_length Number of bytes of data to write to the object. * @param offset Offset, in bytes, from the start of the object. * @param object_name Name of the object to write. * @param dma DMA structure from which the data will be copied. * * @return Returns the number of bytes written. 
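 *
 * @par Example
 * Objects larger than a single DMA buffer are written as a series of
 * COMMON_WRITE_OBJECT commands with an increasing @c offset, setting @c eof
 * only on the last chunk. A hedged sketch; the chunking variables are
 * illustrative, not the driver's actual firmware-download logic:
 * @code
 * // write "chunk_len" bytes at "offset"; "last" is non-zero on the final chunk
 * bytes = sli_cmd_common_write_object(sli4, buf, size, 0, last,
 *                                     chunk_len, offset, "/prg/", dma);
 * @endcode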
*/ int32_t sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size, uint16_t noc, uint16_t eof, uint32_t desired_write_length, uint32_t offset, char *object_name, ocs_dma_t *dma) { sli4_req_common_write_object_t *wr_obj = NULL; uint32_t sli_config_off = 0; sli4_bde_t *host_buffer; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t), NULL); } wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off); wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT; wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t); wr_obj->hdr.timeout = 0; wr_obj->hdr.version = 0; wr_obj->noc = noc; wr_obj->eof = eof; wr_obj->desired_write_length = desired_write_length; wr_obj->write_offset = offset; ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name)); wr_obj->host_buffer_descriptor_count = 1; host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor; /* Set up to transfer desired_write_length bytes to the device */ host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64; host_buffer->buffer_length = desired_write_length; host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys); host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys); return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t)); }
/** * @ingroup sli * @brief Write a COMMON_DELETE_OBJECT command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param object_name Name of the object to delete. * * @return Returns the number of bytes written. */
int32_t sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size, char *object_name) { sli4_req_common_delete_object_t *del_obj = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_delete_object_t), NULL); } del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off); del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT; del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; del_obj->hdr.request_length = sizeof(*del_obj); del_obj->hdr.timeout = 0; del_obj->hdr.version = 0; ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name)); return(sli_config_off + sizeof(sli4_req_common_delete_object_t)); }
/** * @ingroup sli * @brief Write a COMMON_READ_OBJECT command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param desired_read_length Number of bytes of data to read from the object. * @param offset Offset, in bytes, from the start of the object. * @param object_name Name of the object to read. * @param dma DMA structure into which the data will be copied. * * @return Returns the number of bytes written.
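 *
 * @par Example
 * A minimal sketch of reading the start of an object into a caller-supplied
 * DMA buffer; the object name here is purely hypothetical:
 * @code
 * bytes = sli_cmd_common_read_object(sli4, buf, size, dma->size, 0,
 *                                    "/dbg/dump.bin", dma);
 * @endcode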
*/ int32_t sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size, uint32_t desired_read_length, uint32_t offset, char *object_name, ocs_dma_t *dma) { sli4_req_common_read_object_t *rd_obj = NULL; uint32_t sli_config_off = 0; sli4_bde_t *host_buffer; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t), NULL); } rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off); rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT; rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t); rd_obj->hdr.timeout = 0; rd_obj->hdr.version = 0; rd_obj->desired_read_length = desired_read_length; rd_obj->read_offset = offset; ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name)); rd_obj->host_buffer_descriptor_count = 1; host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor; /* Set up to transfer desired_read_length bytes from the device */ host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64; host_buffer->buffer_length = desired_read_length; if (dma != NULL) { host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys); host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys); } else { host_buffer->u.data.buffer_address_low = 0; host_buffer->u.data.buffer_address_high = 0; } return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t)); }
/** * @ingroup sli * @brief Write a DMTF_EXEC_CLP_CMD command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param cmd DMA structure that describes the buffer for the command. * @param resp DMA structure that describes the buffer for the response. * * @return Returns the number of bytes written. */
int32_t sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *cmd, ocs_dma_t *resp) { sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_dmtf_exec_clp_cmd_t), NULL); } clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off); clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD; clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF; clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) - sizeof(sli4_req_hdr_t); clp_cmd->hdr.timeout = 0; clp_cmd->hdr.version = 0; clp_cmd->cmd_buf_length = cmd->size; clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys); clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys); clp_cmd->resp_buf_length = resp->size; clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys); clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys); return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t)); }
/** * @ingroup sli * @brief Write a COMMON_SET_DUMP_LOCATION command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param query Zero to set dump location, non-zero to query dump size. * @param is_buffer_list Set to 1 if the buffer is a set of buffer descriptors or * set to 0 if the buffer is a contiguous dump area. * @param buffer DMA structure to which the dump will be copied. * @param fdb Value for the FDB field of the command. * * @return Returns the number of bytes written.
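 *
 * @par Example
 * The same command both queries the required dump size and registers a
 * buffer. A minimal sketch of the two calls ("dump_dma" is illustrative):
 * @code
 * // query the required size first, then register a contiguous buffer
 * bytes = sli_cmd_common_set_dump_location(sli4, buf, size, 1, 0, NULL, 0);
 * bytes = sli_cmd_common_set_dump_location(sli4, buf, size, 0, 0, dump_dma, 0);
 * @endcode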
*/ int32_t sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size, uint8_t query, uint8_t is_buffer_list, ocs_dma_t *buffer, uint8_t fdb) { sli4_req_common_set_dump_location_t *set_dump_loc = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_set_dump_location_t), NULL); } set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off); set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION; set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t); set_dump_loc->hdr.timeout = 0; set_dump_loc->hdr.version = 0; set_dump_loc->blp = is_buffer_list; set_dump_loc->qry = query; set_dump_loc->fdb = fdb; if (buffer) { set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys); set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys); set_dump_loc->buffer_length = buffer->len; } else { set_dump_loc->buf_addr_low = 0; set_dump_loc->buf_addr_high = 0; set_dump_loc->buffer_length = 0; } return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t)); }
/** * @ingroup sli * @brief Write a COMMON_SET_FEATURES command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param feature Feature to set. * @param param_len Length of the parameter (must be a multiple of 4 bytes). * @param parameter Pointer to the parameter value. * * @return Returns the number of bytes written. */
int32_t sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size, uint32_t feature, uint32_t param_len, void* parameter) { sli4_req_common_set_features_t *cmd = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_set_features_t), NULL); } cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off); cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES; cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) - sizeof(sli4_req_hdr_t); cmd->hdr.timeout = 0; cmd->hdr.version = 0; cmd->feature = feature; cmd->param_len = param_len; ocs_memcpy(cmd->params, parameter, param_len); return(sli_config_off + sizeof(sli4_req_common_set_features_t)); }
/** * @ingroup sli * @brief Write a COMMON_GET_PROFILE_CONFIG command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size in bytes. * @param dma DMA capable memory used to retrieve profile. * * @return Returns the number of bytes written.
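 *
 * @par Example
 * When @c dma is non-NULL the request is built in that buffer so the
 * response can exceed the embedded mailbox payload; with a NULL @c dma the
 * request is embedded in @c buf. A minimal sketch ("profile_dma" is
 * illustrative):
 * @code
 * bytes = sli_cmd_common_get_profile_config(sli4, buf, size, &profile_dma);
 * @endcode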
*/ int32_t sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) { sli4_req_common_get_profile_config_t *req = NULL; uint32_t sli_config_off = 0; uint32_t payload_size; if (SLI4_PORT_TYPE_FC == sli4->port_type) { sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_get_profile_config_t), dma); } if (dma != NULL) { req = dma->virt; ocs_memset(req, 0, dma->size); payload_size = dma->size; } else { req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off); payload_size = sizeof(sli4_req_common_get_profile_config_t); } req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 1; return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t)); }
/** * @ingroup sli * @brief Write a COMMON_SET_PROFILE_CONFIG command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param dma DMA capable memory containing profile. * @param profile_id Profile ID to configure. * @param descriptor_count Number of descriptors in DMA buffer. * @param isap Implicit Set Active Profile value to use. * * @return Returns the number of bytes written. */
int32_t sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint8_t profile_id, uint32_t descriptor_count, uint8_t isap) { sli4_req_common_set_profile_config_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_set_profile_config_t), dma); } if (dma != NULL) { req = dma->virt; ocs_memset(req, 0, dma->size); payload_size = dma->size; } else { req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off); payload_size = sizeof(sli4_req_common_set_profile_config_t); } req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 1; req->profile_id = profile_id; req->desc_count = descriptor_count; req->isap = isap; return(cmd_off + sizeof(sli4_req_common_set_profile_config_t)); }
/** * @ingroup sli * @brief Write a COMMON_GET_PROFILE_LIST command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size in bytes. * @param start_profile_index First profile index to return. * @param dma Buffer into which the list will be written. * * @return Returns the number of bytes written.
*/ int32_t sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size, uint32_t start_profile_index, ocs_dma_t *dma) { sli4_req_common_get_profile_list_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, sizeof (sli4_req_common_get_profile_list_t), dma); } if (dma != NULL) { req = dma->virt; ocs_memset(req, 0, dma->size); payload_size = dma->size; } else { req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off); payload_size = sizeof(sli4_req_common_get_profile_list_t); } req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 0; req->start_profile_index = start_profile_index; return(cmd_off + sizeof(sli4_req_common_get_profile_list_t)); }
/** * @ingroup sli * @brief Write a COMMON_GET_ACTIVE_PROFILE command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size in bytes. * * @return Returns the number of bytes written. */
int32_t sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size) { sli4_req_common_get_active_profile_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_get_active_profile_t), sizeof(sli4_res_common_get_active_profile_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } req = (sli4_req_common_get_active_profile_t *) ((uint8_t*)buf + cmd_off); req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 0; return(cmd_off + sizeof(sli4_req_common_get_active_profile_t)); }
/** * @ingroup sli * @brief Write a COMMON_SET_ACTIVE_PROFILE command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size in bytes. * @param fd If non-zero, set profile to factory default. * @param active_profile_id ID of new active profile. * * @return Returns the number of bytes written. */
int32_t sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size, uint32_t fd, uint32_t active_profile_id) { sli4_req_common_set_active_profile_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_set_active_profile_t), sizeof(sli4_res_common_set_active_profile_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } req = (sli4_req_common_set_active_profile_t *) ((uint8_t*)buf + cmd_off); req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 0; req->fd = fd; req->active_profile_id = active_profile_id; return(cmd_off + sizeof(sli4_req_common_set_active_profile_t)); }
/** * @ingroup sli * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size in bytes. * @param dma Buffer to store the supported link configuration modes from the physical device. * * @return Returns the number of bytes written.
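 *
 * @par Example
 * A hedged sketch of the expected call sequence: fetch the supported link
 * configurations, then apply one with sli_cmd_common_set_reconfig_link_id()
 * ("info_dma" is illustrative):
 * @code
 * bytes = sli_cmd_common_get_reconfig_link_info(sli4, buf, size, &info_dma);
 * @endcode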
*/ int32_t sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) { sli4_req_common_get_reconfig_link_info_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t), sizeof(sli4_res_common_get_reconfig_link_info_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma); } if (dma != NULL) { req = dma->virt; ocs_memset(req, 0, dma->size); payload_size = dma->size; } else { req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off); payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t); } req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 0; return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t)); } /** * @ingroup sli * @brief Write a COMMON_SET_RECONFIG_LINK_ID command. * * @param sli4 SLI context. * @param buf destination buffer for the command. * @param size buffer size in bytes. * @param fd If non-zero, set link config to factory default. * @param active_link_config_id ID of new active profile. * @param dma Buffer to assign the link configuration mode that is to become active from the physical device. * * @return Returns the number of bytes written. */ int32_t sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint32_t fd, uint32_t active_link_config_id) { sli4_req_common_set_reconfig_link_id_t *req = NULL; uint32_t cmd_off = 0; uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t), sizeof(sli4_res_common_set_reconfig_link_id_t)); if (SLI4_PORT_TYPE_FC == sli4->port_type) { cmd_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } if (dma != NULL) { req = dma->virt; ocs_memset(req, 0, dma->size); payload_size = dma->size; } else { req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off); payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t); } req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID; req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); req->hdr.version = 0; req->fd = fd; req->next_link_config_id = active_link_config_id; return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t)); } /** * @ingroup sli * @brief Check the mailbox/queue completion entry. * * @param buf Pointer to the MCQE. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_cqe_mq(void *buf) { sli4_mcqe_t *mcqe = buf; /* * Firmware can split mbx completions into two MCQEs: first with only * the "consumed" bit set and a second with the "complete" bit set. * Thus, ignore MCQE unless "complete" is set. */ if (!mcqe->cmp) { return -2; } if (mcqe->completion_status) { ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n", mcqe->completion_status, mcqe->extended_status, mcqe->con, mcqe->cmp, mcqe->ae, mcqe->val); } return mcqe->completion_status; } /** * @ingroup sli * @brief Check the asynchronous event completion entry. * * @param sli4 SLI context. * @param buf Pointer to the ACQE. * * @return Returns 0 on success, or a non-zero value on failure. 
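 *
 * @par Example
 * A minimal sketch of how a CQ service loop might hand an asynchronous
 * entry to this dispatcher (queue bookkeeping elided):
 * @code
 * if (sli_cqe_async(sli4, cqe)) {
 *         ocs_log_test(sli4->os, "unhandled or failed async event\n");
 * }
 * @endcode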
*/ int32_t sli_cqe_async(sli4_t *sli4, void *buf) { sli4_acqe_t *acqe = buf; int32_t rc = -1; if (!sli4 || !buf) { ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf); return -1; } switch (acqe->event_code) { case SLI4_ACQE_EVENT_CODE_LINK_STATE: rc = sli_fc_process_link_state(sli4, buf); break; case SLI4_ACQE_EVENT_CODE_FCOE_FIP: rc = sli_fc_process_fcoe(sli4, buf); break; case SLI4_ACQE_EVENT_CODE_GRP_5: /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n"); break; case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT: ocs_log_debug(sli4->os,"ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n", acqe->event_type, acqe->event_data[0], acqe->event_data[1]); #if defined(OCS_INCLUDE_DEBUG) ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe)); #endif break; case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT: rc = sli_fc_process_link_attention(sli4, buf); break; default: /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code); } return rc; }
/** * @brief Check the SLI_CONFIG response. * * @par Description * Function checks the SLI_CONFIG response and the payload status. * * @param buf Pointer to SLI_CONFIG response. * * @return Returns 0 on success, or a non-zero value on failure. */
static int32_t sli_res_sli_config(void *buf) { sli4_cmd_sli_config_t *sli_config = buf; if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) { ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf, buf ? sli_config->hdr.command : -1); return -1; } if (sli_config->hdr.status) { return sli_config->hdr.status; } if (sli_config->emb) { return sli_config->payload.embed[4]; } else { ocs_log_test(NULL, "external buffers not supported\n"); return -1; } }
/** * @brief Issue a COMMON_FUNCTION_RESET command. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. */
static int32_t sli_common_function_reset(sli4_t *sli4) { if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n"); return -1; } if (sli_res_sli_config(sli4->bmbx.virt)) { ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n"); return -1; } } else { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } return 0; }
/** * @brief Check to see if the FW is ready. * * @par Description * Based on SLI-4 Architecture Specification, Revision 4.x0-13 (2012). * * @param sli4 SLI context. * @param timeout_ms Time, in milliseconds, to wait for the port to be ready * before failing. * * @return Returns TRUE for ready, or FALSE otherwise. */
static int32_t sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms) { uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000); uint32_t ready = FALSE; do { iter--; ocs_udelay(SLI4_INIT_PORT_DELAY_US); if (sli_fw_ready(sli4) == 1) { ready = TRUE; } } while (!ready && (iter > 0)); return ready; }
/** * @brief Initialize the firmware. * * @par Description * Based on SLI-4 Architecture Specification, Revision 4.x0-13 (2012). * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. */
static int32_t sli_fw_init(sli4_t *sli4) { uint32_t ready; uint32_t endian; /* * Is firmware ready for operation? 
*/ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "FW status is NOT ready\n"); return -1; } /* * Reset port to a known state */ switch (sli4->if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: case SLI4_IF_TYPE_BE3_SKH_VF: /* No SLIPORT_CONTROL register so use command sequence instead */ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); return -1; } if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n"); return -1; } } else { ocs_log_crit(sli4->os, "bad FW_INIT write\n"); return -1; } if (sli_common_function_reset(sli4)) { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } break; case SLI4_IF_TYPE_LANCER_FC_ETH: case SLI4_IF_TYPE_LANCER_G7: #if BYTE_ORDER == LITTLE_ENDIAN endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; #else endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; #endif if (sli_sliport_control(sli4, endian)) return -1; break; default: ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type); return -1; } return 0; } /** * @brief Terminate the firmware. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t sli_fw_term(sli4_t *sli4) { uint32_t endian; if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF || sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) { /* No SLIPORT_CONTROL register so use command sequence instead */ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); return -1; } if (sli_common_function_reset(sli4)) { ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); return -1; } if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n"); return -1; } } else { ocs_log_test(sli4->os, "bad FW_DEINIT write\n"); return -1; } } else { #if BYTE_ORDER == LITTLE_ENDIAN endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; #else endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; #endif /* type 2 etc. use SLIPORT_CONTROL to initialize port */ sli_sliport_control(sli4, endian); } return 0; } /** * @brief Write the doorbell register associated with the queue object. * * @param sli4 SLI context. * @param q Queue object. * * @return Returns 0 on success, or a non-zero value on failure. */ static int32_t sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q) { uint32_t val = 0; switch (q->type) { case SLI_QTYPE_EQ: if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) val = sli_iftype6_eq_doorbell(q->n_posted, q->id, FALSE); else val = sli_eq_doorbell(q->n_posted, q->id, FALSE); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_CQ: if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) val = sli_iftype6_cq_doorbell(q->n_posted, q->id, FALSE); else val = sli_cq_doorbell(q->n_posted, q->id, FALSE); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_MQ: val = SLI4_MQ_DOORBELL(q->n_posted, q->id); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; case SLI_QTYPE_RQ: { uint32_t n_posted = q->n_posted; /* * FC/FCoE has different rules for Receive Queues. The host * should only update the doorbell of the RQ-pair containing * the headers since the header / payload RQs are treated * as a matched unit. */ if (SLI4_PORT_TYPE_FC == sli4->port_type) { /* * In RQ-pair, an RQ either contains the FC header * (i.e. 
is_hdr == TRUE) or the payload. * * Don't ring doorbell for payload RQ */ if (!q->u.flag.is_hdr) { break; } /* * Some RQ cannot be incremented one entry at a time. Instead, * the driver collects a number of entries and updates the * RQ in batches. */ if (q->u.flag.rq_batch) { if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) { break; } n_posted = SLI4_QUEUE_RQ_BATCH; } } val = SLI4_RQ_DOORBELL(n_posted, q->id); ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; } case SLI_QTYPE_WQ: if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) { val = SLI4_WQ_DOORBELL(q->n_posted, 0, q->id); } else { /* For iftype = 2 and 3, q->index value is ignored */ val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id); } ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); break; default: ocs_log_test(sli4->os, "bad queue type %d\n", q->type); return -1; } return 0; }
static int32_t sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query) { if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, *features, query)) { sli4_cmd_request_features_t *req_features = sli4->bmbx.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n"); return -1; } if (req_features->hdr.status) { ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n", req_features->hdr.status); return -1; } features->dword = req_features->response.dword; } else { ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n"); return -1; } return 0; }
/** * @brief Calculate max queue entries. * * @param sli4 SLI context. * * @return None. */
void sli_calc_max_qentries(sli4_t *sli4) { sli4_qtype_e q; uint32_t alloc_size, qentries, qentry_size; for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q], sli4->config.count_mask[q]); } /* single, contiguous DMA allocations will be called for each queue * of size (max_qentries * queue entry size); since these can be large, * check against the OS max DMA allocation size */ for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { qentries = sli4->config.max_qentries[q]; qentry_size = sli_get_queue_entry_size(sli4, q); alloc_size = qentries * qentry_size; if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) { while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) { /* cut the qentries in half until alloc_size <= max DMA alloc size */ qentries >>= 1; alloc_size = qentries * qentry_size; } ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n", SLI_QNAME[q], sli4->config.max_qentries[q], qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)); sli4->config.max_qentries[q] = qentries; } } }
/** * @brief Issue a COMMON_QUERY_FW_CONFIG mailbox command and store the results. * * @param sli4 SLI context. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ static int32_t sli_query_fw_config(sli4_t *sli4) { /* * Read the device configuration * * Note: Only ulp0 fields contain values */ if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_common_query_fw_config_t *fw_config = (sli4_res_common_query_fw_config_t *) (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n"); return -1; } if (fw_config->hdr.status) { ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n", fw_config->hdr.status); return -1; } sli4->physical_port = fw_config->physical_port; sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1); sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode & (SLI4_ULP_MODE_FCOE_INI | SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode & (SLI4_ULP_MODE_FCOE_INI | SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); if (sli4->config.dual_ulp_capable) { /* * Lancer will not support this, so we use the values * from the READ_CONFIG. */ if (sli4->config.is_ulp_fc[0] && sli4->config.is_ulp_fc[1]) { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total; } else if (sli4->config.is_ulp_fc[0]) { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total; } else { sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total; sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total; } } } else { ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n"); return -1; } return 0; } static int32_t sli_get_config(sli4_t *sli4) { ocs_dma_t get_cntl_addl_data; /* * Read the device configuration */ if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_read_config_t *read_config = sli4->bmbx.virt; uint32_t i; uint32_t total; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n"); return -1; } if (read_config->hdr.status) { ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n", read_config->hdr.status); return -1; } sli4->config.has_extents = read_config->ext; if (FALSE == sli4->config.has_extents) { uint32_t i = 0; uint32_t *base = sli4->config.extent[0].base; if (!base) { if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t), OCS_M_ZERO | OCS_M_NOWAIT))) { ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n"); return -1; } } for (i = 0; i < SLI_RSRC_MAX; i++) { sli4->config.extent[i].number = 1; sli4->config.extent[i].n_alloc = 0; sli4->config.extent[i].base = &base[i]; } sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base; sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count; sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = read_config->vpi_base; sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count; sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base; sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count; sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base; sli4->config.extent[SLI_RSRC_FCOE_XRI].size = OCS_MIN(255,read_config->xri_count); sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0; sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count; } else 
{ /* TODO extents*/ ; } for (i = 0; i < SLI_RSRC_MAX; i++) { total = sli4->config.extent[i].number * sli4->config.extent[i].size; sli4->config.extent[i].use_map = ocs_bitmap_alloc(total); if (NULL == sli4->config.extent[i].use_map) { ocs_log_err(sli4->os, "bitmap memory allocation failed " "resource %d\n", i); return -1; } sli4->config.extent[i].map_size = total; } sli4->config.topology = read_config->topology;
+	sli4->config.ptv = read_config->ptv;
+	if (sli4->config.ptv) {
+		sli4->config.tf = read_config->tf;
+		sli4->config.pt = read_config->pt;
+	}
+	ocs_log_info(sli4->os, "Persistent Topology: PTV: %d, TF: %d, PT: %d\n",
+		sli4->config.ptv, sli4->config.tf, sli4->config.pt);
+
switch (sli4->config.topology) { case SLI4_READ_CFG_TOPO_FCOE: ocs_log_debug(sli4->os, "FCoE\n"); break; case SLI4_READ_CFG_TOPO_FC: ocs_log_debug(sli4->os, "FC (unknown)\n"); break; case SLI4_READ_CFG_TOPO_FC_DA: ocs_log_debug(sli4->os, "FC (direct attach)\n"); break; case SLI4_READ_CFG_TOPO_FC_AL: ocs_log_debug(sli4->os, "FC (arbitrated loop)\n"); break; default: ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology); } sli4->config.e_d_tov = read_config->e_d_tov; sli4->config.r_a_tov = read_config->r_a_tov; sli4->config.link_module_type = read_config->lmt; sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count; sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count; sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count; sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count; /* * READ_CONFIG doesn't give the max number of MQ. Applications * will typically want 1, but we may need another at some future * date. Dummy up a "max" MQ count here. */ sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT; } else { ocs_log_err(sli4->os, "bad READ_CONFIG write\n"); return -1; }
if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *) (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n"); return -1; } else if (parms->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n", parms->hdr.status, parms->hdr.additional_status); return -1; } sli4->config.auto_reg = parms->areg; sli4->config.auto_xfer_rdy = parms->agxf; sli4->config.hdr_template_req = parms->hdrr; sli4->config.t10_dif_inline_capable = parms->timm; sli4->config.t10_dif_separate_capable = parms->tsmm; sli4->config.mq_create_version = parms->mqv; sli4->config.cq_create_version = parms->cqv; sli4->config.rq_min_buf_size = parms->min_rq_buffer_size; sli4->config.rq_max_buf_size = parms->max_rq_buffer_size; sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt; sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt; /* save count methods and masks for each queue type */ sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask; sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method; sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask; sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method; sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask; 
sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method; sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask; sli4->config.count_method[SLI_QTYPE_WQ] = parms->wqe_count_method; sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask; sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method; /* now calculate max queue entries */ sli_calc_max_qentries(sli4); sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */ sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */ /* ignore HLM here. Use value from REQUEST_FEATURES */ sli4->config.sge_supported_length = parms->sge_supported_length; if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE) sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE; sli4->config.sgl_pre_registration_required = parms->sglr; /* default to using pre-registered SGL's */ sli4->config.sgl_pre_registered = TRUE; sli4->config.perf_hint = parms->phon; sli4->config.perf_wq_id_association = parms->phwq; sli4->config.rq_batch = parms->rq_db_window; /* save the fields for skyhawk SGL chaining */ sli4->config.sgl_chaining_params.chaining_capable = (parms->sglc == 1); sli4->config.sgl_chaining_params.frag_num_field_offset = parms->frag_num_field_offset; sli4->config.sgl_chaining_params.frag_num_field_mask = (1ull << parms->frag_num_field_size) - 1; sli4->config.sgl_chaining_params.sgl_index_field_offset = parms->sgl_index_field_offset; sli4->config.sgl_chaining_params.sgl_index_field_mask = (1ull << parms->sgl_index_field_size) - 1; sli4->config.sgl_chaining_params.chain_sge_initial_value_lo = parms->chain_sge_initial_value_lo; sli4->config.sgl_chaining_params.chain_sge_initial_value_hi = parms->chain_sge_initial_value_hi; /* Use the highest available WQE size. */ if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) { sli4->config.wqe_size = SLI4_WQE_EXT_BYTES; } else { sli4->config.wqe_size = SLI4_WQE_BYTES; } } if (sli_query_fw_config(sli4)) { ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n"); return -1; } sli4->config.port_number = 0; /* * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily * uses VPD DMA buffer as the response won't fit in the embedded * buffer. 
*/ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n"); return -1; } else if (attr->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n", attr->hdr.status, attr->hdr.additional_status); return -1; } sli4->config.port_number = attr->port_number; ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string, sizeof(sli4->config.bios_version_string)); } else { ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n"); return -1; } if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t), OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n"); } else { if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &get_cntl_addl_data)) { sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n"); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } if (attr->hdr.status) { ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n", attr->hdr.status); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name)); ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name); } else { ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n"); ocs_dma_free(sli4->os, &get_cntl_addl_data); return -1; } ocs_dma_free(sli4->os, &get_cntl_addl_data); } if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n"); return -1; } sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number]; } sli4->config.port_name[1] = '\0'; if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt; if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n"); return -1; } if (read_rev->hdr.status) { ocs_log_err(sli4->os, "READ_REV bad status %#x\n", read_rev->hdr.status); return -1; } sli4->config.fw_rev[0] = read_rev->first_fw_id; ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0])); sli4->config.fw_rev[1] = read_rev->second_fw_id; ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1])); sli4->config.hw_rev[0] = read_rev->first_hw_revision; sli4->config.hw_rev[1] = read_rev->second_hw_revision; sli4->config.hw_rev[2] = read_rev->third_hw_revision; ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n", read_rev->first_fw_name, read_rev->first_fw_id, read_rev->second_fw_name, read_rev->second_fw_id); ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision, read_rev->second_hw_revision); /* Check that all VPD data was returned */ if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) { ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n", 
				     read_rev->available_length,
				     read_rev->returned_vpd_length,
				     read_rev->actual_vpd_length);
		}

		sli4->vpd.length = read_rev->returned_vpd_length;
	} else {
		ocs_log_err(sli4->os, "bad READ_REV write\n");
		return -1;
	}

	if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
		sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt;

		if (sli_bmbx_command(sli4)) {
			ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n");
			return -1;
		}

		if (read_nvparms->hdr.status) {
			ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n",
				    read_nvparms->hdr.status);
			return -1;
		}

		ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn));
		ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn));

		ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
			      sli4->config.wwpn[0], sli4->config.wwpn[1], sli4->config.wwpn[2],
			      sli4->config.wwpn[3], sli4->config.wwpn[4], sli4->config.wwpn[5],
			      sli4->config.wwpn[6], sli4->config.wwpn[7]);
		ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
			      sli4->config.wwnn[0], sli4->config.wwnn[1], sli4->config.wwnn[2],
			      sli4->config.wwnn[3], sli4->config.wwnn[4], sli4->config.wwnn[5],
			      sli4->config.wwnn[6], sli4->config.wwnn[7]);
	} else {
		ocs_log_err(sli4->os, "bad READ_NVPARMS write\n");
		return -1;
	}

	return 0;
}

/****************************************************************************
 * Public functions
 */

/**
 * @ingroup sli
 * @brief Set up the SLI context.
 *
 * @param sli4 SLI context.
 * @param os Device abstraction.
 * @param port_type Protocol type of port (for example, FC and NIC).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type)
{
	uint32_t sli_intf = UINT32_MAX;
	uint32_t pci_class_rev = 0;
	uint32_t rev_id = 0;
	uint32_t family = 0;
	uint32_t i;
	sli4_asic_entry_t *asic;

	ocs_memset(sli4, 0, sizeof(sli4_t));

	sli4->os = os;
	sli4->port_type = port_type;

	/*
	 * Read the SLI_INTF register to discover the register layout
	 * and other capability information
	 */
	sli_intf = ocs_config_read32(os, SLI4_INTF_REG);

	if (sli_intf_valid_check(sli_intf)) {
		ocs_log_err(os, "SLI_INTF is not valid\n");
		return -1;
	}

	/* the driver only supports SLI-4 */
	sli4->sli_rev = sli_intf_sli_revision(sli_intf);
	if (4 != sli4->sli_rev) {
		ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n",
			    sli_intf);
		return -1;
	}

	sli4->sli_family = sli_intf_sli_family(sli_intf);

	sli4->if_type = sli_intf_if_type(sli_intf);

	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) ||
	    (SLI4_IF_TYPE_LANCER_G7 == sli4->if_type)) {
		ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n",
			      sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
			      sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
			      sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
	}

	/*
	 * set the ASIC type and revision
	 */
	pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION);
	rev_id = sli_pci_rev_id(pci_class_rev);
	family = sli4->sli_family;
	if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
		uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG);
		family = sli_asic_gen(asic_id);
	}

	for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) {
		if ((rev_id == asic->rev_id) && (family == asic->family)) {
			sli4->asic_type = asic->type;
			sli4->asic_rev = asic->rev;
			break;
		}
	}

	/* Fail if no matching asic type/rev was found */
	if ((sli4->asic_type == 0) || (sli4->asic_rev == 0)) {
		ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n",
			    family, rev_id);
		return -1;
	}

	/*
	 * The bootstrap mailbox is
	 * equivalent to a MQ with a single 256 byte
	 * entry, a CQ with a single 16 byte entry, and no event queue.
	 * Alignment must be 16 bytes as the low order address bits in the
	 * address register are also control / status.
	 */
	if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE +
			  sizeof(sli4_mcqe_t), 16)) {
		ocs_log_err(os, "bootstrap mailbox allocation failed\n");
		return -1;
	}

	if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
		ocs_log_err(os, "bad alignment for bootstrap mailbox\n");
		return -1;
	}

	ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
		      ocs_addr32_hi(sli4->bmbx.phys),
		      ocs_addr32_lo(sli4->bmbx.phys),
		      sli4->bmbx.size);

	/* TODO 4096 is arbitrary. What should this value actually be? */
	if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) {
		/* Note that failure isn't fatal in this specific case */
		sli4->vpd.data.size = 0;
		ocs_log_test(os, "VPD buffer allocation failed\n");
	}

	if (sli_fw_init(sli4)) {
		ocs_log_err(sli4->os, "FW initialization failed\n");
		return -1;
	}

	/*
	 * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
	 * in addition to any other desired features
	 */
	sli4->config.features.flag.iaab = TRUE;
	sli4->config.features.flag.npiv = TRUE;
	sli4->config.features.flag.dif = TRUE;
	sli4->config.features.flag.vf = TRUE;
	sli4->config.features.flag.fcpc = TRUE;
	sli4->config.features.flag.iaar = TRUE;
	sli4->config.features.flag.hlm = TRUE;
	sli4->config.features.flag.perfh = TRUE;
	sli4->config.features.flag.rxseq = TRUE;
	sli4->config.features.flag.rxri = TRUE;
	sli4->config.features.flag.mrqp = TRUE;

	/* use performance hints if available */
	if (sli4->config.perf_hint) {
		sli4->config.features.flag.perfh = TRUE;
	}

	if (sli_request_features(sli4, &sli4->config.features, TRUE)) {
		return -1;
	}

	if (sli_get_config(sli4)) {
		return -1;
	}

	return 0;
}

+bool
+sli_persist_topology_enabled(sli4_t *sli4)
+{
+	return (sli4->config.ptv);
+}
+
int32_t
sli_init(sli4_t *sli4)
{
	if (sli4->config.has_extents) {
		/* TODO COMMON_ALLOC_RESOURCE_EXTENTS */;
		ocs_log_test(sli4->os, "XXX need to implement extent allocation\n");
		return -1;
	}

	sli4->config.features.flag.hlm = sli4->config.high_login_mode;
	sli4->config.features.flag.rxseq = FALSE;
	sli4->config.features.flag.rxri  = FALSE;

	if (sli_request_features(sli4, &sli4->config.features, FALSE)) {
		return -1;
	}

	return 0;
}

int32_t
sli_reset(sli4_t *sli4)
{
	uint32_t i;

	if (sli_fw_init(sli4)) {
		ocs_log_crit(sli4->os, "FW initialization failed\n");
		return -1;
	}

	if (sli4->config.extent[0].base) {
		ocs_free(sli4->os, sli4->config.extent[0].base,
			 SLI_RSRC_MAX * sizeof(uint32_t));
		sli4->config.extent[0].base = NULL;
	}

	for (i = 0; i < SLI_RSRC_MAX; i++) {
		if (sli4->config.extent[i].use_map) {
			ocs_bitmap_free(sli4->config.extent[i].use_map);
			sli4->config.extent[i].use_map = NULL;
		}
		sli4->config.extent[i].base = NULL;
	}

	if (sli_get_config(sli4)) {
		return -1;
	}

	return 0;
}

/**
 * @ingroup sli
 * @brief Issue a Firmware Reset.
 *
 * @par Description
 * Issues a Firmware Reset to the chip. This reset affects the entire chip,
 * so all PCI functions on the same PCI bus and device are affected.
 * @n @n This type of reset can be used to activate newly downloaded firmware.
 * @n @n The driver should be considered to be in an unknown state after this
 * reset and should be reloaded.
 *
 * @param sli4 SLI context.
 *
 * @return Returns 0 on success, or -1 otherwise.
 */
int32_t
sli_fw_reset(sli4_t *sli4)
{
	uint32_t val;
	uint32_t ready;

	/*
	 * Firmware must be ready before issuing the reset.
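	 * The wait below polls the port for up to
	 * SLI4_FW_READY_TIMEOUT_MSEC before giving up.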
*/ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "FW status is NOT ready\n"); return -1; } switch(sli4->if_type) { case SLI4_IF_TYPE_BE3_SKH_PF: /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */ val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR); val |= SLI4_PCI_SOFT_RESET_MASK; ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val); break; case SLI4_IF_TYPE_LANCER_FC_ETH: /* Lancer uses PHYDEV_CONTROL */ val = SLI4_PHYDEV_CONTROL_FRST; sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val); break; default: ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type); return -1; break; } /* wait for the FW to become ready after the reset */ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!ready) { ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n"); return -1; } return 0; } /** * @ingroup sli * @brief Tear down a SLI context. * * @param sli4 SLI context. * * @return Returns 0 on success, or non-zero otherwise. */ int32_t sli_teardown(sli4_t *sli4) { uint32_t i; if (sli4->config.extent[0].base) { ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t)); sli4->config.extent[0].base = NULL; } for (i = 0; i < SLI_RSRC_MAX; i++) { if (sli4->config.has_extents) { /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */; } sli4->config.extent[i].base = NULL; ocs_bitmap_free(sli4->config.extent[i].use_map); sli4->config.extent[i].use_map = NULL; } if (sli_fw_term(sli4)) { ocs_log_err(sli4->os, "FW deinitialization failed\n"); } ocs_dma_free(sli4->os, &sli4->vpd.data); ocs_dma_free(sli4->os, &sli4->bmbx); return 0; } /** * @ingroup sli * @brief Register a callback for the given event. * * @param sli4 SLI context. * @param which Event of interest. * @param func Function to call when the event occurs. * @param arg Argument passed to the callback function. * * @return Returns 0 on success, or non-zero otherwise. */ int32_t sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg) { if (!sli4 || !func || (which >= SLI4_CB_MAX)) { ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n", sli4, which, func); return -1; } switch (which) { case SLI4_CB_LINK: sli4->link = func; sli4->link_arg = arg; break; case SLI4_CB_FIP: sli4->fip = func; sli4->fip_arg = arg; break; default: ocs_log_test(sli4->os, "unknown callback %#x\n", which); return -1; } return 0; } /** * @ingroup sli * @brief Initialize a queue object. * * @par Description * This initializes the sli4_queue_t object members, including the underlying * DMA memory. * * @param sli4 SLI context. * @param q Pointer to queue object. * @param qtype Type of queue to create. * @param size Size of each entry. * @param n_entries Number of entries to allocate. * @param align Starting memory address alignment. * * @note Checks if using the existing DMA memory (if any) is possible. If not, * it frees the existing memory and re-allocates. * * @return Returns 0 on success, or non-zero otherwise. 
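 *
 * @par Example
 * A minimal sketch with illustrative values (a CQ with 16-byte entries and
 * 1024 of them); it assumes an initialized SLI context:
 * @code
 * sli4_queue_t cq;
 *
 * ocs_memset(&cq, 0, sizeof(cq));
 * if (__sli_queue_init(sli4, &cq, SLI_QTYPE_CQ, 16, 1024, SLI_PAGE_SIZE)) {
 *         // DMA allocation failed; cq is left zeroed
 * }
 * @endcode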
 */
int32_t
__sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype,
		 size_t size, uint32_t n_entries, uint32_t align)
{
	if ((q->dma.virt == NULL) || (size != q->size) ||
	    (n_entries != q->length)) {
		if (q->dma.size) {
			ocs_dma_free(sli4->os, &q->dma);
		}

		ocs_memset(q, 0, sizeof(sli4_queue_t));

		if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) {
			ocs_log_err(sli4->os, "%s allocation failed\n",
				    SLI_QNAME[qtype]);
			return -1;
		}

		ocs_memset(q->dma.virt, 0, size * n_entries);

		ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]",
			      SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock);

		q->type = qtype;
		q->size = size;
		q->length = n_entries;

		/* Limit to half the queue size per interrupt */
		q->proc_limit = n_entries / 2;

		if ((q->type == SLI_QTYPE_EQ) || (q->type == SLI_QTYPE_CQ)) {
			/* For prism, phase will be flipped after a sweep through eq and cq */
			q->phase = 1;
		}

		switch (q->type) {
		case SLI_QTYPE_EQ:
			q->posted_limit = q->length / 2;
			break;
		default:
			if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) ||
			    (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) {
				/* For Skyhawk, ring the doorbell more often */
				q->posted_limit = 8;
			} else {
				q->posted_limit = 64;
			}
			break;
		}
	}

	return 0;
}

/**
 * @ingroup sli
 * @brief Issue the command to create a queue.
 *
 * @param sli4 SLI context.
 * @param q Pointer to queue object.
 *
 * @return Returns 0 on success, or non-zero otherwise.
 */
int32_t
__sli_create_queue(sli4_t *sli4, sli4_queue_t *q)
{
	sli4_res_common_create_queue_t *res_q = NULL;

	if (sli_bmbx_command(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n",
			     SLI_QNAME[q->type]);
		ocs_dma_free(sli4->os, &q->dma);
		return -1;
	}
	if (sli_res_sli_config(sli4->bmbx.virt)) {
		ocs_log_err(sli4->os, "bad status create %s\n",
			    SLI_QNAME[q->type]);
		ocs_dma_free(sli4->os, &q->dma);
		return -1;
	}
	res_q = (void *)((uint8_t *)sli4->bmbx.virt +
			 offsetof(sli4_cmd_sli_config_t, payload));

	if (res_q->hdr.status) {
		ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n",
			    SLI_QNAME[q->type], res_q->hdr.status,
			    res_q->hdr.additional_status);
		ocs_dma_free(sli4->os, &q->dma);
		return -1;
	} else {
		q->id = res_q->q_id;
		q->doorbell_offset = res_q->db_offset;
		q->doorbell_rset = res_q->db_rs;

		switch (q->type) {
		case SLI_QTYPE_EQ:
			/* No doorbell information in response for EQs */
			q->doorbell_offset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].off;
			q->doorbell_rset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].rset;
			break;
		case SLI_QTYPE_CQ:
			/* No doorbell information in response for CQs */
			q->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
			q->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
			break;
		case SLI_QTYPE_MQ:
			/* No doorbell information in response for MQs */
			q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off;
			q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset;
			break;
		case SLI_QTYPE_RQ:
			/* set the doorbell for non-skyhawks */
			if (!sli4->config.dual_ulp_capable) {
				q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
				q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
			}
			break;
		case SLI_QTYPE_WQ:
			/* set the doorbell for non-skyhawks */
			if (!sli4->config.dual_ulp_capable) {
				q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off;
				q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/**
 * @ingroup sli
 * @brief Get queue entry size.
 *
 * Get queue entry size given queue type.
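 * For example, sli_get_queue_entry_size(sli4, SLI_QTYPE_CQ) returns 16,
 * the CQE size in bytes.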
* * @param sli4 SLI context * @param qtype Type for which the entry size is returned. * * @return Returns > 0 on success (queue entry size), or a negative value on failure. */ int32_t sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype) { uint32_t size = 0; if (!sli4) { ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4); return -1; } switch (qtype) { case SLI_QTYPE_EQ: size = sizeof(uint32_t); break; case SLI_QTYPE_CQ: size = 16; break; case SLI_QTYPE_MQ: size = 256; break; case SLI_QTYPE_WQ: if (SLI4_PORT_TYPE_FC == sli4->port_type) { size = sli4->config.wqe_size; } else { /* TODO */ ocs_log_test(sli4->os, "unsupported queue entry size\n"); return -1; } break; case SLI_QTYPE_RQ: size = SLI4_FCOE_RQE_SIZE; break; default: ocs_log_test(sli4->os, "unknown queue type %d\n", qtype); return -1; } return size; } /** * @ingroup sli * @brief Modify the delay timer for all the EQs * * @param sli4 SLI context. * @param eq Array of EQs. * @param num_eq Count of EQs. * @param shift Phase shift for staggering interrupts. * @param delay_mult Delay multiplier for limiting interrupt frequency. * * @return Returns 0 on success, or -1 otherwise. */ int32_t sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult) { sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult); if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n"); return -1; } if (sli_res_sli_config(sli4->bmbx.virt)) { ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n"); return -1; } return 0; } /** * @ingroup sli * @brief Allocate a queue. * * @par Description * Allocates DMA memory and configures the requested queue type. * * @param sli4 SLI context. * @param qtype Type of queue to create. * @param q Pointer to the queue object. * @param n_entries Number of entries to allocate. * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on). * @param ulp The ULP to bind, which is only used for WQ and RQs * * @return Returns 0 on success, or -1 otherwise. */ int32_t sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries, sli4_queue_t *assoc, uint16_t ulp) { int32_t size; uint32_t align = 0; sli4_create_q_fn_t create = NULL; if (!sli4 || !q) { ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q); return -1; } /* get queue size */ size = sli_get_queue_entry_size(sli4, qtype); if (size < 0) return -1; align = SLI_PAGE_SIZE; switch (qtype) { case SLI_QTYPE_EQ: create = sli_cmd_common_create_eq; break; case SLI_QTYPE_CQ: create = sli_cmd_common_create_cq; break; case SLI_QTYPE_MQ: /* Validate the number of entries */ switch (n_entries) { case 16: case 32: case 64: case 128: break; default: ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries); return -1; } assoc->u.flag.is_mq = TRUE; create = sli_cmd_common_create_mq_ext; break; case SLI_QTYPE_WQ: if (SLI4_PORT_TYPE_FC == sli4->port_type) { if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) { create = sli_cmd_fcoe_wq_create; } else { create = sli_cmd_fcoe_wq_create_v1; } } else { /* TODO */ ocs_log_test(sli4->os, "unsupported WQ create\n"); return -1; } break; default: ocs_log_test(sli4->os, "unknown queue type %d\n", qtype); return -1; } if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) { ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]); return -1; } if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? 
		   assoc->id : 0, ulp)) {
		if (__sli_create_queue(sli4, q)) {
			ocs_log_err(sli4->os, "create %s failed\n",
				    SLI_QNAME[qtype]);
			return -1;
		}
		q->ulp = ulp;
	} else {
		ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]);
		return -1;
	}

	return 0;
}

/**
 * @ingroup sli
 * @brief Allocate a CQ set.
 *
 * @param sli4 SLI context.
 * @param num_cqs Number of CQs to create.
 * @param qs Pointers to the queue objects.
 * @param n_entries Number of entries to allocate per CQ.
 * @param eqs Associated event queues.
 *
 * @return Returns 0 on success, or -1 otherwise.
 */
int32_t
sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs,
		 uint32_t n_entries, sli4_queue_t *eqs[])
{
	uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0;
	uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
	uintptr_t addr;
	ocs_dma_t dma;
	sli4_req_common_create_cq_set_v0_t *req = NULL;
	sli4_res_common_create_queue_set_t *res = NULL;

	if (!sli4) {
		ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
		return -1;
	}

	memset(&dma, 0, sizeof(dma));

	/* Align the queue DMA memory */
	for (i = 0; i < num_cqs; i++) {
		if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES,
				     n_entries, SLI_PAGE_SIZE)) {
			ocs_log_err(sli4->os, "Queue init failed.\n");
			goto error;
		}
	}

	n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
	switch (n_cqe) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		page_size = 1;
		break;
	case 4096:
		page_size = 2;
		break;
	default:
		return -1;
	}

	page_bytes = page_size * SLI_PAGE_SIZE;
	num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
	cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) +
		   (8 * num_pages_cq * num_cqs);
	payload_size = max((size_t)cmd_size,
			   sizeof(sli4_res_common_create_queue_set_t));

	if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
		ocs_log_err(sli4->os, "DMA allocation failed\n");
		goto error;
	}
	ocs_memset(dma.virt, 0, payload_size);

	if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
			       payload_size, &dma) == -1) {
		goto error;
	}

	/* Fill the request structure */
	req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt);
	req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET;
	req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	req->hdr.version = 0;
	req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
	req->page_size = page_size;

	req->num_pages = num_pages_cq;
	switch (req->num_pages) {
	case 1:
		req->cqecnt = SLI4_CQ_CNT_256;
		break;
	case 2:
		req->cqecnt = SLI4_CQ_CNT_512;
		break;
	case 4:
		req->cqecnt = SLI4_CQ_CNT_1024;
		break;
	case 8:
		req->cqecnt = SLI4_CQ_CNT_LARGE;
		req->cqe_count = n_cqe;
		break;
	default:
		ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages);
		goto error;
	}

	req->evt = TRUE;
	req->valid = TRUE;
	req->arm = FALSE;
	req->num_cq_req = num_cqs;

	if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
		req->autovalid = TRUE;

	/* Fill page addresses of all the CQs. */
	for (i = 0; i < num_cqs; i++) {
		req->eq_id[i] = eqs[i]->id;
		for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages;
		     p++, addr += page_bytes) {
			req->page_physical_address[offset].low = ocs_addr32_lo(addr);
			req->page_physical_address[offset].high = ocs_addr32_hi(addr);
			offset++;
		}
	}

	if (sli_bmbx_command(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n");
		goto error;
	}

	res = (void *)((uint8_t *)dma.virt);
	if (res->hdr.status) {
		ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n",
			    res->hdr.status, res->hdr.additional_status);
		goto error;
	} else {
		/* Check if we got all requested CQs.
 */
		if (res->num_q_allocated != num_cqs) {
			ocs_log_crit(sli4->os, "Requested CQ count doesn't match allocated count.\n");
			goto error;
		}

		/* Fill the resp cq ids. */
		for (i = 0; i < num_cqs; i++) {
			qs[i]->id = res->q_id + i;
			qs[i]->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
			qs[i]->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
		}
	}

	ocs_dma_free(sli4->os, &dma);

	return 0;

error:
	for (i = 0; i < num_cqs; i++) {
		if (qs[i]->dma.size) {
			ocs_dma_free(sli4->os, &qs[i]->dma);
		}
	}

	if (dma.size) {
		ocs_dma_free(sli4->os, &dma);
	}

	return -1;
}

/**
 * @ingroup sli
 * @brief Free a queue.
 *
 * @par Description
 * Frees DMA memory and de-registers the requested queue.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues.
 * @param free_memory Non-zero if the DMA memory associated with the queue should be freed.
 *
 * @return Returns 0 on success, or -1 otherwise.
 */
int32_t
sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues,
	       uint32_t free_memory)
{
	sli4_destroy_q_fn_t destroy = NULL;
	int32_t rc = -1;

	if (!sli4 || !q) {
		ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
		return -1;
	}

	if (destroy_queues) {
		switch (q->type) {
		case SLI_QTYPE_EQ:
			destroy = sli_cmd_common_destroy_eq;
			break;
		case SLI_QTYPE_CQ:
			destroy = sli_cmd_common_destroy_cq;
			break;
		case SLI_QTYPE_MQ:
			destroy = sli_cmd_common_destroy_mq;
			break;
		case SLI_QTYPE_WQ:
			if (SLI4_PORT_TYPE_FC == sli4->port_type) {
				destroy = sli_cmd_fcoe_wq_destroy;
			} else {
				/* TODO */
				ocs_log_test(sli4->os, "unsupported WQ destroy\n");
				return -1;
			}
			break;
		case SLI_QTYPE_RQ:
			if (SLI4_PORT_TYPE_FC == sli4->port_type) {
				destroy = sli_cmd_fcoe_rq_destroy;
			} else {
				/* TODO */
				ocs_log_test(sli4->os, "unsupported RQ destroy\n");
				return -1;
			}
			break;
		default:
			ocs_log_test(sli4->os, "bad queue type %d\n", q->type);
			return -1;
		}

		/*
		 * Destroying queues makes BE3 sad (version 0 interface type). Rely
		 * on COMMON_FUNCTION_RESET to free host allocated queue resources
		 * inside the SLI Port.
		 */
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
			destroy = NULL;
		}

		/* Destroy the queue if the operation is defined */
		if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) {
			sli4_res_hdr_t *res = NULL;

			if (sli_bmbx_command(sli4)) {
				ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n",
					     SLI_QNAME[q->type]);
			} else if (sli_res_sli_config(sli4->bmbx.virt)) {
				ocs_log_err(sli4->os, "bad status destroy %s\n",
					    SLI_QNAME[q->type]);
			} else {
				res = (void *)((uint8_t *)sli4->bmbx.virt +
					       offsetof(sli4_cmd_sli_config_t, payload));

				if (res->status) {
					ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n",
						    SLI_QNAME[q->type], res->status,
						    res->additional_status);
				} else {
					rc = 0;
				}
			}
		}
	}

	if (free_memory) {
		ocs_lock_free(&q->lock);

		if (ocs_dma_free(sli4->os, &q->dma)) {
			ocs_log_err(sli4->os, "%s queue ID %d free failed\n",
				    SLI_QNAME[q->type], q->id);
			rc = -1;
		}
	}

	return rc;
}

int32_t
sli_queue_reset(sli4_t *sli4, sli4_queue_t *q)
{
	ocs_lock(&q->lock);

	q->index = 0;
	q->n_posted = 0;

	if (SLI_QTYPE_MQ == q->type) {
		q->u.r_idx = 0;
	}

	if (q->dma.virt != NULL) {
		ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length));
	}

	ocs_unlock(&q->lock);

	return 0;
}

/**
 * @ingroup sli
 * @brief Check if the given queue is empty.
 *
 * @par Description
 * If the valid bit of the current entry is unset, the queue is empty.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 *
 * @return Returns TRUE if empty, or FALSE otherwise.
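 *
 * @par Example
 * A minimal polling sketch (assumes @c q was set up via sli_queue_alloc()):
 * @code
 * while (sli_queue_is_empty(sli4, q)) {
 *         // nothing posted yet; wait for the next interrupt or time out
 * }
 * @endcode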
 */
int32_t
sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q)
{
	int32_t rc = TRUE;
	uint8_t *qe = q->dma.virt;

	ocs_lock(&q->lock);

	ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);

	qe += q->index * q->size;

	rc = !sli_queue_entry_is_valid(q, qe, FALSE);

	ocs_unlock(&q->lock);

	return rc;
}

/**
 * @ingroup sli
 * @brief Arm an EQ.
 *
 * @param sli4 SLI context.
 * @param q Pointer to queue object.
 * @param arm If TRUE, arm the EQ.
 *
 * @return Returns 0 on success, or non-zero otherwise.
 */
int32_t
sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
{
	uint32_t val = 0;

	ocs_lock(&q->lock);
	if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
		val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
	else
		val = sli_eq_doorbell(q->n_posted, q->id, arm);

	ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
	q->n_posted = 0;
	ocs_unlock(&q->lock);

	return 0;
}

/**
 * @ingroup sli
 * @brief Arm a queue.
 *
 * @param sli4 SLI context.
 * @param q Pointer to queue object.
 * @param arm If TRUE, arm the queue.
 *
 * @return Returns 0 on success, or non-zero otherwise.
 */
int32_t
sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
{
	uint32_t val = 0;

	ocs_lock(&q->lock);

	switch (q->type) {
	case SLI_QTYPE_EQ:
		if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
			val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
		else
			val = sli_eq_doorbell(q->n_posted, q->id, arm);
		ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
		q->n_posted = 0;
		break;
	case SLI_QTYPE_CQ:
		if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
			val = sli_iftype6_cq_doorbell(q->n_posted, q->id, arm);
		else
			val = sli_cq_doorbell(q->n_posted, q->id, arm);
		ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
		q->n_posted = 0;
		break;
	default:
		ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n",
			     SLI_QNAME[q->type]);
	}

	ocs_unlock(&q->lock);

	return 0;
}

/**
 * @ingroup sli
 * @brief Write an entry to the queue object.
 *
 * Note: Assumes the q->lock will be locked and released by the caller.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 * @param entry Pointer to the entry contents.
 *
 * @return Returns queue index on success, or negative error value otherwise.
 */
int32_t
_sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
{
	int32_t rc = 0;
	uint8_t *qe = q->dma.virt;
	uint32_t qindex;

	qindex = q->index;
	qe += q->index * q->size;

	if (entry) {
		if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) {
			sli_set_wq_id_association(entry, q->id);
		}
#if defined(OCS_INCLUDE_DEBUG)
		switch (q->type) {
		case SLI_QTYPE_WQ: {
			ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe",
				   entry, q->size);
			break;
		}
		case SLI_QTYPE_MQ:
			/* Note: we don't really need to dump the whole
			 * 256 bytes, just do 64 */
			ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os,
				   "mqe outbound", entry, 64);
			break;
		default:
			break;
		}
#endif
		ocs_memcpy(qe, entry, q->size);
		q->n_posted = 1;
	}

	ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);

	rc = sli_queue_doorbell(sli4, q);

	q->index = (q->index + q->n_posted) & (q->length - 1);
	q->n_posted = 0;

	if (rc < 0) {
		/* failure */
		return rc;
	} else if (rc > 0) {
		/* failure, but we need to return a negative value on failure */
		return -rc;
	} else {
		return qindex;
	}
}

/**
 * @ingroup sli
 * @brief Write an entry to the queue object.
 *
 * Note: Takes and releases q->lock internally; use _sli_queue_write() if
 * the caller already holds the lock.
 *
 * @param sli4 SLI context.
 * @param q Pointer to the queue object.
 * @param entry Pointer to the entry contents.
 *
 * @return Returns queue index on success, or negative error value otherwise.
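 *
 * @par Example
 * A minimal sketch (assumes @c wqe is a buffer of @c q->size bytes holding
 * a prebuilt WQE):
 * @code
 * int32_t index = sli_queue_write(sli4, q, wqe);
 * if (index < 0) {
 *         // doorbell write or copy failed; the entry was not posted
 * }
 * @endcode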
*/ int32_t sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry) { int32_t rc; ocs_lock(&q->lock); rc = _sli_queue_write(sli4, q, entry); ocs_unlock(&q->lock); return rc; } /** * @brief Check if the current queue entry is valid. * * @param q Pointer to the queue object. * @param qe Pointer to the queue entry. * @param clear Boolean to clear valid bit. * * @return Returns TRUE if the entry is valid, or FALSE otherwise. */ static uint8_t sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear) { uint8_t valid = FALSE; uint8_t valid_bit_set = 0; switch (q->type) { case SLI_QTYPE_EQ: valid = (((sli4_eqe_t *)qe)->vld == q->phase) ? 1 : 0; if (valid && clear) { ((sli4_eqe_t *)qe)->vld = 0; } break; case SLI_QTYPE_CQ: /* * For both MCQE and WCQE/RCQE, the valid bit * is bit 31 of dword 3 (0 based) */ valid_bit_set = (qe[15] & 0x80) != 0; if (valid_bit_set == q->phase) valid = 1; if (valid & clear) { qe[15] &= ~0x80; } break; case SLI_QTYPE_MQ: valid = q->index != q->u.r_idx; break; case SLI_QTYPE_RQ: valid = TRUE; clear = FALSE; break; default: ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type); } if (clear) { ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE); } return valid; } /** * @ingroup sli * @brief Read an entry from the queue object. * * @param sli4 SLI context. * @param q Pointer to the queue object. * @param entry Destination pointer for the queue entry contents. * * @return Returns 0 on success, or non-zero otherwise. */ int32_t sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry) { int32_t rc = 0; uint8_t *qe = q->dma.virt; uint32_t *qindex = NULL; uint8_t clear = (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) ? FALSE : TRUE; if (SLI_QTYPE_MQ == q->type) { qindex = &q->u.r_idx; } else { qindex = &q->index; } ocs_lock(&q->lock); ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD); qe += *qindex * q->size; if (!sli_queue_entry_is_valid(q, qe, clear)) { ocs_unlock(&q->lock); return -1; } if (entry) { ocs_memcpy(entry, qe, q->size); #if defined(OCS_INCLUDE_DEBUG) switch(q->type) { case SLI_QTYPE_CQ: ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size); break; case SLI_QTYPE_MQ: ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64); break; case SLI_QTYPE_EQ: ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size); break; default: break; } #endif } switch (q->type) { case SLI_QTYPE_EQ: case SLI_QTYPE_CQ: case SLI_QTYPE_MQ: *qindex = (*qindex + 1) & (q->length - 1); if (SLI_QTYPE_MQ != q->type) { q->n_posted++; /* * For prism, the phase value will be used to check the validity of eq/cq entries. * The value toggles after a complete sweep through the queue. */ if ((SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) && (*qindex == 0)) { q->phase ^= (uint16_t) 0x1; } } break; default: /* reads don't update the index */ break; } ocs_unlock(&q->lock); return rc; } int32_t sli_queue_index(sli4_t *sli4, sli4_queue_t *q) { if (q) { return q->index; } else { return -1; } } int32_t sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry) { int32_t rc; ocs_lock(&q->lock); rc = _sli_queue_poke(sli4, q, index, entry); ocs_unlock(&q->lock); return rc; } int32_t _sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry) { int32_t rc = 0; uint8_t *qe = q->dma.virt; if (index >= q->length) { return -1; } qe += index * q->size; if (entry) { ocs_memcpy(qe, entry, q->size); } ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE); return rc; } /** * @ingroup sli * @brief Allocate SLI Port resources. 
 *
 * @par Description
 * Allocate port-related resources, such as VFI, RPI, XRI, and so on.
 * Resources are modeled using extents, regardless of whether the underlying
 * device implements resource extents. If the device does not implement
 * extents, the SLI layer models this as a single (albeit large) extent.
 *
 * @param sli4 SLI context.
 * @param rtype Resource type (for example, RPI or XRI).
 * @param rid Allocated resource ID.
 * @param index Index into the bitmap.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid,
		   uint32_t *index)
{
	int32_t rc = 0;
	uint32_t size;
	uint32_t extent_idx;
	uint32_t item_idx;
	int status;

	*rid = UINT32_MAX;
	*index = UINT32_MAX;

	switch (rtype) {
	case SLI_RSRC_FCOE_VFI:
	case SLI_RSRC_FCOE_VPI:
	case SLI_RSRC_FCOE_RPI:
	case SLI_RSRC_FCOE_XRI:
		status = ocs_bitmap_find(sli4->config.extent[rtype].use_map,
					 sli4->config.extent[rtype].map_size);
		if (status < 0) {
			ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n",
				    rtype, sli4->config.extent[rtype].n_alloc);
			rc = -1;
			break;
		} else {
			*index = status;
		}

		size = sli4->config.extent[rtype].size;

		extent_idx = *index / size;
		item_idx   = *index % size;

		*rid = sli4->config.extent[rtype].base[extent_idx] + item_idx;

		sli4->config.extent[rtype].n_alloc++;
		break;
	default:
		rc = -1;
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Free the SLI Port resources.
 *
 * @par Description
 * Free port-related resources, such as VFI, RPI, XRI, and so on. See
 * discussion of "extent" usage in sli_resource_alloc.
 *
 * @param sli4 SLI context.
 * @param rtype Resource type (for example, RPI or XRI).
 * @param rid Allocated resource ID.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid)
{
	int32_t rc = -1;
	uint32_t x;
	uint32_t size, *base;

	switch (rtype) {
	case SLI_RSRC_FCOE_VFI:
	case SLI_RSRC_FCOE_VPI:
	case SLI_RSRC_FCOE_RPI:
	case SLI_RSRC_FCOE_XRI:
		/*
		 * Figure out which extent contains the resource ID. I.e. find
		 * the extent such that
		 *   extent->base <= resource ID < extent->base + extent->size
		 */
		base = sli4->config.extent[rtype].base;
		size = sli4->config.extent[rtype].size;

		/*
		 * In the case of FW reset, this may be cleared but the force_free path will
		 * still attempt to free the resource. Prevent a NULL pointer access.
		 */
		if (base != NULL) {
			for (x = 0; x < sli4->config.extent[rtype].number; x++) {
				if ((rid >= base[x]) && (rid < (base[x] + size))) {
					rid -= base[x];
					ocs_bitmap_clear(sli4->config.extent[rtype].use_map,
							 (x * size) + rid);
					rc = 0;
					break;
				}
			}
		}
		break;
	default:
		;
	}

	return rc;
}

int32_t
sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype)
{
	int32_t rc = -1;
	uint32_t i;

	switch (rtype) {
	case SLI_RSRC_FCOE_VFI:
	case SLI_RSRC_FCOE_VPI:
	case SLI_RSRC_FCOE_RPI:
	case SLI_RSRC_FCOE_XRI:
		for (i = 0; i < sli4->config.extent[rtype].map_size; i++) {
			ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i);
		}
		rc = 0;
		break;
	default:
		;
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Parse an EQ entry to retrieve the CQ_ID for this event.
 *
 * @param sli4 SLI context.
 * @param buf Pointer to the EQ entry.
 * @param cq_id CQ_ID for this entry (only valid on success).
 *
 * @return
 * - 0 if success.
 * - < 0 if error.
 * - > 0 if firmware detects EQ overflow.
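 *
 * @par Example
 * A minimal sketch of handling one EQE (assumes @c eqe was read via
 * sli_queue_read()):
 * @code
 * uint16_t cq_id;
 *
 * if (sli_eq_parse(sli4, eqe, &cq_id) == 0) {
 *         // process completions on the CQ identified by cq_id
 * }
 * @endcode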
 */
int32_t
sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id)
{
	sli4_eqe_t *eqe = (void *)buf;
	int32_t rc = 0;

	if (!sli4 || !buf || !cq_id) {
		ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n",
			    sli4, buf, cq_id);
		return -1;
	}

	switch (eqe->major_code) {
	case SLI4_MAJOR_CODE_STANDARD:
		*cq_id = eqe->resource_id;
		break;
	case SLI4_MAJOR_CODE_SENTINEL:
		ocs_log_debug(sli4->os, "sentinel EQE\n");
		rc = 1;
		break;
	default:
		ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n",
			     eqe->major_code, eqe->minor_code);
		rc = -1;
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Parse a CQ entry to retrieve the event type and the associated queue.
 *
 * @param sli4 SLI context.
 * @param cq CQ to process.
 * @param cqe Pointer to the CQ entry.
 * @param etype CQ event type.
 * @param q_id Queue ID associated with this completion message
 * (that is, MQ_ID, RQ_ID, and so on).
 *
 * @return
 * - 0 if call completed correctly and CQE status is SUCCESS.
 * - -1 if call failed (no CQE status).
 * - Other value if call completed correctly and return value is a CQE status value.
 */
int32_t
sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
	     uint16_t *q_id)
{
	int32_t rc = 0;

	if (!sli4 || !cq || !cqe || !etype) {
		ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
			    sli4, cq, cqe, etype, q_id);
		return -1;
	}

	if (cq->u.flag.is_mq) {
		sli4_mcqe_t *mcqe = (void *)cqe;

		if (mcqe->ae) {
			*etype = SLI_QENTRY_ASYNC;
		} else {
			*etype = SLI_QENTRY_MQ;
			rc = sli_cqe_mq(mcqe);
		}
		*q_id = -1;
	} else if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
	} else {
		ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n",
			     sli4->port_type);
		rc = -1;
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Cause chip to enter an unrecoverable error state.
 *
 * @par Description
 * Cause chip to enter an unrecoverable error state. This is
 * used when detecting unexpected FW behavior so the FW can be
 * halted from the driver as soon as the error is detected.
 *
 * @param sli4 SLI context.
 * @param dump Generate dump as part of reset.
 *
 * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip).
 */
int32_t
sli_raise_ue(sli4_t *sli4, uint8_t dump)
{
#define FDD 2
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
		switch (sli_get_asic_type(sli4)) {
		case SLI4_ASIC_TYPE_BE3: {
			sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff);
			sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0);
			break;
		}
		case SLI4_ASIC_TYPE_SKYHAWK: {
			uint32_t value;
			value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG);
			ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24)));
			break;
		}
		default:
			ocs_log_test(sli4->os, "invalid asic type %d\n",
				     sli_get_asic_type(sli4));
			return -1;
		}
	} else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
		   (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
		if (FDD == dump) {
			sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL,
				      SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP);
		} else {
			uint32_t value = SLI4_PHYDEV_CONTROL_FRST;
			if (dump == 1) {
				value |= SLI4_PHYDEV_CONTROL_DD;
			}
			sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value);
		}
	} else {
		ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
		return -1;
	}

	return 0;
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
 *
 * @param sli4 SLI context.
 *
 * @return Returns 1 if a dump is ready, 2 if a function-level dump (FDP)
 * is present, 0 if no dump is present, or -1 on an unsupported chip.
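 *
 * @par Example
 * A minimal sketch:
 * @code
 * switch (sli_dump_is_ready(sli4)) {
 * case 1:  // full dump ready to be read
 *         break;
 * case 2:  // function-level dump (FDP) present
 *         break;
 * default: // no dump, or unsupported chip (-1)
 *         break;
 * }
 * @endcode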
 */
int32_t
sli_dump_is_ready(sli4_t *sli4)
{
	int32_t	rc = 0;
	uint32_t port_val;
	uint32_t bmbx_val;
	uint32_t uerr_lo;
	uint32_t uerr_hi;
	uint32_t uerr_mask_lo;
	uint32_t uerr_mask_hi;

	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
		/* for iftype=0, dump ready when UE is encountered */
		uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
		uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
		uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
		uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
		if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) {
			rc = 1;
		}
	} else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
		   (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
		/*
		 * Ensure that the port is ready AND the mailbox is
		 * ready before signaling that the dump is ready to go.
		 */
		port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
		bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX);

		if ((bmbx_val & SLI4_BMBX_RDY) &&
		    SLI4_PORT_STATUS_READY(port_val)) {
			if (SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) {
				rc = 1;
			} else if (SLI4_PORT_STATUS_FDP_PRESENT(port_val)) {
				rc = 2;
			}
		}
	} else {
		ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
		return -1;
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and no dump is present.
 * - 1 if call completed and dump is present.
 * - -1 if call failed (unsupported chip).
 */
int32_t
sli_dump_is_present(sli4_t *sli4)
{
	uint32_t val;
	uint32_t ready;

	if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) &&
	    (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(sli4))) {
		ocs_log_test(sli4->os, "Function only supported for I/F type 2\n");
		return -1;
	}

	/* If the chip is not ready, then there cannot be a dump */
	ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US);
	if (!ready) {
		return 0;
	}

	val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
	if (UINT32_MAX == val) {
		ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
		return -1;
	} else {
		return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0);
	}
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and reset is not required.
 * - 1 if call completed and reset is required.
 * - -1 if call failed.
 */
int32_t
sli_reset_required(sli4_t *sli4)
{
	uint32_t val;

	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
		ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
		return 0;
	}

	val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
	if (UINT32_MAX == val) {
		ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
		return -1;
	} else {
		return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0);
	}
}

/**
 * @ingroup sli
 * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if
 * the port status indicates that a FW error has occurred.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and no FW error occurred.
 * - > 0 which indicates that a FW error has occurred.
 * - -1 if call failed.
 */
int32_t
sli_fw_error_status(sli4_t *sli4)
{
	uint32_t sliport_semaphore;
	int32_t rc = 0;

	sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
	if (UINT32_MAX == sliport_semaphore) {
		ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n");
		return 1;
	}
	rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0);

	if (rc == 0) {
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
		    (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) {
			uint32_t uerr_mask_lo, uerr_mask_hi;
			uint32_t uerr_status_lo, uerr_status_hi;

			uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
			uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
			uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
			uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);

			if ((uerr_mask_lo & uerr_status_lo) != 0 ||
			    (uerr_mask_hi & uerr_status_hi) != 0) {
				rc = 1;
			}
		} else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
			   SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
			uint32_t sliport_status;

			sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
			rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0);
		}
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Determine if the chip FW is in a ready state
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and FW is not ready.
 * - 1 if call completed correctly and FW is ready.
 * - -1 if call failed.
 */
int32_t
sli_fw_ready(sli4_t *sli4)
{
	uint32_t val;
	int32_t rc = -1;

	/*
	 * Is firmware ready for operation? Check needed depends on IF_TYPE
	 */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
	    SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) {
		val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
		rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY ==
		       SLI4_PORT_SEMAPHORE_PORT(val)) &&
		      (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0);
	} else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
		   SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
		val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
		rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0);
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Determine if the link can be configured
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if link is not configurable.
 * - 1 if link is configurable.
 */
int32_t
sli_link_is_configurable(sli4_t *sli)
{
	int32_t rc = 0;

	/*
	 * Link config works on: Skyhawk, Lancer, and Corsair.
	 * Link config does not work on: Lancer G6/G7 and BE3.
	 */
	switch (sli_get_asic_type(sli)) {
	case SLI4_ASIC_TYPE_SKYHAWK:
	case SLI4_ASIC_TYPE_LANCER:
	case SLI4_ASIC_TYPE_CORSAIR:
		rc = 1;
		break;
	case SLI4_ASIC_TYPE_LANCERG6:
	case SLI4_ASIC_TYPE_LANCERG7:
	case SLI4_ASIC_TYPE_BE3:
	default:
		rc = 0;
		break;
	}

	return rc;
}

/* vim: set noexpandtab textwidth=120: */

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_WQ_CREATE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp The ULP to bind.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
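 *
 * @par Example
 * A minimal sketch (assumes @c qmem was allocated via ocs_dma_alloc() and
 * @c cq_id names an existing CQ):
 * @code
 * if (sli_cmd_fcoe_wq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                            &qmem, cq_id, ulp) == 0) {
 *         // command could not be built (for example, too many pages)
 * } else if (sli_bmbx_command(sli4)) {
 *         // bootstrap mailbox write failed
 * }
 * @endcode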
*/ int32_t sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp) { sli4_req_fcoe_wq_create_t *wq = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_fcoe_wq_create_t), sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off); wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE; wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) - sizeof(sli4_req_hdr_t); /* valid values for number of pages: 1-4 (sec 4.5.1) */ wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) { return 0; } wq->cq_id = cq_id; if (sli4->config.dual_ulp_capable) { wq->dua = 1; wq->bqu = 1; wq->ulp = ulp; } for (p = 0, addr = qmem->phys; p < wq->num_pages; p++, addr += SLI_PAGE_SIZE) { wq->page_physical_address[p].low = ocs_addr32_lo(addr); wq->page_physical_address[p].high = ocs_addr32_hi(addr); } return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t)); } /** * @ingroup sli_fc * @brief Write an FCOE_WQ_CREATE_V1 command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param qmem DMA memory for the queue. * @param cq_id Associated CQ_ID. * @param ignored This parameter carries the ULP for WQ (ignored for V1) * * @return Returns the number of bytes written. */ int32_t sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored) { sli4_req_fcoe_wq_create_v1_t *wq = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; uint32_t page_size = 0; uint32_t page_bytes = 0; uint32_t n_wqe = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t), sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off); wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE; wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) - sizeof(sli4_req_hdr_t); wq->hdr.version = 1; n_wqe = qmem->size / sli4->config.wqe_size; /* This heuristic to determine the page size is simplistic * but could be made more sophisticated */ switch (qmem->size) { case 4096: case 8192: case 16384: case 32768: page_size = 1; break; case 65536: page_size = 2; break; case 131072: page_size = 4; break; case 262144: page_size = 8; break; case 524288: page_size = 10; break; default: return 0; } page_bytes = page_size * SLI_PAGE_SIZE; /* valid values for number of pages: 1-8 */ wq->num_pages = sli_page_count(qmem->size, page_bytes); if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) { return 0; } wq->cq_id = cq_id; wq->page_size = page_size; if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) { wq->wqe_size = SLI4_WQE_EXT_SIZE; } else { wq->wqe_size = SLI4_WQE_SIZE; } wq->wqe_count = n_wqe; for (p = 0, addr = qmem->phys; p < wq->num_pages; p++, addr += page_bytes) { wq->page_physical_address[p].low = ocs_addr32_lo(addr); wq->page_physical_address[p].high = 
			ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_WQ_DESTROY command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param wq_id WQ_ID.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id)
{
	sli4_req_fcoe_wq_destroy_t *wq = NULL;
	uint32_t sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t),
				   sizeof(sli4_res_hdr_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off);

	wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY;
	wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) -
				 sizeof(sli4_req_hdr_t);

	wq->wq_id = wq_id;

	return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_POST_SGL_PAGES command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param xri Starting XRI.
 * @param xri_count Number of XRIs to post.
 * @param page0 First SGL memory page.
 * @param page1 Second SGL memory page (optional).
 * @param dma DMA buffer for non-embedded mailbox command (optional).
 *
 * @note If a non-embedded mailbox command is used, the DMA buffer must be
 * at least (32 + xri_count*16) bytes in length.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size,
			    uint16_t xri, uint32_t xri_count,
			    ocs_dma_t *page0[], ocs_dma_t *page1[],
			    ocs_dma_t *dma)
{
	sli4_req_fcoe_post_sgl_pages_t *post = NULL;
	uint32_t sli_config_off = 0;
	uint32_t i;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t),
				   sizeof(sli4_res_hdr_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    dma);
	}
	if (dma) {
		post = dma->virt;
		ocs_memset(post, 0, dma->size);
	} else {
		post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf + sli_config_off);
	}

	post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES;
	post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	/* payload size calculation
	 *   4 = xri_start + xri_count
	 *   xri_count = # of XRIs registered
	 *   sizeof(uint64_t) = physical address size
	 *   2 = # of physical addresses per page set
	 */
	post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2));

	post->xri_start = xri;
	post->xri_count = xri_count;

	for (i = 0; i < xri_count; i++) {
		post->page_set[i].page0_low  = ocs_addr32_lo(page0[i]->phys);
		post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys);
	}

	if (page1) {
		for (i = 0; i < xri_count; i++) {
			post->page_set[i].page1_low  = ocs_addr32_lo(page1[i]->phys);
			post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys);
		}
	}

	return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_RQ_CREATE command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp This parameter carries the ULP for the RQ.
 * @param buffer_size Buffer size pointed to by each RQE.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size,
		       ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
		       uint16_t buffer_size)
{
	sli4_req_fcoe_rq_create_t *rq = NULL;
	uint32_t sli_config_off = 0;
	uint32_t p;
	uintptr_t addr;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_fcoe_rq_create_t),
				   sizeof(sli4_res_common_create_queue_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off);

	rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
	rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
	rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) -
				 sizeof(sli4_req_hdr_t);

	/* valid values for number of pages: 1-8 (sec 4.5.6) */
	rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
	if (!rq->num_pages ||
	    (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) {
		ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages);
		return 0;
	}

	/*
	 * RQE count is the log base 2 of the total number of entries
	 */
	rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE);

	if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) ||
	    (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) {
		ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
			    buffer_size,
			    SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE,
			    SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE);
		return -1;
	}
	rq->buffer_size = buffer_size;

	rq->cq_id = cq_id;

	if (sli4->config.dual_ulp_capable) {
		rq->dua = 1;
		rq->bqu = 1;
		rq->ulp = ulp;
	}

	for (p = 0, addr = qmem->phys;
	     p < rq->num_pages;
	     p++, addr += SLI_PAGE_SIZE) {
		rq->page_physical_address[p].low  = ocs_addr32_lo(addr);
		rq->page_physical_address[p].high = ocs_addr32_hi(addr);
	}

	return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t));
}

/**
 * @ingroup sli_fc
 * @brief Write an FCOE_RQ_CREATE_V1 command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param qmem DMA memory for the queue.
 * @param cq_id Associated CQ_ID.
 * @param ulp This parameter carries the ULP for RQ (ignored for V1)
 * @param buffer_size Buffer size pointed to by each RQE.
 *
 * @note This creates a Version 1 message.
 *
 * @return Returns the number of bytes written.
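 *
 * @par Example
 * A minimal sketch (assumes @c qmem holds the RQE pages and @c buffer_size
 * lies within the configured min/max range):
 * @code
 * if (sli_cmd_fcoe_rq_create_v1(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                               &qmem, cq_id, 0, buffer_size) &&
 *     (sli_bmbx_command(sli4) == 0)) {
 *         // check the embedded response status before using the new RQ id
 * }
 * @endcode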
*/ int32_t sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, uint16_t buffer_size) { sli4_req_fcoe_rq_create_v1_t *rq = NULL; uint32_t sli_config_off = 0; uint32_t p; uintptr_t addr; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t), sizeof(sli4_res_common_create_queue_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off); rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE; rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) - sizeof(sli4_req_hdr_t); rq->hdr.version = 1; /* Disable "no buffer warnings" to avoid Lancer bug */ rq->dnb = TRUE; /* valid values for number of pages: 1-8 (sec 4.5.6) */ rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) { ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n", rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES); return 0; } /* * RQE count is the total number of entries (note not lg2(# entries)) */ rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE; rq->rqe_size = SLI4_FCOE_RQE_SIZE_8; rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096; if ((buffer_size < sli4->config.rq_min_buf_size) || (buffer_size > sli4->config.rq_max_buf_size)) { ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n", buffer_size, sli4->config.rq_min_buf_size, sli4->config.rq_max_buf_size); return -1; } rq->buffer_size = buffer_size; rq->cq_id = cq_id; for (p = 0, addr = qmem->phys; p < rq->num_pages; p++, addr += SLI_PAGE_SIZE) { rq->page_physical_address[p].low = ocs_addr32_lo(addr); rq->page_physical_address[p].high = ocs_addr32_hi(addr); } return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t)); } /** * @ingroup sli_fc * @brief Write an FCOE_RQ_DESTROY command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param rq_id RQ_ID. * * @return Returns the number of bytes written. */ int32_t sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id) { sli4_req_fcoe_rq_destroy_t *rq = NULL; uint32_t sli_config_off = 0; if (SLI4_PORT_TYPE_FC == sli4->port_type) { uint32_t payload_size; /* Payload length must accommodate both request and response */ payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t), sizeof(sli4_res_hdr_t)); sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); } rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off); rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY; rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) - sizeof(sli4_req_hdr_t); rq->rq_id = rq_id; return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t)); } /** * @ingroup sli_fc * @brief Write an FCOE_READ_FCF_TABLE command. * * @note * The response of this command exceeds the size of an embedded * command and requires an external buffer with DMA capability to hold the results. * The caller should allocate the ocs_dma_t structure / memory. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param dma Pointer to DMA memory structure. This is allocated by the caller. * @param index FCF table index to retrieve. 
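 *
 * @par Example
 * A minimal sketch (the 4096-byte response buffer is illustrative):
 * @code
 * ocs_dma_t fcf_dma;
 *
 * if ((ocs_dma_alloc(sli4->os, &fcf_dma, 4096, SLI_PAGE_SIZE) == 0) &&
 *     (sli_cmd_fcoe_read_fcf_table(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                  &fcf_dma, 0) > 0)) {
 *         // issue with sli_bmbx_command() and parse the response in fcf_dma.virt
 * }
 * @endcode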
* * @return Returns the number of bytes written. */ int32_t sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t index) { sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL; if (SLI4_PORT_TYPE_FC != sli4->port_type) { ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n"); return -1; } read_fcf = dma->virt; ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t)); read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE; read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; read_fcf->hdr.request_length = dma->size - sizeof(sli4_req_fcoe_read_fcf_table_t); read_fcf->fcf_index = index; return sli_cmd_sli_config(sli4, buf, size, 0, dma); } /** * @ingroup sli_fc * @brief Write an FCOE_POST_HDR_TEMPLATES command. * * @param sli4 SLI context. * @param buf Destination buffer for the command. * @param size Buffer size, in bytes. * @param dma Pointer to DMA memory structure. This is allocated by the caller. * @param rpi Starting RPI index for the header templates. * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts. * * @return Returns the number of bytes written. */ int32_t sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t rpi, ocs_dma_t *payload_dma) { sli4_req_fcoe_post_hdr_templates_t *template = NULL; uint32_t sli_config_off = 0; uintptr_t phys = 0; uint32_t i = 0; uint32_t page_count; uint32_t payload_size; page_count = sli_page_count(dma->size, SLI_PAGE_SIZE); payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) + page_count * sizeof(sli4_physical_page_descriptor_t); if (page_count > 16) { /* We can't fit more than 16 descriptors into an embedded mailbox command; it has to be non-embedded */ if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) { ocs_log_err(sli4->os, "mailbox payload memory allocation failed\n"); return 0; } sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma); template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt; } else { sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off); } if (UINT16_MAX == rpi) { rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0]; } template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES; template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) - sizeof(sli4_req_hdr_t); template->rpi_offset = rpi; template->page_count = page_count; phys = dma->phys; for (i = 0; i < template->page_count; i++) { template->page_descriptor[i].low = ocs_addr32_lo(phys); template->page_descriptor[i].high = ocs_addr32_hi(phys); phys += SLI_PAGE_SIZE; } return(sli_config_off + payload_size); } int32_t sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index) { sli4_req_fcoe_rediscover_fcf_t *redisc = NULL; uint32_t sli_config_off = 0; sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_fcoe_rediscover_fcf_t), NULL); redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off); redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF; redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) - sizeof(sli4_req_hdr_t); if (index == UINT16_MAX) { redisc->fcf_count = 0; } else { redisc->fcf_count = 1; redisc->fcf_index[0] = index; } return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t)); } /** * @ingroup sli_fc * @brief Write an
ABORT_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param type Abort type, such as XRI, abort tag, and request tag. * @param send_abts Boolean to cause the hardware to automatically generate an ABTS. * @param ids ID of IOs to abort. * @param mask Mask applied to the ID values to abort. * @param tag Tag value associated with this abort. * @param cq_id The id of the completion queue where the WQE response is sent. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type, uint32_t send_abts, uint32_t ids, uint32_t mask, uint16_t tag, uint16_t cq_id) { sli4_abort_wqe_t *abort = buf; ocs_memset(buf, 0, size); switch (type) { case SLI_ABORT_XRI: abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; if (mask) { ocs_log_warn(sli4->os, "warning non-zero mask %#x when aborting XRI %#x\n", mask, ids); mask = 0; } break; case SLI_ABORT_ABORT_ID: abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG; break; case SLI_ABORT_REQUEST_ID: abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG; break; default: ocs_log_test(sli4->os, "unsupported type %#x\n", type); return -1; } abort->ia = send_abts ? 0 : 1; /* Suppress ABTS retries */ abort->ir = 1; abort->t_mask = mask; abort->t_tag = ids; abort->command = SLI4_WQE_ABORT; abort->request_tag = tag; abort->qosd = TRUE; abort->cq_id = cq_id; abort->cmd_type = SLI4_CMD_ABORT_WQE; return 0; } /** * @ingroup sli_fc * @brief Write an ELS_REQUEST64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the ELS request. * @param req_type ELS request type. * @param req_len Length of ELS request in bytes. * @param max_rsp_len Max length of ELS response in bytes. * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rnode Destination of ELS request (that is, the remote node). * * @return Returns 0 on success, or a non-zero value on failure.
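 *
 * @par Example
 * A minimal, illustrative sketch of formatting a PLOGI request WQE. The
 * SGL is assumed to already hold the ELS payload in sge[0] and the
 * response buffer in sge[1]; els_sgl, plogi_len, rsp_len, xri, tag,
 * cq_id, and rnode are hypothetical values owned by the caller:
 * @code
 * uint8_t wqe[SLI4_WQE_EXT_BYTES];   // scratch buffer for the WQE
 *
 * // A timeout of 0 means 2 * R_A_TOV.
 * if (sli_els_request64_wqe(sli4, wqe, sli4->config.wqe_size, &els_sgl,
 *                           FC_ELS_CMD_PLOGI, plogi_len, rsp_len,
 *                           0, xri, tag, cq_id, rnode)) {
 *         // non-zero return: the WQE could not be formatted
 * }
 * @endcode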
*/ int32_t sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint8_t req_type, uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout, uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode) { sli4_els_request64_wqe_t *els = buf; sli4_sge_t *sge = sgl->virt; uint8_t is_fabric = FALSE; ocs_memset(buf, 0, size); if (sli4->config.sgl_pre_registered) { els->xbl = FALSE; els->dbde = TRUE; els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64; els->els_request_payload.buffer_length = req_len; els->els_request_payload.u.data.buffer_address_low = sge[0].buffer_address_low; els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high; } else { els->xbl = TRUE; els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP; els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t); els->els_request_payload.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); } els->els_request_payload_length = req_len; els->max_response_payload_length = max_rsp_len; els->xri_tag = xri; els->timer = timeout; els->class = SLI4_ELS_REQUEST64_CLASS_3; els->command = SLI4_WQE_ELS_REQUEST64; els->request_tag = tag; if (rnode->node_group) { els->hlm = TRUE; els->remote_id = rnode->fc_id & 0x00ffffff; } els->iod = SLI4_ELS_REQUEST64_DIR_READ; els->qosd = TRUE; /* figure out the ELS_ID value from the request buffer */ switch (req_type) { case FC_ELS_CMD_LOGO: els->els_id = SLI4_ELS_REQUEST64_LOGO; if (rnode->attached) { els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; els->context_tag = rnode->indicator; } else { els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; } if (FC_ADDR_FABRIC == rnode->fc_id) { is_fabric = TRUE; } break; case FC_ELS_CMD_FDISC: if (FC_ADDR_FABRIC == rnode->fc_id) { is_fabric = TRUE; } if (0 == rnode->sport->fc_id) { els->els_id = SLI4_ELS_REQUEST64_FDISC; is_fabric = TRUE; } else { els->els_id = SLI4_ELS_REQUEST64_OTHER; } els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; els->sp = TRUE; break; case FC_ELS_CMD_FLOGI: els->els_id = SLI4_ELS_REQUEST64_FLOGIN; is_fabric = TRUE; if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { if (!rnode->sport->domain) { ocs_log_test(sli4->os, "invalid domain handle\n"); return -1; } /* * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the * FCFI here */ els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI; els->context_tag = rnode->sport->domain->fcf_indicator; els->sp = TRUE; } else { els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; /* * Set SP here ... we haven't done a REG_VPI yet * TODO: need to maybe not set this when we have * completed VFI/VPI registrations ... * * Use the FC_ID of the SPORT if it has been allocated, otherwise * use an S_ID of zero. 
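 * (If the sport FC_ID is still unassigned, that is, UINT32_MAX, the SID
 * field is left at zero and the fabric assigns the address during FLOGI.)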
*/ els->sp = TRUE; if (rnode->sport->fc_id != UINT32_MAX) { els->sid = rnode->sport->fc_id; } } break; case FC_ELS_CMD_PLOGI: els->els_id = SLI4_ELS_REQUEST64_PLOGI; els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; break; case FC_ELS_CMD_SCR: els->els_id = SLI4_ELS_REQUEST64_OTHER; els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; break; default: els->els_id = SLI4_ELS_REQUEST64_OTHER; if (rnode->attached) { els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; els->context_tag = rnode->indicator; } else { els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; } break; } if (is_fabric) { els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC; } else { els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC; } els->cq_id = cq_id; if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) { els->remote_id = rnode->fc_id; } if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) { els->temporary_rpi = rnode->indicator; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_ICMND64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the scatter gather list. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rpi remote node indicator (RPI) * @param rnode Destination request (that is, the remote node). * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint16_t xri, uint16_t tag, uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode, uint8_t timeout) { sli4_fcp_icmnd64_wqe_t *icmnd = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { icmnd->xbl = FALSE; icmnd->dbde = TRUE; icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64; icmnd->bde.buffer_length = sge[0].buffer_length; icmnd->bde.u.data.buffer_address_low = sge[0].buffer_address_low; icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high; } else { icmnd->xbl = TRUE; icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP; icmnd->bde.buffer_length = sgl->size; icmnd->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); } icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; icmnd->xri_tag = xri; icmnd->context_tag = rpi; icmnd->timer = timeout; icmnd->pu = 2; /* WQE word 4 contains read transfer length */ icmnd->class = SLI4_ELS_REQUEST64_CLASS_3; icmnd->command = SLI4_WQE_FCP_ICMND64; icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; icmnd->abort_tag = xri; icmnd->request_tag = tag; icmnd->len_loc = 3; if (rnode->node_group) { icmnd->hlm = TRUE; icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff; } if (((ocs_node_t *)rnode->node)->fcp2device) { icmnd->erp = TRUE; } icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE; icmnd->cq_id = cq_id; return 0; } /** * @ingroup sli_fc * @brief Write an FCP_IREAD64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the scatter gather list. 
* @param first_data_sge Index of first data sge (used if perf hints are enabled) * @param xfer_len Data transfer length. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). * @param dif T10 DIF operation, or 0 to disable. * @param bs T10 DIF block size, or 0 if DIF is disabled. * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode, uint8_t dif, uint8_t bs, uint8_t timeout) { sli4_fcp_iread64_wqe_t *iread = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { iread->xbl = FALSE; iread->dbde = TRUE; iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64; iread->bde.buffer_length = sge[0].buffer_length; iread->bde.u.data.buffer_address_low = sge[0].buffer_address_low; iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high; } else { iread->xbl = TRUE; iread->bde.bde_type = SLI4_BDE_TYPE_BLP; iread->bde.buffer_length = sgl->size; iread->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); /* fill out fcp_cmnd buffer len and change resp buffer to be of type * "skip" (note: response will still be written to sge[1] if necessary) */ iread->fcp_cmd_buffer_length = sge[0].buffer_length; sge[1].sge_type = SLI4_SGE_TYPE_SKIP; } iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; iread->total_transfer_length = xfer_len; iread->xri_tag = xri; iread->context_tag = rpi; iread->timer = timeout; iread->pu = 2; /* WQE word 4 contains read transfer length */ iread->class = SLI4_ELS_REQUEST64_CLASS_3; iread->command = SLI4_WQE_FCP_IREAD64; iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; iread->dif = dif; iread->bs = bs; iread->abort_tag = xri; iread->request_tag = tag; iread->len_loc = 3; if (rnode->node_group) { iread->hlm = TRUE; iread->remote_n_port_id = rnode->fc_id & 0x00ffffff; } if (((ocs_node_t *)rnode->node)->fcp2device) { iread->erp = TRUE; } iread->iod = 1; iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE; iread->cq_id = cq_id; if (sli4->config.perf_hint) { iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; iread->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_IWRITE64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the scatter gather list. * @param first_data_sge Index of first data sge (used if perf hints are enabled) * @param xfer_len Data transfer length. * @param first_burst The number of first burst bytes * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. 
* @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node) * @param dif T10 DIF operation, or 0 to disable * @param bs T10 DIF block size, or 0 if DIF is disabled * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t xfer_len, uint32_t first_burst, uint16_t xri, uint16_t tag, uint16_t cq_id, uint32_t rpi, ocs_remote_node_t *rnode, uint8_t dif, uint8_t bs, uint8_t timeout) { sli4_fcp_iwrite64_wqe_t *iwrite = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { iwrite->xbl = FALSE; iwrite->dbde = TRUE; iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64; iwrite->bde.buffer_length = sge[0].buffer_length; iwrite->bde.u.data.buffer_address_low = sge[0].buffer_address_low; iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high; } else { iwrite->xbl = TRUE; iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP; iwrite->bde.buffer_length = sgl->size; iwrite->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); /* fill out fcp_cmnd buffer len and change resp buffer to be of type * "skip" (note: response will still be written to sge[1] if necessary) */ iwrite->fcp_cmd_buffer_length = sge[0].buffer_length; sge[1].sge_type = SLI4_SGE_TYPE_SKIP; } iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; iwrite->total_transfer_length = xfer_len; iwrite->initial_transfer_length = MIN(xfer_len, first_burst); iwrite->xri_tag = xri; iwrite->context_tag = rpi; iwrite->timer = timeout; iwrite->pu = 2; /* WQE word 4 contains read transfer length */ iwrite->class = SLI4_ELS_REQUEST64_CLASS_3; iwrite->command = SLI4_WQE_FCP_IWRITE64; iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; iwrite->dif = dif; iwrite->bs = bs; iwrite->abort_tag = xri; iwrite->request_tag = tag; iwrite->len_loc = 3; if (rnode->node_group) { iwrite->hlm = TRUE; iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff; } if (((ocs_node_t *)rnode->node)->fcp2device) { iwrite->erp = TRUE; } iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE; iwrite->cq_id = cq_id; if (sli4->config.perf_hint) { iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; iwrite->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_TRECEIVE64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the Scatter-Gather List. * @param first_data_sge Index of first data sge (used if perf hints are enabled) * @param relative_off Relative offset of the IO (if any). * @param xfer_len Data transfer length. * @param xri XRI for this exchange. * @param tag IO tag value. * @param xid OX_ID for the exchange. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). 
* @param flags Optional attributes, including: * - ACTIVE - IO is already active. * - AUTO RSP - Automatically generate a good FCP_RSP. * @param dif T10 DIF operation, or 0 to disable. * @param bs T10 DIF block size, or 0 if DIF is disabled. * @param csctl value of csctl field. * @param app_id value for VM application header. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) { sli4_fcp_treceive64_wqe_t *trecv = buf; sli4_fcp_128byte_wqe_t *trecv_128 = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { trecv->xbl = FALSE; trecv->dbde = TRUE; trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64; trecv->bde.buffer_length = sge[0].buffer_length; trecv->bde.u.data.buffer_address_low = sge[0].buffer_address_low; trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high; trecv->payload_offset_length = sge[0].buffer_length; } else { trecv->xbl = TRUE; /* if data is a single physical address, use a BDE */ if (!dif && (xfer_len <= sge[2].buffer_length)) { trecv->dbde = TRUE; trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64; trecv->bde.buffer_length = sge[2].buffer_length; trecv->bde.u.data.buffer_address_low = sge[2].buffer_address_low; trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high; } else { trecv->bde.bde_type = SLI4_BDE_TYPE_BLP; trecv->bde.buffer_length = sgl->size; trecv->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); } } trecv->relative_offset = relative_off; if (flags & SLI4_IO_CONTINUATION) { trecv->xc = TRUE; } trecv->xri_tag = xri; trecv->context_tag = rpi; trecv->pu = TRUE; /* WQE uses relative offset */ if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { trecv->ar = TRUE; } trecv->command = SLI4_WQE_FCP_TRECEIVE64; trecv->class = SLI4_ELS_REQUEST64_CLASS_3; trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; trecv->dif = dif; trecv->bs = bs; trecv->remote_xid = xid; trecv->request_tag = tag; trecv->iod = 1; trecv->len_loc = 0x2; if (rnode->node_group) { trecv->hlm = TRUE; trecv->dword5.dword = rnode->fc_id & 0x00ffffff; } trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE; trecv->cq_id = cq_id; trecv->fcp_data_receive_length = xfer_len; if (sli4->config.perf_hint) { trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; trecv->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; } /* The upper 7 bits of csctl is the priority */ if (csctl & SLI4_MASK_CCP) { trecv->ccpe = 1; trecv->ccp = (csctl & SLI4_MASK_CCP); } if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trecv->eat) { trecv->app_id_valid = 1; trecv->wqes = 1; trecv_128->dw[31] = app_id; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. 
* @param sgl DMA memory for the Scatter-Gather List. * @param first_data_sge Index of first data sge (used if perf hints are enabled) * @param relative_off Relative offset of the IO (if any). * @param xfer_len Data transfer length. * @param xri XRI for this exchange. * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround) * @param tag IO tag value. * @param xid OX_ID for the exchange. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). * @param flags Optional attributes, including: * - ACTIVE - IO is already active. * - AUTO RSP - Automatically generate a good FCP_RSP. * @param dif T10 DIF operation, or 0 to disable. * @param bs T10 DIF block size, or 0 if DIF is disabled. * @param csctl value of csctl field. * @param app_id value for VM application header. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) { int32_t rc; rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag, cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id); if (rc == 0) { sli4_fcp_treceive64_wqe_t *trecv = buf; trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; trecv->dword5.sec_xri_tag = sec_xri; } return rc; } /** * @ingroup sli_fc * @brief Write an FCP_TRSP64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the Scatter-Gather List. * @param rsp_len Response data length. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param xid OX_ID for the exchange. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). * @param flags Optional attributes, including: * - ACTIVE - IO is already active * - AUTO RSP - Automatically generate a good FCP_RSP. * @param csctl value of csctl field. * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0) * @param app_id value for VM application header. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id) { sli4_fcp_trsp64_wqe_t *trsp = buf; sli4_fcp_128byte_wqe_t *trsp_128 = buf; ocs_memset(buf, 0, size); if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { trsp->ag = TRUE; /* * The SLI-4 documentation states that the BDE is ignored when * using auto-good response, but, at least for IF_TYPE 0 devices, * this does not appear to be true. 
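 * As a workaround, the BDE buffer length is set below to the size of an
 * FCP_RSP (12 bytes) for those devices.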
*/ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { trsp->bde.buffer_length = 12; /* byte size of RSP */ } } else { sli4_sge_t *sge = sgl->virt; if (sli4->config.sgl_pre_registered || port_owned) { trsp->dbde = TRUE; } else { trsp->xbl = TRUE; } trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64; trsp->bde.buffer_length = sge[0].buffer_length; trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low; trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high; trsp->fcp_response_length = rsp_len; } if (flags & SLI4_IO_CONTINUATION) { trsp->xc = TRUE; } if (rnode->node_group) { trsp->hlm = TRUE; trsp->dword5 = rnode->fc_id & 0x00ffffff; } trsp->xri_tag = xri; trsp->rpi = rpi; trsp->command = SLI4_WQE_FCP_TRSP64; trsp->class = SLI4_ELS_REQUEST64_CLASS_3; trsp->remote_xid = xid; trsp->request_tag = tag; trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1); trsp->len_loc = 0x1; trsp->cq_id = cq_id; trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE; /* The upper 7 bits of csctl is the priority */ if (csctl & SLI4_MASK_CCP) { trsp->ccpe = 1; trsp->ccp = (csctl & SLI4_MASK_CCP); } if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) { trsp->app_id_valid = 1; trsp->wqes = 1; trsp_128->dw[31] = app_id; } return 0; } /** * @ingroup sli_fc * @brief Write an FCP_TSEND64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the scatter gather list. * @param first_data_sge Index of first data sge (used if perf hints are enabled) * @param relative_off Relative offset of the IO (if any). * @param xfer_len Data transfer length. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param xid OX_ID for the exchange. * @param rpi remote node indicator (RPI) * @param rnode Destination request (i.e. remote node). * @param flags Optional attributes, including: * - ACTIVE - IO is already active. * - AUTO RSP - Automatically generate a good FCP_RSP. * @param dif T10 DIF operation, or 0 to disable. * @param bs T10 DIF block size, or 0 if DIF is disabled. * @param csctl value of csctl field. * @param app_id value for VM application header. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) { sli4_fcp_tsend64_wqe_t *tsend = buf; sli4_fcp_128byte_wqe_t *tsend_128 = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { tsend->xbl = FALSE; tsend->dbde = TRUE; tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64; /* TSEND64_WQE specifies first two SGE are skipped * (i.e. 
3rd is valid) */ tsend->bde.buffer_length = sge[2].buffer_length; tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low; tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high; } else { tsend->xbl = TRUE; /* if data is a single physical address, use a BDE */ if (!dif && (xfer_len <= sge[2].buffer_length)) { tsend->dbde = TRUE; tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64; /* TSEND64_WQE specifies first two SGE are skipped * (i.e. 3rd is valid) */ tsend->bde.buffer_length = sge[2].buffer_length; tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low; tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high; } else { tsend->bde.bde_type = SLI4_BDE_TYPE_BLP; tsend->bde.buffer_length = sgl->size; tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); } } tsend->relative_offset = relative_off; if (flags & SLI4_IO_CONTINUATION) { tsend->xc = TRUE; } tsend->xri_tag = xri; tsend->rpi = rpi; tsend->pu = TRUE; /* WQE uses relative offset */ if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { tsend->ar = TRUE; } tsend->command = SLI4_WQE_FCP_TSEND64; tsend->class = SLI4_ELS_REQUEST64_CLASS_3; tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; tsend->dif = dif; tsend->bs = bs; tsend->remote_xid = xid; tsend->request_tag = tag; tsend->len_loc = 0x2; if (rnode->node_group) { tsend->hlm = TRUE; tsend->dword5 = rnode->fc_id & 0x00ffffff; } tsend->cq_id = cq_id; tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE; tsend->fcp_data_transmit_length = xfer_len; if (sli4->config.perf_hint) { tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; tsend->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; } /* The upper 7 bits of csctl is the priority */ if (csctl & SLI4_MASK_CCP) { tsend->ccpe = 1; tsend->ccp = (csctl & SLI4_MASK_CCP); } if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) { tsend->app_id_valid = 1; tsend->wqes = 1; tsend_128->dw[31] = app_id; } return 0; } /** * @ingroup sli_fc * @brief Write a GEN_REQUEST64 work queue entry. * * @note This WQE is only used to send FC-CT commands. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sgl DMA memory for the request. * @param req_len Length of request. * @param max_rsp_len Max length of response. * @param timeout Time, in seconds, before an IO times out. Zero means infinite. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rnode Destination of request (that is, the remote node). * @param r_ctl R_CTL value for sequence. * @param type TYPE value for sequence. * @param df_ctl DF_CTL value for sequence. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout, uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type, uint8_t df_ctl) { sli4_gen_request64_wqe_t *gen = buf; sli4_sge_t *sge = NULL; ocs_memset(buf, 0, size); if (!sgl || !sgl->virt) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? 
sgl->virt : NULL); return -1; } sge = sgl->virt; if (sli4->config.sgl_pre_registered) { gen->xbl = FALSE; gen->dbde = TRUE; gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64; gen->bde.buffer_length = req_len; gen->bde.u.data.buffer_address_low = sge[0].buffer_address_low; gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high; } else { gen->xbl = TRUE; gen->bde.bde_type = SLI4_BDE_TYPE_BLP; gen->bde.buffer_length = 2 * sizeof(sli4_sge_t); gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); } gen->request_payload_length = req_len; gen->max_response_payload_length = max_rsp_len; gen->df_ctl = df_ctl; gen->type = type; gen->r_ctl = r_ctl; gen->xri_tag = xri; gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; gen->context_tag = rnode->indicator; gen->class = SLI4_ELS_REQUEST64_CLASS_3; gen->command = SLI4_WQE_GEN_REQUEST64; gen->timer = timeout; gen->request_tag = tag; gen->iod = SLI4_ELS_REQUEST64_DIR_READ; gen->qosd = TRUE; if (rnode->node_group) { gen->hlm = TRUE; gen->remote_n_port_id = rnode->fc_id & 0x00ffffff; } gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE; gen->cq_id = cq_id; return 0; } /** * @ingroup sli_fc * @brief Write a SEND_FRAME work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param sof Start of frame value. * @param eof End of frame value. * @param hdr Pointer to FC header data. * @param payload DMA memory for the payload. * @param req_len Length of payload. * @param timeout Time, in seconds, before an IO times out. Zero means infinite. * @param xri XRI for this exchange. * @param req_tag IO tag value. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr, ocs_dma_t *payload, uint32_t req_len, uint8_t timeout, uint16_t xri, uint16_t req_tag) { sli4_send_frame_wqe_t *sf = buf; ocs_memset(buf, 0, size); sf->dbde = TRUE; sf->bde.buffer_length = req_len; sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); /* Copy FC header */ sf->fc_header_0_1[0] = hdr[0]; sf->fc_header_0_1[1] = hdr[1]; sf->fc_header_2_5[0] = hdr[2]; sf->fc_header_2_5[1] = hdr[3]; sf->fc_header_2_5[2] = hdr[4]; sf->fc_header_2_5[3] = hdr[5]; sf->frame_length = req_len; sf->xri_tag = xri; sf->pu = 0; sf->context_tag = 0; sf->ct = 0; sf->command = SLI4_WQE_SEND_FRAME; sf->class = SLI4_ELS_REQUEST64_CLASS_3; sf->timer = timeout; sf->request_tag = req_tag; sf->eof = eof; sf->sof = sof; sf->qosd = 0; sf->lenloc = 1; sf->xc = 0; sf->xbl = 1; sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE; sf->cq_id = 0xffff; return 0; } /** * @ingroup sli_fc * @brief Write an XMIT_SEQUENCE64 work queue entry. * * This WQE is used to send FC-CT response frames. * * @note This API implements a restricted use of this WQE; a TODO would be to * support passing in sequence initiative and full SGLs. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param payload DMA memory for the request. * @param payload_len Length of request. * @param timeout Time, in seconds, before an IO times out. Zero means infinite. * @param ox_id Originator exchange ID. * @param xri XRI for this exchange. * @param tag IO tag value. * @param rnode Destination of request (that is, the remote node). * @param r_ctl R_CTL value for sequence.
* @param type TYPE value for sequence. * @param df_ctl DF_CTL value for sequence. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, uint32_t payload_len, uint8_t timeout, uint16_t ox_id, uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type, uint8_t df_ctl) { sli4_xmit_sequence64_wqe_t *xmit = buf; ocs_memset(buf, 0, size); if ((payload == NULL) || (payload->virt == NULL)) { ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", payload, payload ? payload->virt : NULL); return -1; } if (sli4->config.sgl_pre_registered) { xmit->dbde = TRUE; } else { xmit->xbl = TRUE; } xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64; xmit->bde.buffer_length = payload_len; xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); xmit->sequence_payload_len = payload_len; xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; xmit->relative_offset = 0; xmit->si = 0; /* sequence initiative - this matches what is seen from * FC switches in response to FCGS commands */ xmit->ft = 0; /* force transmit */ xmit->xo = 0; /* exchange responder */ xmit->ls = 1; /* last in sequence */ xmit->df_ctl = df_ctl; xmit->type = type; xmit->r_ctl = r_ctl; xmit->xri_tag = xri; xmit->context_tag = rnode->indicator; xmit->dif = 0; xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; xmit->bs = 0; xmit->command = SLI4_WQE_XMIT_SEQUENCE64; xmit->class = SLI4_ELS_REQUEST64_CLASS_3; xmit->pu = 0; xmit->timer = timeout; xmit->abort_tag = 0; xmit->request_tag = tag; xmit->remote_xid = ox_id; xmit->iod = SLI4_ELS_REQUEST64_DIR_READ; if (rnode->node_group) { xmit->hlm = TRUE; xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; } xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE; xmit->len_loc = 2; xmit->cq_id = 0xFFFF; return 0; } /** * @ingroup sli_fc * @brief Write a REQUEUE_XRI_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id) { sli4_requeue_xri_wqe_t *requeue = buf; ocs_memset(buf, 0, size); requeue->command = SLI4_WQE_REQUEUE_XRI; requeue->xri_tag = xri; requeue->request_tag = tag; requeue->xc = 1; requeue->qosd = 1; requeue->cq_id = cq_id; requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE; return 0; } int32_t sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint8_t r_ctl, uint8_t type, uint8_t df_ctl) { sli4_xmit_bcast64_wqe_t *bcast = buf; /* Command requires a temporary RPI (i.e.
unused remote node) */ if (rnode->attached) { ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator); return -1; } ocs_memset(buf, 0, size); bcast->dbde = TRUE; bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64; bcast->sequence_payload.buffer_length = payload_len; bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); bcast->sequence_payload_length = payload_len; bcast->df_ctl = df_ctl; bcast->type = type; bcast->r_ctl = r_ctl; bcast->xri_tag = xri; bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; bcast->context_tag = rnode->sport->indicator; bcast->class = SLI4_ELS_REQUEST64_CLASS_3; bcast->command = SLI4_WQE_XMIT_BCAST64; bcast->timer = timeout; bcast->request_tag = tag; bcast->temporary_rpi = rnode->indicator; bcast->len_loc = 0x1; bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE; bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE; bcast->cq_id = cq_id; return 0; } /** * @ingroup sli_fc * @brief Write an XMIT_BLS_RSP64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param payload Contents of the BLS payload to be sent. * @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param rnode Destination of request (that is, the remote node). * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload, uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint32_t s_id) { sli4_xmit_bls_rsp_wqe_t *bls = buf; /* * Callers can either specify RPI or S_ID, but not both */ if (rnode->attached && (s_id != UINT32_MAX)) { ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n", rnode->indicator); return -1; } ocs_memset(buf, 0, size); if (SLI_BLS_ACC == payload->type) { bls->payload_word0 = (payload->u.acc.seq_id_last << 16) | (payload->u.acc.seq_id_validity << 24); bls->high_seq_cnt = payload->u.acc.high_seq_cnt; bls->low_seq_cnt = payload->u.acc.low_seq_cnt; } else if (SLI_BLS_RJT == payload->type) { bls->payload_word0 = *((uint32_t *)&payload->u.rjt); bls->ar = TRUE; } else { ocs_log_test(sli4->os, "bad BLS type %#x\n", payload->type); return -1; } bls->ox_id = payload->ox_id; bls->rx_id = payload->rx_id; if (rnode->attached) { bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; bls->context_tag = rnode->indicator; } else { bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; bls->context_tag = rnode->sport->indicator; if (UINT32_MAX != s_id) { bls->local_n_port_id = s_id & 0x00ffffff; } else { bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff; } bls->remote_id = rnode->fc_id & 0x00ffffff; bls->temporary_rpi = rnode->indicator; } bls->xri_tag = xri; bls->class = SLI4_ELS_REQUEST64_CLASS_3; bls->command = SLI4_WQE_XMIT_BLS_RSP; bls->request_tag = tag; bls->qosd = TRUE; if (rnode->node_group) { bls->hlm = TRUE; bls->remote_id = rnode->fc_id & 0x00ffffff; } bls->cq_id = cq_id; bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE; return 0; } /** * @ingroup sli_fc * @brief Write a XMIT_ELS_RSP64_WQE work queue entry. * * @param sli4 SLI context. * @param buf Destination buffer for the WQE. * @param size Buffer size, in bytes. * @param rsp DMA memory for the ELS response. * @param rsp_len Length of ELS response, in bytes. 
* @param xri XRI for this exchange. * @param tag IO tag value. * @param cq_id The id of the completion queue where the WQE response is sent. * @param ox_id OX_ID of the exchange containing the request. * @param rnode Destination of the ELS response (that is, the remote node). * @param flags Optional attributes, including: * - SLI4_IO_CONTINUATION - IO is already active. * @param s_id S_ID used for special responses. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp, uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t ox_id, ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id) { sli4_xmit_els_rsp64_wqe_t *els = buf; ocs_memset(buf, 0, size); if (sli4->config.sgl_pre_registered) { els->dbde = TRUE; } else { els->xbl = TRUE; } els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64; els->els_response_payload.buffer_length = rsp_len; els->els_response_payload.u.data.buffer_address_low = ocs_addr32_lo(rsp->phys); els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys); els->els_response_payload_length = rsp_len; els->xri_tag = xri; els->class = SLI4_ELS_REQUEST64_CLASS_3; els->command = SLI4_WQE_ELS_RSP64; els->request_tag = tag; els->ox_id = ox_id; els->iod = SLI4_ELS_REQUEST64_DIR_WRITE; els->qosd = TRUE; if (flags & SLI4_IO_CONTINUATION) { els->xc = TRUE; } if (rnode->attached) { els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; els->context_tag = rnode->indicator; } else { els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; els->context_tag = rnode->sport->indicator; els->remote_id = rnode->fc_id & 0x00ffffff; els->temporary_rpi = rnode->indicator; if (UINT32_MAX != s_id) { els->sp = TRUE; els->s_id = s_id & 0x00ffffff; } } if (rnode->node_group) { els->hlm = TRUE; els->remote_id = rnode->fc_id & 0x00ffffff; } els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN; els->cq_id = cq_id; return 0; } /** * @ingroup sli_fc * @brief Process an asynchronous Link State event entry. * * @par Description * Parses Asynchronous Completion Queue Entry (ACQE), * creates an abstracted event, and calls registered callback functions. * * @param sli4 SLI context. * @param acqe Pointer to the ACQE. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_process_link_state(sli4_t *sli4, void *acqe) { sli4_link_state_t *link_state = acqe; sli4_link_event_t event = { 0 }; int32_t rc = 0; if (!sli4->link) { /* bail if there is no callback */ return 0; } if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) { event.topology = SLI_LINK_TOPO_NPORT; event.medium = SLI_LINK_MEDIUM_ETHERNET; } else { /* TODO is this supported for anything other than FCoE? 
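 * For now, any non-Ethernet link type is reported with the topology and
 * medium set to their MAX sentinel values and the handler returns an error.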
*/ ocs_log_test(sli4->os, "unsupported link type %#x\n", link_state->link_type); event.topology = SLI_LINK_TOPO_MAX; event.medium = SLI_LINK_MEDIUM_MAX; rc = -1; } switch (link_state->port_link_status) { case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN: case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN: event.status = SLI_LINK_STATUS_DOWN; break; case SLI4_PORT_LINK_STATUS_PHYSICAL_UP: case SLI4_PORT_LINK_STATUS_LOGICAL_UP: event.status = SLI_LINK_STATUS_UP; break; default: ocs_log_test(sli4->os, "unsupported link status %#x\n", link_state->port_link_status); event.status = SLI_LINK_STATUS_MAX; rc = -1; } switch (link_state->port_speed) { case 0: event.speed = 0; break; case 1: event.speed = 10; break; case 2: event.speed = 100; break; case 3: event.speed = 1000; break; case 4: event.speed = 10000; break; case 5: event.speed = 20000; break; case 6: event.speed = 25000; break; case 7: event.speed = 40000; break; case 8: event.speed = 100000; break; default: ocs_log_test(sli4->os, "unsupported port_speed %#x\n", link_state->port_speed); rc = -1; } sli4->link(sli4->link_arg, (void *)&event); return rc; } /** * @ingroup sli_fc * @brief Process an asynchronous Link Attention event entry. * * @par Description * Parses Asynchronous Completion Queue Entry (ACQE), * creates an abstracted event, and calls the registered callback functions. * * @param sli4 SLI context. * @param acqe Pointer to the ACQE. * * @todo XXX all events return LINK_UP. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_process_link_attention(sli4_t *sli4, void *acqe) { sli4_link_attention_t *link_attn = acqe; sli4_link_event_t event = { 0 }; ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x " "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x " "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type, link_attn->topology, link_attn->port_speed, link_attn->port_fault, link_attn->shared_link_status, link_attn->logical_link_speed, link_attn->event_tag); if (!sli4->link) { return 0; } event.medium = SLI_LINK_MEDIUM_FC; switch (link_attn->attn_type) { case SLI4_LINK_ATTN_TYPE_LINK_UP: event.status = SLI_LINK_STATUS_UP; break; case SLI4_LINK_ATTN_TYPE_LINK_DOWN: event.status = SLI_LINK_STATUS_DOWN; break; case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA: ocs_log_debug(sli4->os, "attn_type: no hard alpa\n"); event.status = SLI_LINK_STATUS_NO_ALPA; break; default: ocs_log_test(sli4->os, "attn_type: unknown\n"); break; } switch (link_attn->event_type) { case SLI4_FC_EVENT_LINK_ATTENTION: break; case SLI4_FC_EVENT_SHARED_LINK_ATTENTION: ocs_log_debug(sli4->os, "event_type: FC shared link event \n"); break; default: ocs_log_test(sli4->os, "event_type: unknown\n"); break; } switch (link_attn->topology) { case SLI4_LINK_ATTN_P2P: event.topology = SLI_LINK_TOPO_NPORT; break; case SLI4_LINK_ATTN_FC_AL: event.topology = SLI_LINK_TOPO_LOOP; break; case SLI4_LINK_ATTN_INTERNAL_LOOPBACK: ocs_log_debug(sli4->os, "topology Internal loopback\n"); event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL; break; case SLI4_LINK_ATTN_SERDES_LOOPBACK: ocs_log_debug(sli4->os, "topology serdes loopback\n"); event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL; break; default: ocs_log_test(sli4->os, "topology: unknown\n"); break; } event.speed = link_attn->port_speed * 1000; sli4->link(sli4->link_arg, (void *)&event); return 0; } /** * @ingroup sli_fc * @brief Parse an FC/FCoE work queue CQ entry. * * @param sli4 SLI context. * @param cq CQ to process. * @param cqe Pointer to the CQ entry. 
* @param etype CQ event type. * @param r_id Resource ID associated with this completion message (such as the IO tag). * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype, uint16_t *r_id) { uint8_t code = cqe[SLI4_CQE_CODE_OFFSET]; int32_t rc = -1; switch (code) { case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION: { sli4_fc_wcqe_t *wcqe = (void *)cqe; *etype = SLI_QENTRY_WQ; *r_id = wcqe->request_tag; rc = wcqe->status; /* Flag errors except for FCP_RSP_FAILURE */ if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) { ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n", wcqe->status, wcqe->hw_status, wcqe->request_tag, wcqe->wqe_specific_1, wcqe->wqe_specific_2, wcqe->xb); ocs_log_test(sli4->os, " %08X %08X %08X %08X\n", ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1], ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]); } /* TODO: need to pass additional status back out of here as well * as status (could overload rc as status/addlstatus are only 8 bits each) */ break; } case SLI4_CQE_CODE_RQ_ASYNC: { sli4_fc_async_rcqe_t *rcqe = (void *)cqe; *etype = SLI_QENTRY_RQ; *r_id = rcqe->rq_id; rc = rcqe->status; break; } case SLI4_CQE_CODE_RQ_ASYNC_V1: { sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe; *etype = SLI_QENTRY_RQ; *r_id = rcqe->rq_id; rc = rcqe->status; break; } case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: { sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe; *etype = SLI_QENTRY_OPT_WRITE_CMD; *r_id = optcqe->rq_id; rc = optcqe->status; break; } case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA: { sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe; *etype = SLI_QENTRY_OPT_WRITE_DATA; *r_id = dcqe->xri; rc = dcqe->status; /* Flag errors */ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) { ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n", dcqe->status, dcqe->hw_status, dcqe->xri, dcqe->total_data_placed, ((uint32_t*) cqe)[3], dcqe->xb); } break; } case SLI4_CQE_CODE_RQ_COALESCING: { sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe; *etype = SLI_QENTRY_RQ; *r_id = rcqe->rq_id; rc = rcqe->status; break; } case SLI4_CQE_CODE_XRI_ABORTED: { sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe; *etype = SLI_QENTRY_XABT; *r_id = xa->xri; rc = 0; break; } case SLI4_CQE_CODE_RELEASE_WQE: { sli4_fc_wqec_t *wqec = (void*) cqe; *etype = SLI_QENTRY_WQ_RELEASE; *r_id = wqec->wq_id; rc = 0; break; } default: ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code); *etype = SLI_QENTRY_MAX; *r_id = UINT16_MAX; } return rc; } /** * @ingroup sli_fc * @brief Return the ELS/CT response length. * * @param sli4 SLI context. * @param cqe Pointer to the CQ entry. * * @return Returns the length, in bytes. */ uint32_t sli_fc_response_length(sli4_t *sli4, uint8_t *cqe) { sli4_fc_wcqe_t *wcqe = (void *)cqe; return wcqe->wqe_specific_1; } /** * @ingroup sli_fc * @brief Return the FCP IO length. * * @param sli4 SLI context. * @param cqe Pointer to the CQ entry. * * @return Returns the length, in bytes. */ uint32_t sli_fc_io_length(sli4_t *sli4, uint8_t *cqe) { sli4_fc_wcqe_t *wcqe = (void *)cqe; return wcqe->wqe_specific_1; } /** * @ingroup sli_fc * @brief Retrieve the D_ID from the completion. * * @param sli4 SLI context. * @param cqe Pointer to the CQ entry. * @param d_id Pointer where the D_ID is written. * * @return Returns 0 on success, or a non-zero value on failure. 
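 *
 * @par Example
 * Illustrative use from a WQ completion handler; cqe is assumed to be a
 * work-queue completion entry for a previously issued ELS request:
 * @code
 * uint32_t d_id = 0;
 *
 * if (sli_fc_els_did(sli4, cqe, &d_id) == 0) {
 *         // d_id now holds the 24-bit destination ID from the WCQE
 * }
 * @endcode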
*/ int32_t sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id) { sli4_fc_wcqe_t *wcqe = (void *)cqe; *d_id = 0; if (wcqe->status) { return -1; } else { *d_id = wcqe->wqe_specific_2 & 0x00ffffff; return 0; } } uint32_t sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe) { sli4_fc_wcqe_t *wcqe = (void *)cqe; uint32_t mask; switch (wcqe->status) { case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: mask = UINT32_MAX; break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: case SLI4_FC_WCQE_STATUS_CMD_REJECT: mask = 0xff; break; case SLI4_FC_WCQE_STATUS_NPORT_RJT: case SLI4_FC_WCQE_STATUS_FABRIC_RJT: case SLI4_FC_WCQE_STATUS_NPORT_BSY: case SLI4_FC_WCQE_STATUS_FABRIC_BSY: case SLI4_FC_WCQE_STATUS_LS_RJT: mask = UINT32_MAX; break; case SLI4_FC_WCQE_STATUS_DI_ERROR: mask = UINT32_MAX; break; default: mask = 0; } return wcqe->wqe_specific_2 & mask; } /** * @ingroup sli_fc * @brief Retrieve the RQ index from the completion. * * @param sli4 SLI context. * @param cqe Pointer to the CQ entry. * @param rq_id Pointer where the rq_id is written. * @param index Pointer where the index is written. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index) { sli4_fc_async_rcqe_t *rcqe = (void *)cqe; sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe; int32_t rc = -1; uint8_t code = 0; *rq_id = 0; *index = UINT32_MAX; code = cqe[SLI4_CQE_CODE_OFFSET]; if (code == SLI4_CQE_CODE_RQ_ASYNC) { *rq_id = rcqe->rq_id; if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) { *index = rcqe->rq_element_index; rc = 0; } else { *index = rcqe->rq_element_index; rc = rcqe->status; ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length); } } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) { *rq_id = rcqe_v1->rq_id; if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) { *index = rcqe_v1->rq_element_index; rc = 0; } else { *index = rcqe_v1->rq_element_index; rc = rcqe_v1->status; ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status), rcqe_v1->rq_id, rcqe_v1->rq_element_index, rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte, rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length); } } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) { sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe; *rq_id = optcqe->rq_id; if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) { *index = optcqe->rq_element_index; rc = 0; } else { *index = optcqe->rq_element_index; rc = optcqe->status; ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n", optcqe->status, sli_fc_get_status_string(optcqe->status), optcqe->rq_id, optcqe->rq_element_index, optcqe->payload_data_placement_length, optcqe->header_data_placement_length, optcqe->oox, optcqe->agxr, optcqe->xri, optcqe->rpi); } } else if (code == SLI4_CQE_CODE_RQ_COALESCING) { sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe; *rq_id = rcqe->rq_id; if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) { *index = rcqe->rq_element_index; rc = 0; } else { *index = UINT32_MAX; rc = rcqe->status; ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n", rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id, 
rcqe->rq_element_index, rcqe->rq_id, rcqe->sequence_reporting_placement_length); } } else { *index = UINT32_MAX; rc = rcqe->status; ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", rcqe->status, rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length); } return rc; } /** * @ingroup sli_fc * @brief Process an asynchronous FCoE event entry. * * @par Description * Parses Asynchronous Completion Queue Entry (ACQE), * creates an abstracted event, and calls the registered callback functions. * * @param sli4 SLI context. * @param acqe Pointer to the ACQE. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_process_fcoe(sli4_t *sli4, void *acqe) { sli4_fcoe_fip_t *fcoe = acqe; sli4_fip_event_t event = { 0 }; uint32_t mask = UINT32_MAX; ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n", fcoe->event_type, fcoe->fcf_count, fcoe->event_tag); if (!sli4->fip) { return 0; } event.type = fcoe->event_type; event.index = UINT32_MAX; switch (fcoe->event_type) { case SLI4_FCOE_FIP_FCF_DISCOVERED: ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information); break; case SLI4_FCOE_FIP_FCF_TABLE_FULL: ocs_log_debug(sli4->os, "FCF Table Full\n"); mask = 0; break; case SLI4_FCOE_FIP_FCF_DEAD: ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information); break; case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: mask = UINT16_MAX; ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask); break; case SLI4_FCOE_FIP_FCF_MODIFIED: ocs_log_debug(sli4->os, "FCF Modified\n"); break; default: ocs_log_test(sli4->os, "bad FCoE type %#x", fcoe->event_type); mask = 0; } if (mask != 0) { event.index = fcoe->event_information & mask; } sli4->fip(sli4->fip_arg, &event); return 0; } /** * @ingroup sli_fc * @brief Allocate a receive queue. * * @par Description * Allocates DMA memory and configures the requested queue type. * * @param sli4 SLI context. * @param q Pointer to the queue object for the header. * @param n_entries Number of entries to allocate. * @param buffer_size buffer size for the queue. * @param cq Associated CQ. * @param ulp The ULP to bind * @param is_hdr Used to validate the rq_id and set the type of queue * * @return Returns 0 on success, or -1 on failure. */ int32_t sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q, uint32_t n_entries, uint32_t buffer_size, sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr) { int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t); if ((sli4 == NULL) || (q == NULL)) { void *os = sli4 != NULL ? 
/** * @ingroup sli_fc * @brief Process an asynchronous FCoE event entry. * * @par Description * Parses the Asynchronous Completion Queue Entry (ACQE), * creates an abstracted event, and calls the registered callback functions. * * @param sli4 SLI context. * @param acqe Pointer to the ACQE. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t sli_fc_process_fcoe(sli4_t *sli4, void *acqe) { sli4_fcoe_fip_t *fcoe = acqe; sli4_fip_event_t event = { 0 }; uint32_t mask = UINT32_MAX; ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n", fcoe->event_type, fcoe->fcf_count, fcoe->event_tag); if (!sli4->fip) { return 0; } event.type = fcoe->event_type; event.index = UINT32_MAX; switch (fcoe->event_type) { case SLI4_FCOE_FIP_FCF_DISCOVERED: ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information); break; case SLI4_FCOE_FIP_FCF_TABLE_FULL: ocs_log_debug(sli4->os, "FCF Table Full\n"); mask = 0; break; case SLI4_FCOE_FIP_FCF_DEAD: ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information); break; case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: mask = UINT16_MAX; ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask); break; case SLI4_FCOE_FIP_FCF_MODIFIED: ocs_log_debug(sli4->os, "FCF Modified\n"); break; default: ocs_log_test(sli4->os, "bad FCoE type %#x\n", fcoe->event_type); mask = 0; } if (mask != 0) { event.index = fcoe->event_information & mask; } sli4->fip(sli4->fip_arg, &event); return 0; } /** * @ingroup sli_fc * @brief Allocate a receive queue. * * @par Description * Allocates DMA memory and configures the requested queue type. * * @param sli4 SLI context. * @param q Pointer to the queue object for the header. * @param n_entries Number of entries to allocate. * @param buffer_size Buffer size for the queue. * @param cq Associated CQ. * @param ulp The ULP to bind. * @param is_hdr Used to validate the rq_id and set the type of queue. * * @return Returns 0 on success, or -1 on failure. */ int32_t sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q, uint32_t n_entries, uint32_t buffer_size, sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr) { int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t); if ((sli4 == NULL) || (q == NULL)) { void *os = sli4 != NULL ? sli4->os : NULL; ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q); return -1; } if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE, n_entries, SLI_PAGE_SIZE)) { return -1; } if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) { rq_create = sli_cmd_fcoe_rq_create; } else { rq_create = sli_cmd_fcoe_rq_create_v1; } if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, cq->id, ulp, buffer_size)) { if (__sli_create_queue(sli4, q)) { ocs_dma_free(sli4->os, &q->dma); return -1; } if (is_hdr && q->id & 1) { ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id); ocs_dma_free(sli4->os, &q->dma); return -1; } else if (!is_hdr && (q->id & 1) == 0) { ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id); ocs_dma_free(sli4->os, &q->dma); return -1; } } else { return -1; } q->u.flag.is_hdr = is_hdr; if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { q->u.flag.rq_batch = TRUE; } return 0; } /** * @ingroup sli_fc * @brief Allocate a receive queue set. * * @param sli4 SLI context. * @param num_rq_pairs Number of RQ pairs to create. * @param qs Pointers to the queue objects for both header and data. * The length of this array should be 2 * num_rq_pairs. * @param base_cq_id Base CQ ID; assumes CQs base_cq_id through (base_cq_id + num_rq_pairs - 1) have been allotted, one per RQ pair. * @param n_entries Number of entries in each RQ. * @param header_buffer_size Buffer size for the header RQs. * @param payload_buffer_size Buffer size for the payload RQs. * @param ulp The ULP to bind. * * @return Returns 0 on success, or -1 on failure. */ int32_t sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs, sli4_queue_t *qs[], uint32_t base_cq_id, uint32_t n_entries, uint32_t header_buffer_size, uint32_t payload_buffer_size, uint16_t ulp) { uint32_t i, p, offset = 0; uint32_t payload_size, total_page_count = 0; uintptr_t addr; ocs_dma_t dma; sli4_res_common_create_queue_set_t *rsp = NULL; sli4_req_fcoe_rq_create_v2_t *req = NULL; ocs_memset(&dma, 0, sizeof(dma)); for (i = 0; i < (num_rq_pairs * 2); i++) { if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE, n_entries, SLI_PAGE_SIZE)) { goto error; } } total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2; /* Payload length must accommodate both request and response */ payload_size = max((sizeof(sli4_req_fcoe_rq_create_v1_t) + (8 * total_page_count)), sizeof(sli4_res_common_create_queue_set_t)); if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) { ocs_log_err(sli4->os, "DMA allocation failed\n"); goto error; } ocs_memset(dma.virt, 0, payload_size); if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, payload_size, &dma) == -1) { goto error; } req = (sli4_req_fcoe_rq_create_v2_t *)((uint8_t *)dma.virt); /* Fill Header fields */ req->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE; req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; req->hdr.version = 2; req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) - sizeof(sli4_req_hdr_t) + (8 * total_page_count); /* Fill Payload fields */ req->dnb = TRUE; req->num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE); req->rqe_count = qs[0]->dma.size / SLI4_FCOE_RQE_SIZE; req->rqe_size = SLI4_FCOE_RQE_SIZE_8; req->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096; req->rq_count = num_rq_pairs * 2; req->base_cq_id = base_cq_id; req->hdr_buffer_size = header_buffer_size; req->payload_buffer_size = payload_buffer_size; for (i = 0; i < (num_rq_pairs * 2); i++) { for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += SLI_PAGE_SIZE) { req->page_physical_address[offset].low = ocs_addr32_lo(addr); req->page_physical_address[offset].high = ocs_addr32_hi(addr); offset++; } } if (sli_bmbx_command(sli4)) { ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n"); goto error; } rsp = (void *)((uint8_t *)dma.virt); if (rsp->hdr.status) { ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n", rsp->hdr.status, rsp->hdr.additional_status); goto error; } else { for (i = 0; i < (num_rq_pairs * 2); i++) { qs[i]->id = i + rsp->q_id; if ((qs[i]->id & 1) == 0) { qs[i]->u.flag.is_hdr = TRUE; } else { qs[i]->u.flag.is_hdr = FALSE; } qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off; qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset; } } ocs_dma_free(sli4->os, &dma); return 0; error: for (i = 0; i < (num_rq_pairs * 2); i++) { if (qs[i]->dma.size) { ocs_dma_free(sli4->os, &qs[i]->dma); } } if (dma.size) { ocs_dma_free(sli4->os, &dma); } return -1; }
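/*
 * Editor's sketch (hypothetical usage of sli_fc_rq_set_alloc() above):
 * creating a single RQ pair fed by the CQ at base_cq_id. The sizing
 * constants are placeholders, not recommended values. Note that the
 * firmware assigns the queue IDs; RQs that come back with even IDs are
 * flagged as header RQs, odd IDs as payload RQs.
 */
static int32_t
example_make_rq_pair(sli4_t *sli4, sli4_queue_t *qs[2], uint32_t base_cq_id)
{
	return sli_fc_rq_set_alloc(sli4, 1, qs, base_cq_id,
				   512,		/* entries per RQ */
				   128,		/* header buffer size */
				   2048,	/* payload buffer size */
				   0);		/* ULP */
}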
/** * @ingroup sli_fc * @brief Get the RPI resource requirements. * * @param sli4 SLI context. * @param n_rpi Number of RPIs desired. * * @return Returns the number of bytes needed. This value may be zero. */ uint32_t sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi) { uint32_t bytes = 0; /* Check if header templates needed */ if (sli4->config.hdr_template_req) { /* round up to a page */ bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE); } return bytes; } /** * @ingroup sli_fc * @brief Return a text string corresponding to a CQE status value * * @param status Status value * * @return Returns corresponding string, otherwise "unknown" */ const char * sli_fc_get_status_string(uint32_t status) { static struct { uint32_t code; const char *label; } lookup[] = { {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"}, {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"}, {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"}, {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"}, {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"}, {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"}, {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"}, {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"}, {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"}, {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"}, {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"}, {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"}, {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"}, {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"}, {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"}, {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"}, }; uint32_t i; for (i = 0; i < ARRAY_SIZE(lookup); i++) { if (status == lookup[i].code) { return lookup[i].label; } } return "unknown"; } diff --git a/sys/dev/ocs_fc/sli4.h b/sys/dev/ocs_fc/sli4.h index 54e1915a715c..c8bb9ab5c015 100644 --- a/sys/dev/ocs_fc/sli4.h +++ b/sys/dev/ocs_fc/sli4.h @@ -1,5638 +1,5678 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2.
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Define common SLI-4 structures and function prototypes. */ #ifndef _SLI4_H #define _SLI4_H #include "ocs_os.h" #define SLI_PAGE_SIZE (4096) #define SLI_SUB_PAGE_MASK (SLI_PAGE_SIZE - 1) #define SLI_PAGE_SHIFT 12 #define SLI_ROUND_PAGE(b) (((b) + SLI_SUB_PAGE_MASK) & ~SLI_SUB_PAGE_MASK) #define SLI4_BMBX_TIMEOUT_MSEC 30000 #define SLI4_FW_READY_TIMEOUT_MSEC 30000 static inline uint32_t sli_page_count(size_t bytes, uint32_t page_size) { uint32_t mask = page_size - 1; uint32_t shift = 0; switch (page_size) { case 4096: shift = 12; break; case 8192: shift = 13; break; case 16384: shift = 14; break; case 32768: shift = 15; break; case 65536: shift = 16; break; default: return 0; } return (bytes + mask) >> shift; } /************************************************************************* * Common PCI configuration space register definitions */ #define SLI4_PCI_CLASS_REVISION 0x0008 /** register offset */ #define SLI4_PCI_REV_ID_SHIFT 0 #define SLI4_PCI_REV_ID_MASK 0xff #define SLI4_PCI_CLASS_SHIFT 8 #define SLI4_PCI_CLASS_MASK 0xfff #define SLI4_PCI_SOFT_RESET_CSR 0x005c /** register offset */ #define SLI4_PCI_SOFT_RESET_MASK 0x0080 /************************************************************************* * Common SLI-4 register offsets and field definitions */ /** * @brief SLI_INTF - SLI Interface Definition Register */ #define SLI4_INTF_REG 0x0058 /** register offset */ #define SLI4_INTF_VALID_SHIFT 29 #define SLI4_INTF_VALID_MASK 0x7 #define SLI4_INTF_VALID 0x6 #define SLI4_INTF_IF_TYPE_SHIFT 12 #define SLI4_INTF_IF_TYPE_MASK 0xf #define SLI4_INTF_SLI_FAMILY_SHIFT 8 #define SLI4_INTF_SLI_FAMILY_MASK 0xf #define SLI4_INTF_SLI_REVISION_SHIFT 4 #define SLI4_INTF_SLI_REVISION_MASK 0xf #define SLI4_FAMILY_CHECK_ASIC_TYPE 0xf #define SLI4_IF_TYPE_BE3_SKH_PF 0 #define SLI4_IF_TYPE_BE3_SKH_VF 1 #define SLI4_IF_TYPE_LANCER_FC_ETH 2 #define SLI4_IF_TYPE_LANCER_RDMA 3 #define SLI4_IF_TYPE_LANCER_G7 6 #define SLI4_MAX_IF_TYPES 7 /** * @brief ASIC_ID - SLI ASIC Type and Revision Register */ #define SLI4_ASIC_ID_REG 0x009c /* register offset */ #define SLI4_ASIC_REV_SHIFT 0 #define SLI4_ASIC_REV_MASK 0xf #define SLI4_ASIC_VER_SHIFT 4 #define SLI4_ASIC_VER_MASK 0xf #define SLI4_ASIC_GEN_SHIFT 8 #define SLI4_ASIC_GEN_MASK 0xff #define SLI4_ASIC_GEN_BE2 0x00 #define SLI4_ASIC_GEN_BE3 0x03 #define 
SLI4_ASIC_GEN_SKYHAWK 0x04 #define SLI4_ASIC_GEN_CORSAIR 0x05 #define SLI4_ASIC_GEN_LANCER 0x0b /** * @brief BMBX - Bootstrap Mailbox Register */ #define SLI4_BMBX_REG 0x0160 /* register offset */ #define SLI4_BMBX_MASK_HI 0x3 #define SLI4_BMBX_MASK_LO 0xf #define SLI4_BMBX_RDY BIT(0) #define SLI4_BMBX_HI BIT(1) #define SLI4_BMBX_WRITE_HI(r) ((ocs_addr32_hi(r) & ~SLI4_BMBX_MASK_HI) | \ SLI4_BMBX_HI) #define SLI4_BMBX_WRITE_LO(r) (((ocs_addr32_hi(r) & SLI4_BMBX_MASK_HI) << 30) | \ (((r) & ~SLI4_BMBX_MASK_LO) >> 2)) #define SLI4_BMBX_SIZE 256 /** * @brief EQCQ_DOORBELL - EQ and CQ Doorbell Register */ #define SLI4_EQCQ_DOORBELL_REG 0x120 #define SLI4_EQCQ_DOORBELL_CI BIT(9) #define SLI4_EQCQ_DOORBELL_QT BIT(10) #define SLI4_EQCQ_DOORBELL_ARM BIT(29) #define SLI4_EQCQ_DOORBELL_SE BIT(31) #define SLI4_EQCQ_NUM_SHIFT 16 #define SLI4_EQCQ_NUM_MASK 0x01ff #define SLI4_EQCQ_EQ_ID_MASK 0x3fff #define SLI4_EQCQ_CQ_ID_MASK 0x7fff #define SLI4_EQCQ_EQ_ID_MASK_LO 0x01ff #define SLI4_EQCQ_CQ_ID_MASK_LO 0x03ff #define SLI4_EQCQ_EQCQ_ID_MASK_HI 0xf800 #define SLI4_IF6_EQ_DOORBELL_REG 0x120 #define SLI4_IF6_CQ_DOORBELL_REG 0xC0 /** * @brief SLIPORT_CONTROL - SLI Port Control Register */ #define SLI4_SLIPORT_CONTROL_REG 0x0408 #define SLI4_SLIPORT_CONTROL_END BIT(30) #define SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN (0) #define SLI4_SLIPORT_CONTROL_BIG_ENDIAN BIT(30) #define SLI4_SLIPORT_CONTROL_IP BIT(27) #define SLI4_SLIPORT_CONTROL_IDIS BIT(22) #define SLI4_SLIPORT_CONTROL_FDD BIT(31) /** * @brief SLI4_SLIPORT_ERROR1 - SLI Port Error Register */ #define SLI4_SLIPORT_ERROR1 0x040c /** * @brief SLI4_SLIPORT_ERROR2 - SLI Port Error Register */ #define SLI4_SLIPORT_ERROR2 0x0410 /** * @brief User error registers */ #define SLI4_UERR_STATUS_LOW_REG 0xA0 #define SLI4_UERR_STATUS_HIGH_REG 0xA4 #define SLI4_UERR_MASK_LOW_REG 0xA8 #define SLI4_UERR_MASK_HIGH_REG 0xAC /** * @brief Registers for generating software UE (BE3) */ #define SLI4_SW_UE_CSR1 0x138 #define SLI4_SW_UE_CSR2 0x1FFFC /** * @brief Registers for generating software UE (Skyhawk) */ #define SLI4_SW_UE_REG 0x5C /* register offset */ static inline uint32_t sli_eq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm) { uint32_t reg = 0; #if BYTE_ORDER == LITTLE_ENDIAN struct { uint32_t eq_id_lo:9, ci:1, /* clear interrupt */ qt:1, /* queue type */ eq_id_hi:5, number_popped:13, arm:1, :1, se:1; } * eq_doorbell = (void *)&reg; #else #error big endian version not defined #endif eq_doorbell->eq_id_lo = id & SLI4_EQCQ_EQ_ID_MASK_LO; eq_doorbell->qt = 1; /* EQ is type 1 (section 2.2.3.3 SLI Arch) */ eq_doorbell->eq_id_hi = (id >> 9) & 0x1f; eq_doorbell->number_popped = n_popped; eq_doorbell->arm = arm; eq_doorbell->ci = TRUE; return reg; } static inline uint32_t sli_cq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm) { uint32_t reg = 0; #if BYTE_ORDER == LITTLE_ENDIAN struct { uint32_t cq_id_lo:10, qt:1, /* queue type */ cq_id_hi:5, number_popped:13, arm:1, :1, se:1; } * cq_doorbell = (void *)&reg; #else #error big endian version not defined #endif cq_doorbell->cq_id_lo = id & SLI4_EQCQ_CQ_ID_MASK_LO; cq_doorbell->qt = 0; /* CQ is type 0 (section 2.2.3.3 SLI Arch) */ cq_doorbell->cq_id_hi = (id >> 10) & 0x1f; cq_doorbell->number_popped = n_popped; cq_doorbell->arm = arm; return reg; }
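/*
 * Editor's sketch (illustrative only) of consuming the doorbell encoders
 * above: pop the CQEs just processed and re-arm the CQ with one register
 * write. ocs_reg_write32() and the sli4_queue_t doorbell fields are used
 * elsewhere in the driver; this helper itself is hypothetical.
 */
static inline void
example_cq_rearm(sli4_t *sli4, sli4_queue_t *q, uint16_t n_processed)
{
	uint32_t val = sli_cq_doorbell(n_processed, q->id, TRUE);

	ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
}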
static inline uint32_t sli_iftype6_eq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm) { uint32_t reg = 0; #if BYTE_ORDER == LITTLE_ENDIAN struct { uint32_t eq_id:12, :4, /* clear interrupt */ number_popped:13, arm:1, :1, io:1; } * eq_doorbell = (void *)&reg; #else #error big endian version not defined #endif eq_doorbell->eq_id = id; eq_doorbell->number_popped = n_popped; eq_doorbell->arm = arm; return reg; } static inline uint32_t sli_iftype6_cq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm) { uint32_t reg = 0; #if BYTE_ORDER == LITTLE_ENDIAN struct { uint32_t cq_id:16, number_popped:13, arm:1, :1, se:1; } * cq_doorbell = (void *)&reg; #else #error big endian version not defined #endif cq_doorbell->cq_id = id; cq_doorbell->number_popped = n_popped; cq_doorbell->arm = arm; return reg; } /** * @brief MQ_DOORBELL - MQ Doorbell Register */ #define SLI4_MQ_DOORBELL_REG 0x0140 /* register offset */ #define SLI4_IF6_MQ_DOORBELL_REG 0x0160 /* register offset for if_type = 6 */ #define SLI4_MQ_DOORBELL_NUM_SHIFT 16 #define SLI4_MQ_DOORBELL_NUM_MASK 0x3fff #define SLI4_MQ_DOORBELL_ID_MASK 0xffff #define SLI4_MQ_DOORBELL(n, i) ((((n) & SLI4_MQ_DOORBELL_NUM_MASK) << SLI4_MQ_DOORBELL_NUM_SHIFT) | \ ((i) & SLI4_MQ_DOORBELL_ID_MASK)) /** * @brief RQ_DOORBELL - RQ Doorbell Register */ #define SLI4_RQ_DOORBELL_REG 0x0a0 /* register offset */ #define SLI4_IF6_RQ_DOORBELL_REG 0x0080 /* register offset for if_type = 6 */ #define SLI4_RQ_DOORBELL_NUM_SHIFT 16 #define SLI4_RQ_DOORBELL_NUM_MASK 0x3fff #define SLI4_RQ_DOORBELL_ID_MASK 0xffff #define SLI4_RQ_DOORBELL(n, i) ((((n) & SLI4_RQ_DOORBELL_NUM_MASK) << SLI4_RQ_DOORBELL_NUM_SHIFT) | \ ((i) & SLI4_RQ_DOORBELL_ID_MASK)) /** * @brief WQ_DOORBELL - WQ Doorbell Register */ #define SLI4_IO_WQ_DOORBELL_REG 0x040 /* register offset */ #define SLI4_IF6_WQ_DOORBELL_REG 0x040 /* register offset for if_type = 6 */ #define SLI4_WQ_DOORBELL_IDX_SHIFT 16 #define SLI4_WQ_DOORBELL_IDX_MASK 0x00ff #define SLI4_WQ_DOORBELL_NUM_SHIFT 24 #define SLI4_WQ_DOORBELL_NUM_MASK 0x00ff #define SLI4_WQ_DOORBELL_ID_MASK 0xffff #define SLI4_WQ_DOORBELL(n, x, i) ((((n) & SLI4_WQ_DOORBELL_NUM_MASK) << SLI4_WQ_DOORBELL_NUM_SHIFT) | \ (((x) & SLI4_WQ_DOORBELL_IDX_MASK) << SLI4_WQ_DOORBELL_IDX_SHIFT) | \ ((i) & SLI4_WQ_DOORBELL_ID_MASK)) /** * @brief SLIPORT_SEMAPHORE - SLI Port Host and Port Status Register */ #define SLI4_PORT_SEMAPHORE_REG_0 0x00ac /** register offset Interface Type 0 + 1 */ #define SLI4_PORT_SEMAPHORE_REG_1 0x0180 /** register offset Interface Type 0 + 1 */ #define SLI4_PORT_SEMAPHORE_REG_236 0x0400 /** register offset Interface Type 2 + 3 + 6*/ #define SLI4_PORT_SEMAPHORE_PORT_MASK 0x0000ffff #define SLI4_PORT_SEMAPHORE_PORT(r) ((r) & SLI4_PORT_SEMAPHORE_PORT_MASK) #define SLI4_PORT_SEMAPHORE_HOST_MASK 0x00ff0000 #define SLI4_PORT_SEMAPHORE_HOST_SHIFT 16 #define SLI4_PORT_SEMAPHORE_HOST(r) (((r) & SLI4_PORT_SEMAPHORE_HOST_MASK) >> \ SLI4_PORT_SEMAPHORE_HOST_SHIFT) #define SLI4_PORT_SEMAPHORE_SCR2 BIT(26) /** scratch area 2 */ #define SLI4_PORT_SEMAPHORE_SCR1 BIT(27) /** scratch area 1 */ #define SLI4_PORT_SEMAPHORE_IPC BIT(28) /** IP conflict */ #define SLI4_PORT_SEMAPHORE_NIP BIT(29) /** no IP address */ #define SLI4_PORT_SEMAPHORE_SFI BIT(30) /** secondary firmware image used */ #define SLI4_PORT_SEMAPHORE_PERR BIT(31) /** POST fatal error */ #define SLI4_PORT_SEMAPHORE_STATUS_POST_READY 0xc000 #define SLI4_PORT_SEMAPHORE_STATUS_UNRECOV_ERR 0xf000 #define SLI4_PORT_SEMAPHORE_STATUS_ERR_MASK 0xf000 #define SLI4_PORT_SEMAPHORE_IN_ERR(r) (SLI4_PORT_SEMAPHORE_STATUS_UNRECOV_ERR == ((r) & \ SLI4_PORT_SEMAPHORE_STATUS_ERR_MASK))
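/*
 * Editor's sketch (hypothetical helper) decoding a SLIPORT_SEMAPHORE value
 * with the macros above; sem would come from a read of the semaphore
 * register for the active if_type (one of the three offsets defined above).
 */
static inline int32_t
example_post_state(uint32_t sem)
{
	if (SLI4_PORT_SEMAPHORE_IN_ERR(sem)) {
		return -1;	/* POST hit an unrecoverable error */
	}
	/* The low 16 bits report port status; 0xc000 means POST completed */
	return (SLI4_PORT_SEMAPHORE_PORT(sem) ==
	    SLI4_PORT_SEMAPHORE_STATUS_POST_READY) ? 0 : 1;
}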
/** * @brief SLIPORT_STATUS - SLI Port Status Register */ #define SLI4_PORT_STATUS_REG_236 0x0404 /** register offset Interface Type 2 + 3 + 6*/ #define SLI4_PORT_STATUS_FDP BIT(21) /** function specific dump present */ #define SLI4_PORT_STATUS_RDY BIT(23) /** ready */ #define SLI4_PORT_STATUS_RN BIT(24) /** reset needed */ #define SLI4_PORT_STATUS_DIP BIT(25) /** dump present */ #define SLI4_PORT_STATUS_OTI BIT(29) /** over temp indicator */ #define SLI4_PORT_STATUS_END BIT(30) /** endianness */ #define SLI4_PORT_STATUS_ERR BIT(31) /** SLI port error */ #define SLI4_PORT_STATUS_READY(r) ((r) & SLI4_PORT_STATUS_RDY) #define SLI4_PORT_STATUS_ERROR(r) ((r) & SLI4_PORT_STATUS_ERR) #define SLI4_PORT_STATUS_DUMP_PRESENT(r) ((r) & SLI4_PORT_STATUS_DIP) #define SLI4_PORT_STATUS_FDP_PRESENT(r) ((r) & SLI4_PORT_STATUS_FDP) #define SLI4_PHSDEV_CONTROL_REG_236 0x0414 /** register offset Interface Type 2 + 3 + 6*/ #define SLI4_PHYDEV_CONTROL_DRST BIT(0) /** physical device reset */ #define SLI4_PHYDEV_CONTROL_FRST BIT(1) /** firmware reset */ #define SLI4_PHYDEV_CONTROL_DD BIT(2) /** diagnostic dump */ #define SLI4_PHYDEV_CONTROL_FRL_MASK 0x000000f0 #define SLI4_PHYDEV_CONTROL_FRL_SHIFT 4 #define SLI4_PHYDEV_CONTROL_FRL(r) (((r) & SLI4_PHYDEV_CONTROL_FRL_MASK) >> \ SLI4_PHYDEV_CONTROL_FRL_SHIFT) /************************************************************************* * SLI-4 mailbox command formats and definitions */ typedef struct sli4_mbox_command_header_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t :8, command:8, status:16; /** Port writes to indicate success / fail */ #else #error big endian version not defined #endif } sli4_mbox_command_header_t; #define SLI4_MBOX_COMMAND_CONFIG_LINK 0x07 #define SLI4_MBOX_COMMAND_DUMP 0x17 #define SLI4_MBOX_COMMAND_DOWN_LINK 0x06 #define SLI4_MBOX_COMMAND_INIT_LINK 0x05 #define SLI4_MBOX_COMMAND_INIT_VFI 0xa3 #define SLI4_MBOX_COMMAND_INIT_VPI 0xa4 #define SLI4_MBOX_COMMAND_POST_XRI 0xa7 #define SLI4_MBOX_COMMAND_RELEASE_XRI 0xac #define SLI4_MBOX_COMMAND_READ_CONFIG 0x0b #define SLI4_MBOX_COMMAND_READ_STATUS 0x0e #define SLI4_MBOX_COMMAND_READ_NVPARMS 0x02 #define SLI4_MBOX_COMMAND_READ_REV 0x11 #define SLI4_MBOX_COMMAND_READ_LNK_STAT 0x12 #define SLI4_MBOX_COMMAND_READ_SPARM64 0x8d #define SLI4_MBOX_COMMAND_READ_TOPOLOGY 0x95 #define SLI4_MBOX_COMMAND_REG_FCFI 0xa0 #define SLI4_MBOX_COMMAND_REG_FCFI_MRQ 0xaf #define SLI4_MBOX_COMMAND_REG_RPI 0x93 #define SLI4_MBOX_COMMAND_REG_RX_RQ 0xa6 #define SLI4_MBOX_COMMAND_REG_VFI 0x9f #define SLI4_MBOX_COMMAND_REG_VPI 0x96 #define SLI4_MBOX_COMMAND_REQUEST_FEATURES 0x9d #define SLI4_MBOX_COMMAND_SLI_CONFIG 0x9b #define SLI4_MBOX_COMMAND_UNREG_FCFI 0xa2 #define SLI4_MBOX_COMMAND_UNREG_RPI 0x14 #define SLI4_MBOX_COMMAND_UNREG_VFI 0xa1 #define SLI4_MBOX_COMMAND_UNREG_VPI 0x97 #define SLI4_MBOX_COMMAND_WRITE_NVPARMS 0x03 #define SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY 0xAD #define SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP 0xAE #define SLI4_MBOX_STATUS_SUCCESS 0x0000 #define SLI4_MBOX_STATUS_FAILURE 0x0001 #define SLI4_MBOX_STATUS_RPI_NOT_REG 0x1400 /** * @brief Buffer Descriptor Entry (BDE) */ typedef struct sli4_bde_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t buffer_length:24, bde_type:8; union { struct { uint32_t buffer_address_low; uint32_t buffer_address_high; } data; struct { uint32_t offset; uint32_t rsvd2; } imm; struct { uint32_t sgl_segment_address_low; uint32_t sgl_segment_address_high; } blp; } u; #else #error big endian version not defined #endif } sli4_bde_t; #define SLI4_BDE_TYPE_BDE_64 0x00 /** Generic 64-bit data */ #define SLI4_BDE_TYPE_BDE_IMM 0x01 /** Immediate data */ #define SLI4_BDE_TYPE_BLP 0x40 /** Buffer List Pointer */ /** * @brief Scatter-Gather Entry (SGE) */ typedef struct sli4_sge_s { #if BYTE_ORDER ==
LITTLE_ENDIAN uint32_t buffer_address_high; uint32_t buffer_address_low; uint32_t data_offset:27, sge_type:4, last:1; uint32_t buffer_length; #else #error big endian version not defined #endif } sli4_sge_t; /** * @brief T10 DIF Scatter-Gather Entry (SGE) */ typedef struct sli4_dif_sge_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t buffer_address_high; uint32_t buffer_address_low; uint32_t :27, sge_type:4, last:1; uint32_t :32; #else #error big endian version not defined #endif } sli4_dif_sge_t; /** * @brief T10 DIF Seed Scatter-Gather Entry (SGE) */ typedef struct sli4_diseed_sge_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t ref_tag_cmp; uint32_t ref_tag_repl; uint32_t app_tag_repl:16, :2, hs:1, ws:1, ic:1, ics:1, atrt:1, at:1, fwd_app_tag:1, repl_app_tag:1, head_insert:1, sge_type:4, last:1; uint32_t app_tag_cmp:16, dif_blk_size:3, auto_incr_ref_tag:1, check_app_tag:1, check_ref_tag:1, check_crc:1, new_ref_tag:1, dif_op_rx:4, dif_op_tx:4; #else #error big endian version not defined #endif } sli4_diseed_sge_t; /** * @brief List Segment Pointer Scatter-Gather Entry (SGE) */ typedef struct sli4_lsp_sge_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t buffer_address_high; uint32_t buffer_address_low; uint32_t :27, sge_type:4, last:1; uint32_t segment_length:24, :8; #else #error big endian version not defined #endif } sli4_lsp_sge_t; #define SLI4_SGE_MAX_RESERVED 3 #define SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC 0x00 #define SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF 0x01 #define SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM 0x02 #define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF 0x03 #define SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC 0x04 #define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM 0x05 #define SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM 0x06 #define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC 0x07 #define SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW 0x08 #define SLI4_SGE_TYPE_DATA 0x00 #define SLI4_SGE_TYPE_CHAIN 0x03 /** Skyhawk only */ #define SLI4_SGE_TYPE_DIF 0x04 /** Data Integrity Field */ #define SLI4_SGE_TYPE_LSP 0x05 /** List Segment Pointer */ #define SLI4_SGE_TYPE_PEDIF 0x06 /** Post Encryption Engine DIF */ #define SLI4_SGE_TYPE_PESEED 0x07 /** Post Encryption Engine DIF Seed */ #define SLI4_SGE_TYPE_DISEED 0x08 /** DIF Seed */ #define SLI4_SGE_TYPE_ENC 0x09 /** Encryption */ #define SLI4_SGE_TYPE_ATM 0x0a /** DIF Application Tag Mask */ #define SLI4_SGE_TYPE_SKIP 0x0c /** SKIP */ #define OCS_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ /** * @brief CONFIG_LINK */ typedef struct sli4_cmd_config_link_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t maxbbc:8, /** Max buffer-to-buffer credit */ :24; uint32_t alpa:8, n_port_id:16, :8; uint32_t rsvd3; uint32_t e_d_tov; uint32_t lp_tov; uint32_t r_a_tov; uint32_t r_t_tov; uint32_t al_tov; uint32_t rsvd9; uint32_t :8, bbscn:4, /** buffer-to-buffer state change number */ cscn:1, /** configure BBSCN */ :19; #else #error big endian version not defined #endif } sli4_cmd_config_link_t; /** * @brief DUMP Type 4 */ #define SLI4_WKI_TAG_SAT_TEM 0x1040 typedef struct sli4_cmd_dump4_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t type:4, :28; uint32_t wki_selection:16, :16; uint32_t resv; uint32_t returned_byte_cnt; uint32_t resp_data[59]; #else #error big endian version not defined #endif } sli4_cmd_dump4_t; /** * @brief FW_INITIALIZE - initialize a SLI port * * @note This command uses a different format than all others. 
*/ extern const uint8_t sli4_fw_initialize[8]; /** * @brief FW_DEINITIALIZE - deinitialize a SLI port * * @note This command uses a different format than all others. */ extern const uint8_t sli4_fw_deinitialize[8]; /** * @brief INIT_LINK - initialize the link for a FC/FCoE port */ typedef struct sli4_cmd_init_link_flags_s { uint32_t loopback:1, topology:2, #define FC_TOPOLOGY_FCAL 0 #define FC_TOPOLOGY_P2P 1 :3, unfair:1, skip_lirp_lilp:1, gen_loop_validity_check:1, skip_lisa:1, enable_topology_failover:1, fixed_speed:1, :3, select_hightest_al_pa:1, :16; /* pad to 32 bits */ } sli4_cmd_init_link_flags_t; #define SLI4_INIT_LINK_F_LOOP_BACK BIT(0) #define SLI4_INIT_LINK_F_UNFAIR BIT(6) #define SLI4_INIT_LINK_F_NO_LIRP BIT(7) #define SLI4_INIT_LINK_F_LOOP_VALID_CHK BIT(8) #define SLI4_INIT_LINK_F_NO_LISA BIT(9) #define SLI4_INIT_LINK_F_FAIL_OVER BIT(10) #define SLI4_INIT_LINK_F_NO_AUTOSPEED BIT(11) #define SLI4_INIT_LINK_F_PICK_HI_ALPA BIT(15) #define SLI4_INIT_LINK_F_P2P_ONLY 1 #define SLI4_INIT_LINK_F_FCAL_ONLY 2 #define SLI4_INIT_LINK_F_FCAL_FAIL_OVER 0 #define SLI4_INIT_LINK_F_P2P_FAIL_OVER 1 typedef struct sli4_cmd_init_link_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t selective_reset_al_pa:8, :24; sli4_cmd_init_link_flags_t link_flags; uint32_t link_speed_selection_code; #define FC_LINK_SPEED_1G 1 #define FC_LINK_SPEED_2G 2 #define FC_LINK_SPEED_AUTO_1_2 3 #define FC_LINK_SPEED_4G 4 #define FC_LINK_SPEED_AUTO_4_1 5 #define FC_LINK_SPEED_AUTO_4_2 6 #define FC_LINK_SPEED_AUTO_4_2_1 7 #define FC_LINK_SPEED_8G 8 #define FC_LINK_SPEED_AUTO_8_1 9 #define FC_LINK_SPEED_AUTO_8_2 10 #define FC_LINK_SPEED_AUTO_8_2_1 11 #define FC_LINK_SPEED_AUTO_8_4 12 #define FC_LINK_SPEED_AUTO_8_4_1 13 #define FC_LINK_SPEED_AUTO_8_4_2 14 #define FC_LINK_SPEED_10G 16 #define FC_LINK_SPEED_16G 17 #define FC_LINK_SPEED_AUTO_16_8_4 18 #define FC_LINK_SPEED_AUTO_16_8 19 #define FC_LINK_SPEED_32G 20 #define FC_LINK_SPEED_AUTO_32_16_8 21 #define FC_LINK_SPEED_AUTO_32_16 22 #else #error big endian version not defined #endif } sli4_cmd_init_link_t; /** * @brief INIT_VFI - initialize the VFI resource */ typedef struct sli4_cmd_init_vfi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t vfi:16, :12, vp:1, vf:1, vt:1, vr:1; uint32_t fcfi:16, vpi:16; uint32_t vf_id:13, pri:3, :16; uint32_t :24, hop_count:8; #else #error big endian version not defined #endif } sli4_cmd_init_vfi_t; /** * @brief INIT_VPI - initialize the VPI resource */ typedef struct sli4_cmd_init_vpi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t vpi:16, vfi:16; #else #error big endian version not defined #endif } sli4_cmd_init_vpi_t; /** * @brief POST_XRI - post XRI resources to the SLI Port */ typedef struct sli4_cmd_post_xri_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t xri_base:16, xri_count:12, enx:1, dl:1, di:1, val:1; #else #error big endian version not defined #endif } sli4_cmd_post_xri_t; /** * @brief RELEASE_XRI - Release XRI resources from the SLI Port */ typedef struct sli4_cmd_release_xri_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t released_xri_count:5, :11, xri_count:5, :11; struct { uint32_t xri_tag0:16, xri_tag1:16; } xri_tbl[62]; #else #error big endian version not defined #endif } sli4_cmd_release_xri_t; /** * @brief READ_CONFIG - read SLI port configuration parameters */ typedef struct sli4_cmd_read_config_s { sli4_mbox_command_header_t hdr; } sli4_cmd_read_config_t; typedef struct 
sli4_res_read_config_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t :31, ext:1; /** Resource Extents */ - uint32_t :24, + uint32_t :20, + pt:2, + tf:1, + ptv:1, topology:8; uint32_t rsvd3; uint32_t e_d_tov:16, :16; uint32_t rsvd5; uint32_t r_a_tov:16, :16; uint32_t rsvd7; uint32_t rsvd8; uint32_t lmt:16, /** Link Module Type */ :16; uint32_t rsvd10; uint32_t rsvd11; uint32_t xri_base:16, xri_count:16; uint32_t rpi_base:16, rpi_count:16; uint32_t vpi_base:16, vpi_count:16; uint32_t vfi_base:16, vfi_count:16; uint32_t :16, fcfi_count:16; uint32_t rq_count:16, eq_count:16; uint32_t wq_count:16, cq_count:16; uint32_t pad[45]; #else #error big endian version not defined #endif } sli4_res_read_config_t; #define SLI4_READ_CFG_TOPO_FCOE 0x0 /** FCoE topology */ #define SLI4_READ_CFG_TOPO_FC 0x1 /** FC topology unknown */ #define SLI4_READ_CFG_TOPO_FC_DA 0x2 /** FC Direct Attach (non FC-AL) topology */ #define SLI4_READ_CFG_TOPO_FC_AL 0x3 /** FC-AL topology */ /** * @brief READ_NVPARMS - read SLI port configuration parameters */ typedef struct sli4_cmd_read_nvparms_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t rsvd2; uint32_t rsvd3; uint32_t rsvd4; uint8_t wwpn[8]; uint8_t wwnn[8]; uint32_t hard_alpa:8, preferred_d_id:24; #else #error big endian version not defined #endif } sli4_cmd_read_nvparms_t; /** * @brief WRITE_NVPARMS - write SLI port configuration parameters */ typedef struct sli4_cmd_write_nvparms_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t rsvd2; uint32_t rsvd3; uint32_t rsvd4; uint8_t wwpn[8]; uint8_t wwnn[8]; uint32_t hard_alpa:8, preferred_d_id:24; #else #error big endian version not defined #endif } sli4_cmd_write_nvparms_t; /** * @brief READ_REV - read the Port revision levels */ typedef struct sli4_cmd_read_rev_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t :16, sli_level:4, fcoem:1, ceev:2, :6, vpd:1, :2; uint32_t first_hw_revision; uint32_t second_hw_revision; uint32_t rsvd4; uint32_t third_hw_revision; uint32_t fc_ph_low:8, fc_ph_high:8, feature_level_low:8, feature_level_high:8; uint32_t rsvd7; uint32_t first_fw_id; char first_fw_name[16]; uint32_t second_fw_id; char second_fw_name[16]; uint32_t rsvd18[30]; uint32_t available_length:24, :8; uint32_t physical_address_low; uint32_t physical_address_high; uint32_t returned_vpd_length; uint32_t actual_vpd_length; #else #error big endian version not defined #endif } sli4_cmd_read_rev_t; /** * @brief READ_SPARM64 - read the Port service parameters */ typedef struct sli4_cmd_read_sparm64_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t rsvd2; sli4_bde_t bde_64; uint32_t vpi:16, :16; uint32_t port_name_start:16, port_name_length:16; uint32_t node_name_start:16, node_name_length:16; #else #error big endian version not defined #endif } sli4_cmd_read_sparm64_t; #define SLI4_READ_SPARM64_VPI_DEFAULT 0 #define SLI4_READ_SPARM64_VPI_SPECIAL UINT16_MAX #define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(uint32_t)) #define SLI4_READ_SPARM64_WWNN_OFFSET (SLI4_READ_SPARM64_WWPN_OFFSET + sizeof(uint64_t)) typedef struct sli4_port_state_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t nx_port_recv_state:2, nx_port_trans_state:2, nx_port_state_machine:4, link_speed:8, :14, tf:1, lu:1; #else #error big endian version not defined #endif } sli4_port_state_t; /** * @brief READ_TOPOLOGY - read the link event information */ typedef struct sli4_cmd_read_topology_s { 
sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t event_tag; uint32_t attention_type:8, il:1, pb_recvd:1, :22; uint32_t topology:8, lip_type:8, lip_al_ps:8, al_pa_granted:8; sli4_bde_t bde_loop_map; sli4_port_state_t link_down; sli4_port_state_t link_current; uint32_t max_bbc:8, init_bbc:8, bbscn:4, cbbscn:4, :8; uint32_t r_t_tov:9, :3, al_tov:4, lp_tov:16; uint32_t acquired_al_pa:8, :7, pb:1, specified_al_pa:16; uint32_t initial_n_port_id:24, :8; #else #error big endian version not defined #endif } sli4_cmd_read_topology_t; #define SLI4_MIN_LOOP_MAP_BYTES 128 #define SLI4_READ_TOPOLOGY_LINK_UP 0x1 #define SLI4_READ_TOPOLOGY_LINK_DOWN 0x2 #define SLI4_READ_TOPOLOGY_LINK_NO_ALPA 0x3 #define SLI4_READ_TOPOLOGY_UNKNOWN 0x0 #define SLI4_READ_TOPOLOGY_NPORT 0x1 #define SLI4_READ_TOPOLOGY_FC_AL 0x2 #define SLI4_READ_TOPOLOGY_SPEED_NONE 0x00 #define SLI4_READ_TOPOLOGY_SPEED_1G 0x04 #define SLI4_READ_TOPOLOGY_SPEED_2G 0x08 #define SLI4_READ_TOPOLOGY_SPEED_4G 0x10 #define SLI4_READ_TOPOLOGY_SPEED_8G 0x20 #define SLI4_READ_TOPOLOGY_SPEED_10G 0x40 #define SLI4_READ_TOPOLOGY_SPEED_16G 0x80 #define SLI4_READ_TOPOLOGY_SPEED_32G 0x90 /** * @brief REG_FCFI - activate a FC Forwarder */ #define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4 typedef struct sli4_cmd_reg_fcfi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcf_index:16, fcfi:16; uint32_t rq_id_1:16, rq_id_0:16; uint32_t rq_id_3:16, rq_id_2:16; struct { uint32_t r_ctl_mask:8, r_ctl_match:8, type_mask:8, type_match:8; } rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; uint32_t vlan_tag:12, vv:1, :19; #else #error big endian version not defined #endif } sli4_cmd_reg_fcfi_t; #define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4 #define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32 #define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0 #define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1 typedef struct sli4_cmd_reg_fcfi_mrq_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcf_index:16, fcfi:16; uint32_t rq_id_1:16, rq_id_0:16; uint32_t rq_id_3:16, rq_id_2:16; struct { uint32_t r_ctl_mask:8, r_ctl_match:8, type_mask:8, type_match:8; } rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG]; uint32_t vlan_tag:12, vv:1, mode:1, :18; uint32_t num_mrq_pairs:8, mrq_filter_bitmask:4, rq_selection_policy:4, :16; #endif } sli4_cmd_reg_fcfi_mrq_t; /** * @brief REG_RPI - register a Remote Port Indicator */ typedef struct sli4_cmd_reg_rpi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rpi:16, :16; uint32_t remote_n_port_id:24, upd:1, :2, etow:1, :1, terp:1, :1, ci:1; sli4_bde_t bde_64; uint32_t vpi:16, :16; #else #error big endian version not defined #endif } sli4_cmd_reg_rpi_t; #define SLI4_REG_RPI_BUF_LEN 0x70 /** * @brief REG_VFI - register a Virtual Fabric Indicator */ typedef struct sli4_cmd_reg_vfi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t vfi:16, :12, vp:1, upd:1, :2; uint32_t fcfi:16, vpi:16; /* vp=TRUE */ uint8_t wwpn[8]; /* vp=TRUE */ sli4_bde_t sparm; /* either FLOGI or PLOGI */ uint32_t e_d_tov; uint32_t r_a_tov; uint32_t local_n_port_id:24, /* vp=TRUE */ :8; #else #error big endian version not defined #endif } sli4_cmd_reg_vfi_t; /** * @brief REG_VPI - register a Virtual Port Indicator */ typedef struct sli4_cmd_reg_vpi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t local_n_port_id:24, upd:1, :7; uint8_t wwpn[8]; uint32_t rsvd5; uint32_t vpi:16, vfi:16; #else #error big endian version not defined #endif } sli4_cmd_reg_vpi_t; /** * @brief 
REQUEST_FEATURES - request / query SLI features */ typedef union { #if BYTE_ORDER == LITTLE_ENDIAN struct { uint32_t iaab:1, /** inhibit auto-ABTS originator */ npiv:1, /** NPIV support */ dif:1, /** DIF/DIX support */ vf:1, /** virtual fabric support */ fcpi:1, /** FCP initiator support */ fcpt:1, /** FCP target support */ fcpc:1, /** combined FCP initiator/target */ :1, rqd:1, /** recovery qualified delay */ iaar:1, /** inhibit auto-ABTS responder */ hlm:1, /** High Login Mode */ perfh:1, /** performance hints */ rxseq:1, /** RX Sequence Coalescing */ rxri:1, /** Release XRI variant of Coalescing */ dcl2:1, /** Disable Class 2 */ rsco:1, /** Receive Sequence Coalescing Optimizations */ mrqp:1, /** Multi RQ Pair Mode Support */ :15; } flag; uint32_t dword; #else #error big endian version not defined #endif } sli4_features_t; typedef struct sli4_cmd_request_features_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t qry:1, :31; #else #error big endian version not defined #endif sli4_features_t command; sli4_features_t response; } sli4_cmd_request_features_t; /** * @brief SLI_CONFIG - submit a configuration command to Port * * Command is either embedded as part of the payload (embed) or located * in a separate memory buffer (mem) */ typedef struct sli4_sli_config_pmd_s { uint32_t address_low; uint32_t address_high; uint32_t length:24, :8; } sli4_sli_config_pmd_t; typedef struct sli4_cmd_sli_config_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t emb:1, :2, pmd_count:5, :24; uint32_t payload_length; uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; union { uint8_t embed[58 * sizeof(uint32_t)]; sli4_sli_config_pmd_t mem; } payload; #else #error big endian version not defined #endif } sli4_cmd_sli_config_t; /** * @brief READ_STATUS - read tx/rx status of a particular port * */ typedef struct sli4_cmd_read_status_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t cc:1, :31; uint32_t rsvd2; uint32_t transmit_kbyte_count; uint32_t receive_kbyte_count; uint32_t transmit_frame_count; uint32_t receive_frame_count; uint32_t transmit_sequence_count; uint32_t receive_sequence_count; uint32_t total_exchanges_originator; uint32_t total_exchanges_responder; uint32_t receive_p_bsy_count; uint32_t receive_f_bsy_count; uint32_t dropped_frames_due_to_no_rq_buffer_count; uint32_t empty_rq_timeout_count; uint32_t dropped_frames_due_to_no_xri_count; uint32_t empty_xri_pool_count; #else #error big endian version not defined #endif } sli4_cmd_read_status_t; /** * @brief READ_LNK_STAT - read link status of a particular port * */ typedef struct sli4_cmd_read_link_stats_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rec:1, gec:1, w02of:1, w03of:1, w04of:1, w05of:1, w06of:1, w07of:1, w08of:1, w09of:1, w10of:1, w11of:1, w12of:1, w13of:1, w14of:1, w15of:1, w16of:1, w17of:1, w18of:1, w19of:1, w20of:1, w21of:1, resv0:8, clrc:1, clof:1; uint32_t link_failure_error_count; uint32_t loss_of_sync_error_count; uint32_t loss_of_signal_error_count; uint32_t primitive_sequence_error_count; uint32_t invalid_transmission_word_error_count; uint32_t crc_error_count; uint32_t primitive_sequence_event_timeout_count; uint32_t elastic_buffer_overrun_error_count; uint32_t arbitration_fc_al_timout_count; uint32_t advertised_receive_bufftor_to_buffer_credit; uint32_t current_receive_buffer_to_buffer_credit; uint32_t advertised_transmit_buffer_to_buffer_credit; uint32_t current_transmit_buffer_to_buffer_credit; uint32_t 
received_eofa_count; uint32_t received_eofdti_count; uint32_t received_eofni_count; uint32_t received_soff_count; uint32_t received_dropped_no_aer_count; uint32_t received_dropped_no_available_rpi_resources_count; uint32_t received_dropped_no_available_xri_resources_count; #else #error big endian version not defined #endif } sli4_cmd_read_link_stats_t; /** * @brief Format a WQE with WQ_ID Association performance hint * * @par Description * PHWQ works by over-writing part of Word 10 in the WQE with the WQ ID. * * @param entry Pointer to the WQE. * @param q_id Queue ID. * * @return None. */ static inline void sli_set_wq_id_association(void *entry, uint16_t q_id) { uint32_t *wqe = entry; /* * Set Word 10, bit 0 to zero * Set Word 10, bits 15:1 to the WQ ID */ #if BYTE_ORDER == LITTLE_ENDIAN wqe[10] &= ~0xffff; wqe[10] |= q_id << 1; #else #error big endian version not defined #endif } /** * @brief UNREG_FCFI - unregister a FCFI */ typedef struct sli4_cmd_unreg_fcfi_s { sli4_mbox_command_header_t hdr; uint32_t rsvd1; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcfi:16, :16; #else #error big endian version not defined #endif } sli4_cmd_unreg_fcfi_t; /** * @brief UNREG_RPI - unregister one or more RPI */ typedef struct sli4_cmd_unreg_rpi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t index:16, :13, dp:1, ii:2; uint32_t destination_n_port_id:24, :8; #else #error big endian version not defined #endif } sli4_cmd_unreg_rpi_t; #define SLI4_UNREG_RPI_II_RPI 0x0 #define SLI4_UNREG_RPI_II_VPI 0x1 #define SLI4_UNREG_RPI_II_VFI 0x2 #define SLI4_UNREG_RPI_II_FCFI 0x3 /** * @brief UNREG_VFI - unregister one or more VFI */ typedef struct sli4_cmd_unreg_vfi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t index:16, :14, ii:2; #else #error big endian version not defined #endif } sli4_cmd_unreg_vfi_t; #define SLI4_UNREG_VFI_II_VFI 0x0 #define SLI4_UNREG_VFI_II_FCFI 0x3 enum { SLI4_UNREG_TYPE_PORT, SLI4_UNREG_TYPE_DOMAIN, SLI4_UNREG_TYPE_FCF, SLI4_UNREG_TYPE_ALL }; /** * @brief UNREG_VPI - unregister one or more VPI */ typedef struct sli4_cmd_unreg_vpi_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd1; uint32_t index:16, :14, ii:2; #else #error big endian version not defined #endif } sli4_cmd_unreg_vpi_t; #define SLI4_UNREG_VPI_II_VPI 0x0 #define SLI4_UNREG_VPI_II_VFI 0x2 #define SLI4_UNREG_VPI_II_FCFI 0x3 /** * @brief AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature. */ typedef struct sli4_cmd_config_auto_xfer_rdy_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t resv; uint32_t max_burst_len; #else #error big endian version not defined #endif } sli4_cmd_config_auto_xfer_rdy_t; typedef struct sli4_cmd_config_auto_xfer_rdy_hp_s { sli4_mbox_command_header_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t resv; uint32_t max_burst_len; uint32_t esoc:1, :31; uint32_t block_size:16, :16; #else #error big endian version not defined #endif } sli4_cmd_config_auto_xfer_rdy_hp_t; /************************************************************************* * SLI-4 common configuration command formats and definitions */ #define SLI4_CFG_STATUS_SUCCESS 0x00 #define SLI4_CFG_STATUS_FAILED 0x01 #define SLI4_CFG_STATUS_ILLEGAL_REQUEST 0x02 #define SLI4_CFG_STATUS_ILLEGAL_FIELD 0x03 #define SLI4_MGMT_STATUS_FLASHROM_READ_FAILED 0xcb #define SLI4_CFG_ADD_STATUS_NO_STATUS 0x00 #define SLI4_CFG_ADD_STATUS_INVALID_OPCODE 0x1e /** * Subsystem values. 
*/ #define SLI4_SUBSYSTEM_COMMON 0x01 #define SLI4_SUBSYSTEM_LOWLEVEL 0x0B #define SLI4_SUBSYSTEM_FCFCOE 0x0c #define SLI4_SUBSYSTEM_DMTF 0x11 #define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0X36 /** * Common opcode (OPC) values. */ #define SLI4_OPC_COMMON_FUNCTION_RESET 0x3d #define SLI4_OPC_COMMON_CREATE_CQ 0x0c #define SLI4_OPC_COMMON_CREATE_CQ_SET 0x1d #define SLI4_OPC_COMMON_DESTROY_CQ 0x36 #define SLI4_OPC_COMMON_MODIFY_EQ_DELAY 0x29 #define SLI4_OPC_COMMON_CREATE_EQ 0x0d #define SLI4_OPC_COMMON_DESTROY_EQ 0x37 #define SLI4_OPC_COMMON_CREATE_MQ_EXT 0x5a #define SLI4_OPC_COMMON_DESTROY_MQ 0x35 #define SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES 0x20 #define SLI4_OPC_COMMON_NOP 0x21 #define SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO 0x9a #define SLI4_OPC_COMMON_GET_SLI4_PARAMETERS 0xb5 #define SLI4_OPC_COMMON_QUERY_FW_CONFIG 0x3a #define SLI4_OPC_COMMON_GET_PORT_NAME 0x4d #define SLI4_OPC_COMMON_WRITE_FLASHROM 0x07 #define SLI4_OPC_COMMON_MANAGE_FAT 0x44 #define SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA 0x49 #define SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES 0x79 #define SLI4_OPC_COMMON_GET_EXT_FAT_CAPABILITIES 0x7d #define SLI4_OPC_COMMON_SET_EXT_FAT_CAPABILITIES 0x7e #define SLI4_OPC_COMMON_EXT_FAT_CONFIGURE_SNAPSHOT 0x7f #define SLI4_OPC_COMMON_EXT_FAT_RETRIEVE_SNAPSHOT 0x80 #define SLI4_OPC_COMMON_EXT_FAT_READ_STRING_TABLE 0x82 #define SLI4_OPC_COMMON_GET_FUNCTION_CONFIG 0xa0 #define SLI4_OPC_COMMON_GET_PROFILE_CONFIG 0xa4 #define SLI4_OPC_COMMON_SET_PROFILE_CONFIG 0xa5 #define SLI4_OPC_COMMON_GET_PROFILE_LIST 0xa6 #define SLI4_OPC_COMMON_GET_ACTIVE_PROFILE 0xa7 #define SLI4_OPC_COMMON_SET_ACTIVE_PROFILE 0xa8 #define SLI4_OPC_COMMON_READ_OBJECT 0xab #define SLI4_OPC_COMMON_WRITE_OBJECT 0xac #define SLI4_OPC_COMMON_DELETE_OBJECT 0xae #define SLI4_OPC_COMMON_READ_OBJECT_LIST 0xad #define SLI4_OPC_COMMON_SET_DUMP_LOCATION 0xb8 #define SLI4_OPC_COMMON_SET_FEATURES 0xbf #define SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO 0xc9 #define SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID 0xca /** * DMTF opcode (OPC) values. */ #define SLI4_OPC_DMTF_EXEC_CLP_CMD 0x01 /** * @brief Generic Command Request header */ typedef struct sli4_req_hdr_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t opcode:8, subsystem:8, :16; uint32_t timeout; uint32_t request_length; uint32_t version:8, :24; #else #error big endian version not defined #endif } sli4_req_hdr_t; /** * @brief Generic Command Response header */ typedef struct sli4_res_hdr_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t opcode:8, subsystem:8, :16; uint32_t status:8, additional_status:8, :16; uint32_t response_length; uint32_t actual_response_length; #else #error big endian version not defined #endif } sli4_res_hdr_t; /** * @brief COMMON_FUNCTION_RESET * * Resets the Port, returning it to a power-on state. This configuration * command does not have a payload and should set/expect the lengths to * be zero. */ typedef struct sli4_req_common_function_reset_s { sli4_req_hdr_t hdr; } sli4_req_common_function_reset_t; typedef struct sli4_res_common_function_reset_s { sli4_res_hdr_t hdr; } sli4_res_common_function_reset_t; /** * @brief COMMON_CREATE_CQ_V0 * * Create a Completion Queue. 
*/ typedef struct sli4_req_common_create_cq_v0_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, :16; uint32_t :12, clswm:2, nodelay:1, :12, cqecnt:2, valid:1, :1, evt:1; uint32_t :22, eq_id:8, :1, arm:1; uint32_t rsvd[2]; struct { uint32_t low; uint32_t high; } page_physical_address[0]; #else #error big endian version not defined #endif } sli4_req_common_create_cq_v0_t; /** * @brief COMMON_CREATE_CQ_V2 * * Create a Completion Queue. */ typedef struct sli4_req_common_create_cq_v2_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, page_size:8, :8; uint32_t :12, clswm:2, nodelay:1, autovalid:1, :9, cqe_size:2, cqecnt:2, valid:1, :1, evt:1; uint32_t eq_id:16, :15, arm:1; uint32_t cqe_count:16, :16; uint32_t rsvd[1]; struct { uint32_t low; uint32_t high; } page_physical_address[0]; #else #error big endian version not defined #endif } sli4_req_common_create_cq_v2_t; /** * @brief COMMON_CREATE_CQ_SET_V0 * * Create a set of Completion Queues. */ typedef struct sli4_req_common_create_cq_set_v0_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, page_size:8, :8; uint32_t :12, clswm:2, nodelay:1, autovalid:1, rsvd:11, cqecnt:2, valid:1, :1, evt:1; uint32_t num_cq_req:16, cqe_count:15, arm:1; uint16_t eq_id[16]; struct { uint32_t low; uint32_t high; } page_physical_address[0]; #else #error big endian version not defined #endif } sli4_req_common_create_cq_set_v0_t; /** * CQE count. */ #define SLI4_CQ_CNT_256 0 #define SLI4_CQ_CNT_512 1 #define SLI4_CQ_CNT_1024 2 #define SLI4_CQ_CNT_LARGE 3 #define SLI4_CQE_BYTES (4 * sizeof(uint32_t)) #define SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES 8 /** * @brief Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion */ typedef struct sli4_res_common_create_queue_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t q_id:16, :8, ulp:8; uint32_t db_offset; uint32_t db_rs:16, db_fmt:16; #else #error big endian version not defined #endif } sli4_res_common_create_queue_t; typedef struct sli4_res_common_create_queue_set_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t q_id:16, num_q_allocated:16; #else #error big endian version not defined #endif } sli4_res_common_create_queue_set_t; /** * @brief Common Destroy CQ */ typedef struct sli4_req_common_destroy_cq_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t cq_id:16, :16; #else #error big endian version not defined #endif } sli4_req_common_destroy_cq_t; /** * @brief COMMON_MODIFY_EQ_DELAY * * Modify the delay multiplier for EQs */ typedef struct sli4_req_common_modify_eq_delay_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_eq; struct { uint32_t eq_id; uint32_t phase; uint32_t delay_multiplier; } eq_delay_record[8]; #else #error big endian version not defined #endif } sli4_req_common_modify_eq_delay_t; /** * @brief COMMON_CREATE_EQ * * Create an Event Queue.
*/ typedef struct sli4_req_common_create_eq_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, :16; uint32_t :28, autovalid:1, valid:1, :1, eqesz:1; uint32_t :26, count:3, :2, arm:1; uint32_t :13, delay_multiplier:10, :9; uint32_t rsvd; struct { uint32_t low; uint32_t high; } page_address[8]; #else #error big endian version not defined #endif } sli4_req_common_create_eq_t; #define SLI4_EQ_CNT_256 0 #define SLI4_EQ_CNT_512 1 #define SLI4_EQ_CNT_1024 2 #define SLI4_EQ_CNT_2048 3 #define SLI4_EQ_CNT_4096 4 #define SLI4_EQE_SIZE_4 0 #define SLI4_EQE_SIZE_16 1 /** * @brief Common Destroy EQ */ typedef struct sli4_req_common_destroy_eq_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t eq_id:16, :16; #else #error big endian version not defined #endif } sli4_req_common_destroy_eq_t; /** * @brief COMMON_CREATE_MQ_EXT * * Create a Mailbox Queue; accommodate v0 and v1 forms. */ typedef struct sli4_req_common_create_mq_ext_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, cq_id_v1:16; uint32_t async_event_bitmap; uint32_t async_cq_id_v1:16, ring_size:4, :2, cq_id_v0:10; uint32_t :31, val:1; uint32_t acqv:1, async_cq_id_v0:10, :21; uint32_t rsvd9; struct { uint32_t low; uint32_t high; } page_physical_address[8]; #else #error big endian version not defined #endif } sli4_req_common_create_mq_ext_t; #define SLI4_MQE_SIZE_16 0x05 #define SLI4_MQE_SIZE_32 0x06 #define SLI4_MQE_SIZE_64 0x07 #define SLI4_MQE_SIZE_128 0x08 #define SLI4_ASYNC_EVT_LINK_STATE BIT(1) #define SLI4_ASYNC_EVT_FCOE_FIP BIT(2) #define SLI4_ASYNC_EVT_DCBX BIT(3) #define SLI4_ASYNC_EVT_ISCSI BIT(4) #define SLI4_ASYNC_EVT_GRP5 BIT(5) #define SLI4_ASYNC_EVT_FC BIT(16) #define SLI4_ASYNC_EVT_SLI_PORT BIT(17) #define SLI4_ASYNC_EVT_VF BIT(18) #define SLI4_ASYNC_EVT_MR BIT(19) #define SLI4_ASYNC_EVT_ALL \ SLI4_ASYNC_EVT_LINK_STATE | \ SLI4_ASYNC_EVT_FCOE_FIP | \ SLI4_ASYNC_EVT_DCBX | \ SLI4_ASYNC_EVT_ISCSI | \ SLI4_ASYNC_EVT_GRP5 | \ SLI4_ASYNC_EVT_FC | \ SLI4_ASYNC_EVT_SLI_PORT | \ SLI4_ASYNC_EVT_VF |\ SLI4_ASYNC_EVT_MR #define SLI4_ASYNC_EVT_FC_FCOE \ SLI4_ASYNC_EVT_LINK_STATE | \ SLI4_ASYNC_EVT_FCOE_FIP | \ SLI4_ASYNC_EVT_GRP5 | \ SLI4_ASYNC_EVT_FC | \ SLI4_ASYNC_EVT_SLI_PORT /** * @brief Common Destroy MQ */ typedef struct sli4_req_common_destroy_mq_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t mq_id:16, :16; #else #error big endian version not defined #endif } sli4_req_common_destroy_mq_t; /** * @brief COMMON_GET_CNTL_ATTRIBUTES * * Query for information about the SLI Port */ typedef struct sli4_res_common_get_cntl_attributes_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint8_t version_string[32]; uint8_t manufacturer_name[32]; uint32_t supported_modes; uint32_t eprom_version_lo:8, eprom_version_hi:8, :16; uint32_t mbx_data_structure_version; uint32_t ep_firmware_data_structure_version; uint8_t ncsi_version_string[12]; uint32_t default_extended_timeout; uint8_t model_number[32]; uint8_t description[64]; uint8_t serial_number[32]; uint8_t ip_version_string[32]; uint8_t fw_version_string[32]; uint8_t bios_version_string[32]; uint8_t redboot_version_string[32]; uint8_t driver_version_string[32]; uint8_t fw_on_flash_version_string[32]; uint32_t functionalities_supported; uint32_t max_cdb_length:16, asic_revision:8, generational_guid0:8; uint32_t generational_guid1_12[3]; uint32_t generational_guid13:24, hba_port_count:8; uint32_t default_link_down_timeout:16, iscsi_version_min_max:8, multifunctional_device:8; uint32_t cache_valid:8, 
hba_status:8, max_domains_supported:8, port_number:6, port_type:2; uint32_t firmware_post_status; uint32_t hba_mtu; uint32_t iscsi_features:8, rsvd121:24; uint32_t pci_vendor_id:16, pci_device_id:16; uint32_t pci_sub_vendor_id:16, pci_sub_system_id:16; uint32_t pci_bus_number:8, pci_device_number:8, pci_function_number:8, interface_type:8; uint64_t unique_identifier; uint32_t number_of_netfilters:8, rsvd130:24; #else #error big endian version not defined #endif } sli4_res_common_get_cntl_attributes_t; /** * @brief COMMON_GET_CNTL_ATTRIBUTES * * This command queries the controller information from the Flash ROM. */ typedef struct sli4_req_common_get_cntl_addl_attributes_s { sli4_req_hdr_t hdr; } sli4_req_common_get_cntl_addl_attributes_t; typedef struct sli4_res_common_get_cntl_addl_attributes_s { sli4_res_hdr_t hdr; uint16_t ipl_file_number; uint8_t ipl_file_version; uint8_t rsvd0; uint8_t on_die_temperature; uint8_t rsvd1[3]; uint32_t driver_advanced_features_supported; uint32_t rsvd2[4]; char fcoe_universal_bios_version[32]; char fcoe_x86_bios_version[32]; char fcoe_efi_bios_version[32]; char fcoe_fcode_version[32]; char uefi_bios_version[32]; char uefi_nic_version[32]; char uefi_fcode_version[32]; char uefi_iscsi_version[32]; char iscsi_x86_bios_version[32]; char pxe_x86_bios_version[32]; uint8_t fcoe_default_wwpn[8]; uint8_t ext_phy_version[32]; uint8_t fc_universal_bios_version[32]; uint8_t fc_x86_bios_version[32]; uint8_t fc_efi_bios_version[32]; uint8_t fc_fcode_version[32]; uint8_t ext_phy_crc_label[8]; uint8_t ipl_file_name[16]; uint8_t rsvd3[72]; } sli4_res_common_get_cntl_addl_attributes_t; /** * @brief COMMON_NOP * * This command does not do anything; it only returns the payload in the completion. */ typedef struct sli4_req_common_nop_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t context[2]; #else #error big endian version not defined #endif } sli4_req_common_nop_t; typedef struct sli4_res_common_nop_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t context[2]; #else #error big endian version not defined #endif } sli4_res_common_nop_t; /** * @brief COMMON_GET_RESOURCE_EXTENT_INFO */ typedef struct sli4_req_common_get_resource_extent_info_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t resource_type:16, :16; #else #error big endian version not defined #endif } sli4_req_common_get_resource_extent_info_t; #define SLI4_RSC_TYPE_ISCSI_INI_XRI 0x0c #define SLI4_RSC_TYPE_FCOE_VFI 0x20 #define SLI4_RSC_TYPE_FCOE_VPI 0x21 #define SLI4_RSC_TYPE_FCOE_RPI 0x22 #define SLI4_RSC_TYPE_FCOE_XRI 0x23 typedef struct sli4_res_common_get_resource_extent_info_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t resource_extent_count:16, resource_extent_size:16; #else #error big endian version not defined #endif } sli4_res_common_get_resource_extent_info_t; #define SLI4_128BYTE_WQE_SUPPORT 0x02 /** * @brief COMMON_GET_SLI4_PARAMETERS */ typedef struct sli4_res_common_get_sli4_parameters_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t protocol_type:8, :24; uint32_t ft:1, :3, sli_revision:4, sli_family:4, if_type:4, sli_hint_1:8, sli_hint_2:5, :3; uint32_t eq_page_cnt:4, :4, eqe_sizes:4, :4, eq_page_sizes:8, eqe_count_method:4, :4; uint32_t eqe_count_mask:16, :16; uint32_t cq_page_cnt:4, :4, cqe_sizes:4, :2, cqv:2, cq_page_sizes:8, cqe_count_method:4, :4; uint32_t cqe_count_mask:16, :16; uint32_t mq_page_cnt:4, :10, mqv:2, mq_page_sizes:8, mqe_count_method:4, :4; uint32_t mqe_count_mask:16, :16; uint32_t wq_page_cnt:4, :4, 
wqe_sizes:4, :2, wqv:2, wq_page_sizes:8, wqe_count_method:4, :4; uint32_t wqe_count_mask:16, :16; uint32_t rq_page_cnt:4, :4, rqe_sizes:4, :2, rqv:2, rq_page_sizes:8, rqe_count_method:4, :4; uint32_t rqe_count_mask:16, :12, rq_db_window:4; uint32_t fcoe:1, ext:1, hdrr:1, sglr:1, fbrr:1, areg:1, tgt:1, terp:1, assi:1, wchn:1, tcca:1, trty:1, trir:1, phoff:1, phon:1, phwq:1, /** Performance Hint WQ_ID Association */ boundary_4ga:1, rxc:1, hlm:1, ipr:1, rxri:1, sglc:1, timm:1, tsmm:1, :1, oas:1, lc:1, agxf:1, loopback_scope:4; uint32_t sge_supported_length; uint32_t sgl_page_cnt:4, :4, sgl_page_sizes:8, sgl_pp_align:8, :8; uint32_t min_rq_buffer_size:16, :16; uint32_t max_rq_buffer_size; uint32_t physical_xri_max:16, physical_rpi_max:16; uint32_t physical_vpi_max:16, physical_vfi_max:16; uint32_t rsvd19; uint32_t frag_num_field_offset:16, /* dword 20 */ frag_num_field_size:16; uint32_t sgl_index_field_offset:16, /* dword 21 */ sgl_index_field_size:16; uint32_t chain_sge_initial_value_lo; /* dword 22 */ uint32_t chain_sge_initial_value_hi; /* dword 23 */ #else #error big endian version not defined #endif } sli4_res_common_get_sli4_parameters_t; /** * @brief COMMON_QUERY_FW_CONFIG * * This command retrieves firmware configuration parameters and adapter * resources available to the driver. */ typedef struct sli4_req_common_query_fw_config_s { sli4_req_hdr_t hdr; } sli4_req_common_query_fw_config_t; #define SLI4_FUNCTION_MODE_FCOE_INI_MODE 0x40 #define SLI4_FUNCTION_MODE_FCOE_TGT_MODE 0x80 #define SLI4_FUNCTION_MODE_DUA_MODE 0x800 #define SLI4_ULP_MODE_FCOE_INI 0x40 #define SLI4_ULP_MODE_FCOE_TGT 0x80 typedef struct sli4_res_common_query_fw_config_s { sli4_res_hdr_t hdr; uint32_t config_number; uint32_t asic_rev; uint32_t physical_port; uint32_t function_mode; uint32_t ulp0_mode; uint32_t ulp0_nic_wqid_base; uint32_t ulp0_nic_wq_total; /* Dword 10 */ uint32_t ulp0_toe_wqid_base; uint32_t ulp0_toe_wq_total; uint32_t ulp0_toe_rqid_base; uint32_t ulp0_toe_rq_total; uint32_t ulp0_toe_defrqid_base; uint32_t ulp0_toe_defrq_total; uint32_t ulp0_lro_rqid_base; uint32_t ulp0_lro_rq_total; uint32_t ulp0_iscsi_icd_base; uint32_t ulp0_iscsi_icd_total; /* Dword 20 */ uint32_t ulp1_mode; uint32_t ulp1_nic_wqid_base; uint32_t ulp1_nic_wq_total; uint32_t ulp1_toe_wqid_base; uint32_t ulp1_toe_wq_total; uint32_t ulp1_toe_rqid_base; uint32_t ulp1_toe_rq_total; uint32_t ulp1_toe_defrqid_base; uint32_t ulp1_toe_defrq_total; uint32_t ulp1_lro_rqid_base; /* Dword 30 */ uint32_t ulp1_lro_rq_total; uint32_t ulp1_iscsi_icd_base; uint32_t ulp1_iscsi_icd_total; uint32_t function_capabilities; uint32_t ulp0_cq_base; uint32_t ulp0_cq_total; uint32_t ulp0_eq_base; uint32_t ulp0_eq_total; uint32_t ulp0_iscsi_chain_icd_base; uint32_t ulp0_iscsi_chain_icd_total; /* Dword 40 */ uint32_t ulp1_iscsi_chain_icd_base; uint32_t ulp1_iscsi_chain_icd_total; } sli4_res_common_query_fw_config_t; /** * @brief COMMON_GET_PORT_NAME */ typedef struct sli4_req_common_get_port_name_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t pt:2, /* only COMMON_GET_PORT_NAME_V1 */ :30; #else #error big endian version not defined #endif } sli4_req_common_get_port_name_t; typedef struct sli4_res_common_get_port_name_s { sli4_res_hdr_t hdr; char port_name[4]; } sli4_res_common_get_port_name_t; /** * @brief COMMON_WRITE_FLASHROM */ typedef struct sli4_req_common_write_flashrom_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t flash_rom_access_opcode; uint32_t flash_rom_access_operation_type; uint32_t data_buffer_size; uint32_t 
offset; uint8_t data_buffer[4]; #else #error big endian version not defined #endif } sli4_req_common_write_flashrom_t; #define SLI4_MGMT_FLASHROM_OPCODE_FLASH 0x01 #define SLI4_MGMT_FLASHROM_OPCODE_SAVE 0x02 #define SLI4_MGMT_FLASHROM_OPCODE_CLEAR 0x03 #define SLI4_MGMT_FLASHROM_OPCODE_REPORT 0x04 #define SLI4_MGMT_FLASHROM_OPCODE_IMAGE_INFO 0x05 #define SLI4_MGMT_FLASHROM_OPCODE_IMAGE_CRC 0x06 #define SLI4_MGMT_FLASHROM_OPCODE_OFFSET_BASED_FLASH 0x07 #define SLI4_MGMT_FLASHROM_OPCODE_OFFSET_BASED_SAVE 0x08 #define SLI4_MGMT_PHY_FLASHROM_OPCODE_FLASH 0x09 #define SLI4_MGMT_PHY_FLASHROM_OPCODE_SAVE 0x0a #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ISCSI 0x00 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_REDBOOT 0x01 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_BIOS 0x02 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_PXE_BIOS 0x03 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_CODE_CONTROL 0x04 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_IPSEC_CFG 0x05 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_INIT_DATA 0x06 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ROM_OFFSET 0x07 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_BIOS 0x08 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ISCSI_BAK 0x09 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_ACT 0x0a #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_BAK 0x0b #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_CODE_CTRL_P 0x0c #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_NCSI 0x0d #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_NIC 0x0e #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_DCBX 0x0f #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_PXE_BIOS_CFG 0x10 #define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ALL_CFG_DATA 0x11 /** * @brief COMMON_MANAGE_FAT */ typedef struct sli4_req_common_manage_fat_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fat_operation; uint32_t read_log_offset; uint32_t read_log_length; uint32_t data_buffer_size; uint32_t data_buffer; /* response only */ #else #error big endian version not defined #endif } sli4_req_common_manage_fat_t; /** * @brief COMMON_GET_EXT_FAT_CAPABILITIES */ typedef struct sli4_req_common_get_ext_fat_capabilities_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t parameter_type; #else #error big endian version not defined #endif } sli4_req_common_get_ext_fat_capabilities_t; /** * @brief COMMON_SET_EXT_FAT_CAPABILITIES */ typedef struct sli4_req_common_set_ext_fat_capabilities_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t maximum_log_entries; uint32_t log_entry_size; uint32_t logging_type:8, maximum_logging_functions:8, maximum_logging_ports:8, :8; uint32_t supported_modes; uint32_t number_modules; uint32_t debug_module[14]; #else #error big endian version not defined #endif } sli4_req_common_set_ext_fat_capabilities_t; /** * @brief COMMON_EXT_FAT_CONFIGURE_SNAPSHOT */ typedef struct sli4_req_common_ext_fat_configure_snapshot_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t total_log_entries; #else #error big endian version not defined #endif } sli4_req_common_ext_fat_configure_snapshot_t; /** * @brief COMMON_EXT_FAT_RETRIEVE_SNAPSHOT */ typedef struct sli4_req_common_ext_fat_retrieve_snapshot_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t snapshot_mode; uint32_t start_index; uint32_t number_log_entries; #else #error big endian version not defined #endif } sli4_req_common_ext_fat_retrieve_snapshot_t; typedef struct sli4_res_common_ext_fat_retrieve_snapshot_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t number_log_entries; uint32_t version:8, physical_port:8, function_id:16; uint32_t trace_level; uint32_t module_mask[2]; uint32_t 
trace_table_index; uint32_t timestamp; uint8_t string_data[16]; uint32_t data[6]; #else #error big endian version not defined #endif } sli4_res_common_ext_fat_retrieve_snapshot_t; /** * @brief COMMON_EXT_FAT_READ_STRING_TABLE */ typedef struct sli4_req_common_ext_fat_read_string_table_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t byte_offset; uint32_t number_bytes; #else #error big endian version not defined #endif } sli4_req_common_ext_fat_read_string_table_t; typedef struct sli4_res_common_ext_fat_read_string_table_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t number_returned_bytes; uint32_t number_remaining_bytes; uint32_t table_data0:8, :24; uint8_t table_data[0]; #else #error big endian version not defined #endif } sli4_res_common_ext_fat_read_string_table_t; /** * @brief COMMON_READ_TRANSCEIVER_DATA * * This command reads SFF transceiver data (the format is defined * by the SFF-8472 specification). */ typedef struct sli4_req_common_read_transceiver_data_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t page_number; uint32_t port; #else #error big endian version not defined #endif } sli4_req_common_read_transceiver_data_t; typedef struct sli4_res_common_read_transceiver_data_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t page_number; uint32_t port; uint32_t page_data[32]; uint32_t page_data_2[32]; #else #error big endian version not defined #endif } sli4_res_common_read_transceiver_data_t; /** * @brief COMMON_READ_OBJECT */ typedef struct sli4_req_common_read_object_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t desired_read_length:24, :8; uint32_t read_offset; uint8_t object_name[104]; uint32_t host_buffer_descriptor_count; sli4_bde_t host_buffer_descriptor[0]; #else #error big endian version not defined #endif } sli4_req_common_read_object_t; typedef struct sli4_res_common_read_object_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t actual_read_length; uint32_t resv:31, eof:1; #else #error big endian version not defined #endif } sli4_res_common_read_object_t; /** * @brief COMMON_WRITE_OBJECT */ typedef struct sli4_req_common_write_object_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t desired_write_length:24, :6, noc:1, eof:1; uint32_t write_offset; uint8_t object_name[104]; uint32_t host_buffer_descriptor_count; sli4_bde_t host_buffer_descriptor[0]; #else #error big endian version not defined #endif } sli4_req_common_write_object_t; typedef struct sli4_res_common_write_object_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t actual_write_length; uint32_t change_status:8, :24; #else #error big endian version not defined #endif } sli4_res_common_write_object_t; /** * @brief COMMON_DELETE_OBJECT */ typedef struct sli4_req_common_delete_object_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd4; uint32_t rsvd5; uint8_t object_name[104]; #else #error big endian version not defined #endif } sli4_req_common_delete_object_t; /** * @brief COMMON_READ_OBJECT_LIST */ typedef struct sli4_req_common_read_object_list_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t desired_read_length:24, :8; uint32_t read_offset; uint8_t object_name[104]; uint32_t host_buffer_descriptor_count; sli4_bde_t host_buffer_descriptor[0]; #else #error big endian version not defined #endif } sli4_req_common_read_object_list_t;
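+
+/*
+ * Editorial usage sketch (assumption-laden, not part of the driver): the
+ * object read/write commands above are driven in chunks. Each request names
+ * the object (object_name), gives a byte offset, and points at host memory
+ * through host_buffer_descriptor BDEs; on writes, "eof" marks the final
+ * chunk and "noc" is taken here, from its position next to eof, to mean
+ * "no commit". sli_cmd_common_write_object(), declared later in this
+ * header, formats the MQE; the loop and object name below are hypothetical:
+ *
+ *	uint32_t offset = 0;
+ *	while (remaining > 0) {
+ *		uint32_t len = MIN(remaining, chunk_dma.size);
+ *		int last = (len == remaining);
+ *		sli_cmd_common_write_object(&sli4, mqe, sizeof(mqe),
+ *		    0, last, len, offset,	// noc=0, eof=last (order assumed)
+ *		    "/prg/example.grp", &chunk_dma);
+ *		// issue the MQE, then check actual_write_length in the response
+ *		offset += len;
+ *		remaining -= len;
+ *	}
+ */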
/** * @brief COMMON_SET_DUMP_LOCATION */ typedef struct sli4_req_common_set_dump_location_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t buffer_length:24, :5, fdb:1, blp:1, qry:1; uint32_t buf_addr_low; uint32_t buf_addr_high; #else #error big endian version not defined #endif } sli4_req_common_set_dump_location_t; typedef struct sli4_res_common_set_dump_location_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t buffer_length:24, :8; #else #error big endian version not defined #endif } sli4_res_common_set_dump_location_t; /** * @brief COMMON_SET_FEATURES */ #define SLI4_SET_FEATURES_DIF_SEED 0x01 #define SLI4_SET_FEATURES_XRI_TIMER 0x03 #define SLI4_SET_FEATURES_MAX_PCIE_SPEED 0x04 #define SLI4_SET_FEATURES_FCTL_CHECK 0x05 #define SLI4_SET_FEATURES_FEC 0x06 #define SLI4_SET_FEATURES_PCIE_RECV_DETECT 0x07 #define SLI4_SET_FEATURES_DIF_MEMORY_MODE 0x08 #define SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE 0x09 #define SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS 0x0A #define SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI 0x0C #define SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE 0x0D #define SLI4_SET_FEATURES_SET_FTD_XFER_HINT 0x0F #define SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK 0x11 +#define SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY 0x20 typedef struct sli4_req_common_set_features_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t feature; uint32_t param_len; uint32_t params[8]; #else #error big endian version not defined #endif } sli4_req_common_set_features_t; typedef struct sli4_req_common_set_features_dif_seed_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t seed:16, :16; #else #error big endian version not defined #endif } sli4_req_common_set_features_dif_seed_t; typedef struct sli4_req_common_set_features_t10_pi_mem_model_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t tmm:1, :31; #else #error big endian version not defined #endif } sli4_req_common_set_features_t10_pi_mem_model_t; typedef struct sli4_req_common_set_features_multirq_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t isr:1, /*<< Include Sequence Reporting */ agxfe:1, /*<< Auto Generate XFER-RDY Feature Enabled */ :30; uint32_t num_rqs:8, rq_select_policy:4, :20; #else #error big endian version not defined #endif } sli4_req_common_set_features_multirq_t; typedef struct sli4_req_common_set_features_xfer_rdy_t10pi_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rtc:1, atv:1, tmm:1, :1, p_type:3, blk_size:3, :22; uint32_t app_tag:16, :16; #else #error big endian version not defined #endif } sli4_req_common_set_features_xfer_rdy_t10pi_t; typedef struct sli4_req_common_set_features_health_check_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t hck:1, qry:1, :30; #else #error big endian version not defined #endif } sli4_req_common_set_features_health_check_t; typedef struct sli4_req_common_set_features_set_fdt_xfer_hint_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fdt_xfer_hint; #else #error big endian version not defined #endif } sli4_req_common_set_features_set_fdt_xfer_hint_t; +typedef struct sli4_req_common_set_features_persistent_topo_param_s { +#if BYTE_ORDER == LITTLE_ENDIAN + uint32_t persistent_topo:2, + topo_failover:1, + :29; +#else +#error big endian version not defined +#endif +} sli4_req_common_set_features_persistent_topo_param_t; + /** * @brief DMTF_EXEC_CLP_CMD */ typedef struct sli4_req_dmtf_exec_clp_cmd_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t cmd_buf_length; uint32_t resp_buf_length; uint32_t cmd_buf_addr_low; uint32_t cmd_buf_addr_high; uint32_t resp_buf_addr_low; uint32_t resp_buf_addr_high; #else #error big endian version not defined #endif } sli4_req_dmtf_exec_clp_cmd_t; typedef struct
sli4_res_dmtf_exec_clp_cmd_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t :32; uint32_t resp_length; uint32_t :32; uint32_t :32; uint32_t :32; uint32_t :32; uint32_t clp_status; uint32_t clp_detailed_status; #else #error big endian version not defined #endif } sli4_res_dmtf_exec_clp_cmd_t; /** * @brief Resource descriptor */ #define SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE 0x50 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_NIC 0x51 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_ISCSI 0x52 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_FCFCOE 0x53 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_RDMA 0x54 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_PORT 0x55 #define SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP 0x56 #define SLI4_PROTOCOL_NIC_TOE 0x01 #define SLI4_PROTOCOL_ISCSI 0x02 #define SLI4_PROTOCOL_FCOE 0x04 #define SLI4_PROTOCOL_NIC_TOE_RDMA 0x08 #define SLI4_PROTOCOL_FC 0x10 #define SLI4_PROTOCOL_DEFAULT 0xff typedef struct sli4_resource_descriptor_v1_s { uint32_t descriptor_type:8, descriptor_length:8, :16; uint32_t type_specific[0]; } sli4_resource_descriptor_v1_t; typedef struct sli4_pcie_resource_descriptor_v1_s { uint32_t descriptor_type:8, descriptor_length:8, :14, imm:1, nosv:1; uint32_t :16, pf_number:10, :6; uint32_t rsvd1; uint32_t sriov_state:8, pf_state:8, pf_type:8, :8; uint32_t number_of_vfs:16, :16; uint32_t mission_roles:8, :19, pchg:1, schg:1, xchg:1, xrom:2; uint32_t rsvd2[16]; } sli4_pcie_resource_descriptor_v1_t; typedef struct sli4_isap_resource_descriptor_v1_s { uint32_t descriptor_type:8, descriptor_length:8, :16; uint32_t iscsi_tgt:1, iscsi_ini:1, iscsi_dif:1, :29; uint32_t rsvd1[3]; uint32_t fcoe_tgt:1, fcoe_ini:1, fcoe_dif:1, :29; uint32_t rsvd2[7]; uint32_t mc_type0:8, mc_type1:8, mc_type2:8, mc_type3:8; uint32_t rsvd3[3]; } sli4_isap_resouce_descriptor_v1_t; /** * @brief COMMON_GET_FUNCTION_CONFIG */ typedef struct sli4_req_common_get_function_config_s { sli4_req_hdr_t hdr; } sli4_req_common_get_function_config_t; typedef struct sli4_res_common_get_function_config_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t desc_count; uint32_t desc[54]; #else #error big endian version not defined #endif } sli4_res_common_get_function_config_t; /** * @brief COMMON_GET_PROFILE_CONFIG */ typedef struct sli4_req_common_get_profile_config_s { sli4_req_hdr_t hdr; uint32_t profile_id:8, typ:2, :22; } sli4_req_common_get_profile_config_t; typedef struct sli4_res_common_get_profile_config_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t desc_count; uint32_t desc[0]; #else #error big endian version not defined #endif } sli4_res_common_get_profile_config_t; /** * @brief COMMON_SET_PROFILE_CONFIG */ typedef struct sli4_req_common_set_profile_config_s { sli4_req_hdr_t hdr; uint32_t profile_id:8, :23, isap:1; uint32_t desc_count; uint32_t desc[0]; } sli4_req_common_set_profile_config_t; typedef struct sli4_res_common_set_profile_config_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN #else #error big endian version not defined #endif } sli4_res_common_set_profile_config_t; /** * @brief Profile Descriptor for profile functions */ typedef struct sli4_profile_descriptor_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t profile_id:8, :8, profile_index:8, :8; uint32_t profile_description[128]; #else #error big endian version not defined #endif } sli4_profile_descriptor_t; /* We don't know in advance how many descriptors there are. We have to pick a number that we think will be big enough and ask for that many. 
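+ (The response's profile_descriptor_count field reports how many descriptors
+ came back, so a caller can at least notice when the fixed-size array below
+ was filled to capacity.)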
*/ #define MAX_PRODUCT_DESCRIPTORS 40 /** * @brief COMMON_GET_PROFILE_LIST */ typedef struct sli4_req_common_get_profile_list_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t start_profile_index:8, :24; #else #error big endian version not defined #endif } sli4_req_common_get_profile_list_t; typedef struct sli4_res_common_get_profile_list_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t profile_descriptor_count; sli4_profile_descriptor_t profile_descriptor[MAX_PRODUCT_DESCRIPTORS]; #else #error big endian version not defined #endif } sli4_res_common_get_profile_list_t; /** * @brief COMMON_GET_ACTIVE_PROFILE */ typedef struct sli4_req_common_get_active_profile_s { sli4_req_hdr_t hdr; } sli4_req_common_get_active_profile_t; typedef struct sli4_res_common_get_active_profile_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t active_profile_id:8, :8, next_profile_id:8, :8; #else #error big endian version not defined #endif } sli4_res_common_get_active_profile_t; /** * @brief COMMON_SET_ACTIVE_PROFILE */ typedef struct sli4_req_common_set_active_profile_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t active_profile_id:8, :23, fd:1; #else #error big endian version not defined #endif } sli4_req_common_set_active_profile_t; typedef struct sli4_res_common_set_active_profile_s { sli4_res_hdr_t hdr; } sli4_res_common_set_active_profile_t; /** * @brief Link Config Descriptor for link config functions */ typedef struct sli4_link_config_descriptor_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t link_config_id:8, :24; uint32_t config_description[8]; #else #error big endian version not defined #endif } sli4_link_config_descriptor_t; #define MAX_LINK_CONFIG_DESCRIPTORS 10 /** * @brief COMMON_GET_RECONFIG_LINK_INFO */ typedef struct sli4_req_common_get_reconfig_link_info_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN #else #error big endian version not defined #endif } sli4_req_common_get_reconfig_link_info_t; typedef struct sli4_res_common_get_reconfig_link_info_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t active_link_config_id:8, :8, next_link_config_id:8, :8; uint32_t link_configuration_descriptor_count; sli4_link_config_descriptor_t desc[MAX_LINK_CONFIG_DESCRIPTORS]; #else #error big endian version not defined #endif } sli4_res_common_get_reconfig_link_info_t; /** * @brief COMMON_SET_RECONFIG_LINK_ID */ typedef struct sli4_req_common_set_reconfig_link_id_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t next_link_config_id:8, :23, fd:1; #else #error big endian version not defined #endif } sli4_req_common_set_reconfig_link_id_t; typedef struct sli4_res_common_set_reconfig_link_id_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN #else #error big endian version not defined #endif } sli4_res_common_set_reconfig_link_id_t; typedef struct sli4_req_lowlevel_set_watchdog_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t watchdog_timeout:16, :16; #else #error big endian version not defined #endif } sli4_req_lowlevel_set_watchdog_t; typedef struct sli4_res_lowlevel_set_watchdog_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd; #else #error big endian version not defined #endif } sli4_res_lowlevel_set_watchdog_t; /** * @brief Event Queue Entry */ typedef struct sli4_eqe_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t vld:1, /** valid */ major_code:3, minor_code:12, resource_id:16; #else #error big endian version not defined #endif } sli4_eqe_t; #define 
SLI4_MAJOR_CODE_STANDARD 0 #define SLI4_MAJOR_CODE_SENTINEL 1 /** * @brief Mailbox Completion Queue Entry * * A CQE generated on the completion of a MQE from a MQ. */ typedef struct sli4_mcqe_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t completion_status:16, /** values are protocol specific */ extended_status:16; uint32_t mqe_tag_low; uint32_t mqe_tag_high; uint32_t :27, con:1, /** consumed - command now being executed */ cmp:1, /** completed - command still executing if clear */ :1, ae:1, /** async event - this is an ACQE */ val:1; /** valid - contents of CQE are valid */ #else #error big endian version not defined #endif } sli4_mcqe_t; /** * @brief Asynchronous Completion Queue Entry * * A CQE generated asynchronously in response to the link or other internal events. */ typedef struct sli4_acqe_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t event_data[3]; uint32_t :8, event_code:8, event_type:8, /** values are protocol specific */ :6, ae:1, /** async event - this is an ACQE */ val:1; /** valid - contents of CQE are valid */ #else #error big endian version not defined #endif } sli4_acqe_t; #define SLI4_ACQE_EVENT_CODE_LINK_STATE 0x01 #define SLI4_ACQE_EVENT_CODE_FCOE_FIP 0x02 #define SLI4_ACQE_EVENT_CODE_DCBX 0x03 #define SLI4_ACQE_EVENT_CODE_ISCSI 0x04 #define SLI4_ACQE_EVENT_CODE_GRP_5 0x05 #define SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT 0x10 #define SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT 0x11 #define SLI4_ACQE_EVENT_CODE_VF_EVENT 0x12 #define SLI4_ACQE_EVENT_CODE_MR_EVENT 0x13 /** * @brief Register name enums */ typedef enum { SLI4_REG_BMBX, SLI4_REG_EQ_DOORBELL, SLI4_REG_CQ_DOORBELL, SLI4_REG_FCOE_RQ_DOORBELL, SLI4_REG_IO_WQ_DOORBELL, SLI4_REG_MQ_DOORBELL, SLI4_REG_PHYSDEV_CONTROL, SLI4_REG_SLIPORT_CONTROL, SLI4_REG_SLIPORT_ERROR1, SLI4_REG_SLIPORT_ERROR2, SLI4_REG_SLIPORT_SEMAPHORE, SLI4_REG_SLIPORT_STATUS, SLI4_REG_UERR_MASK_HI, SLI4_REG_UERR_MASK_LO, SLI4_REG_UERR_STATUS_HI, SLI4_REG_UERR_STATUS_LO, SLI4_REG_SW_UE_CSR1, SLI4_REG_SW_UE_CSR2, SLI4_REG_MAX /* must be last */ } sli4_regname_e; typedef struct sli4_reg_s { uint32_t rset; uint32_t off; } sli4_reg_t; typedef enum { SLI_QTYPE_EQ, SLI_QTYPE_CQ, SLI_QTYPE_MQ, SLI_QTYPE_WQ, SLI_QTYPE_RQ, SLI_QTYPE_MAX, /* must be last */ } sli4_qtype_e; #define SLI_USER_MQ_COUNT 1 /** User specified max mail queues */ #define SLI_MAX_CQ_SET_COUNT 16 #define SLI_MAX_RQ_SET_COUNT 16 typedef enum { SLI_QENTRY_ASYNC, SLI_QENTRY_MQ, SLI_QENTRY_RQ, SLI_QENTRY_WQ, SLI_QENTRY_WQ_RELEASE, SLI_QENTRY_OPT_WRITE_CMD, SLI_QENTRY_OPT_WRITE_DATA, SLI_QENTRY_XABT, SLI_QENTRY_MAX /* must be last */ } sli4_qentry_e; typedef struct sli4_queue_s { /* Common to all queue types */ ocs_dma_t dma; ocs_lock_t lock; uint32_t index; /** current host entry index */ uint16_t size; /** entry size */ uint16_t length; /** number of entries */ uint16_t n_posted; /** number entries posted */ uint16_t id; /** Port assigned xQ_ID */ uint16_t ulp; /** ULP assigned to this queue */ uint32_t doorbell_offset;/** The offset for the doorbell */ uint16_t doorbell_rset; /** register set for the doorbell */ uint8_t type; /** queue type ie EQ, CQ, ... 
*/ uint32_t proc_limit; /** limit number of CQE processed per iteration */ uint32_t posted_limit; /** number of CQE/EQE to process before ringing doorbell */ uint32_t max_num_processed; time_t max_process_time; uint16_t phase; /** For if_type = 6, this value toggles for each iteration of the queue; a queue entry is valid when its CQE valid bit matches this value */ /* Type specific gunk */ union { uint32_t r_idx; /** "read" index (MQ only) */ struct { uint32_t is_mq:1,/** CQ contains MQ/Async completions */ is_hdr:1,/** is a RQ for packet headers */ rq_batch:1;/** RQ index incremented by 8 */ } flag; } u; } sli4_queue_t; static inline void sli_queue_lock(sli4_queue_t *q) { ocs_lock(&q->lock); } static inline void sli_queue_unlock(sli4_queue_t *q) { ocs_unlock(&q->lock); } #define SLI4_QUEUE_DEFAULT_CQ UINT16_MAX /** Use the default CQ */ #define SLI4_QUEUE_RQ_BATCH 8 typedef enum { SLI4_CB_LINK, SLI4_CB_FIP, SLI4_CB_MAX /* must be last */ } sli4_callback_e; typedef enum { SLI_LINK_STATUS_UP, SLI_LINK_STATUS_DOWN, SLI_LINK_STATUS_NO_ALPA, SLI_LINK_STATUS_MAX, } sli4_link_status_e; typedef enum { SLI_LINK_TOPO_NPORT = 1, /** fabric or point-to-point */ SLI_LINK_TOPO_LOOP, SLI_LINK_TOPO_LOOPBACK_INTERNAL, SLI_LINK_TOPO_LOOPBACK_EXTERNAL, SLI_LINK_TOPO_NONE, SLI_LINK_TOPO_MAX, } sli4_link_topology_e; /* TODO do we need both sli4_port_type_e & sli4_link_medium_e */ typedef enum { SLI_LINK_MEDIUM_ETHERNET, SLI_LINK_MEDIUM_FC, SLI_LINK_MEDIUM_MAX, } sli4_link_medium_e; typedef struct sli4_link_event_s { sli4_link_status_e status; /* link up/down */ sli4_link_topology_e topology; sli4_link_medium_e medium; /* Ethernet / FC */ uint32_t speed; /* Mbps */ uint8_t *loop_map; uint32_t fc_id; } sli4_link_event_t; /** * @brief Fields retrieved from skyhawk that are used to build chained SGL */ typedef struct sli4_sgl_chaining_params_s { uint8_t chaining_capable; uint16_t frag_num_field_offset; uint16_t sgl_index_field_offset; uint64_t frag_num_field_mask; uint64_t sgl_index_field_mask; uint32_t chain_sge_initial_value_lo; uint32_t chain_sge_initial_value_hi; } sli4_sgl_chaining_params_t; typedef struct sli4_fip_event_s { uint32_t type; uint32_t index; /* FCF index or UINT32_MAX if invalid */ } sli4_fip_event_t; typedef enum { SLI_RSRC_FCOE_VFI, SLI_RSRC_FCOE_VPI, SLI_RSRC_FCOE_RPI, SLI_RSRC_FCOE_XRI, SLI_RSRC_FCOE_FCFI, SLI_RSRC_MAX /* must be last */ } sli4_resource_e; typedef enum { SLI4_PORT_TYPE_FC, SLI4_PORT_TYPE_NIC, SLI4_PORT_TYPE_MAX /* must be last */ } sli4_port_type_e; typedef enum { SLI4_ASIC_TYPE_BE3 = 1, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_TYPE_LANCERG7 } sli4_asic_type_e; typedef enum { SLI4_ASIC_REV_FPGA = 1, SLI4_ASIC_REV_A0, SLI4_ASIC_REV_A1, SLI4_ASIC_REV_A2, SLI4_ASIC_REV_A3, SLI4_ASIC_REV_B0, SLI4_ASIC_REV_B1, SLI4_ASIC_REV_C0, SLI4_ASIC_REV_D0, } sli4_asic_rev_e; typedef struct sli4_s { ocs_os_handle_t os; sli4_port_type_e port_type; uint32_t sli_rev; /* SLI revision number */ uint32_t sli_family; uint32_t if_type; /* SLI Interface type */ sli4_asic_type_e asic_type; /*<< ASIC type */ sli4_asic_rev_e asic_rev; /*<< ASIC revision */ uint32_t physical_port; struct { uint16_t e_d_tov; uint16_t r_a_tov; uint16_t max_qcount[SLI_QTYPE_MAX]; uint32_t max_qentries[SLI_QTYPE_MAX]; uint16_t count_mask[SLI_QTYPE_MAX]; uint16_t count_method[SLI_QTYPE_MAX]; uint32_t qpage_count[SLI_QTYPE_MAX]; uint16_t link_module_type; uint8_t rq_batch; uint16_t rq_min_buf_size; uint32_t rq_max_buf_size; uint8_t topology; + uint8_t pt:4, +
tf:1, + ptv:1, + :2; uint8_t wwpn[8]; uint8_t wwnn[8]; uint32_t fw_rev[2]; uint8_t fw_name[2][16]; char ipl_name[16]; uint32_t hw_rev[3]; uint8_t port_number; char port_name[2]; char bios_version_string[32]; uint8_t dual_ulp_capable; uint8_t is_ulp_fc[2]; /* * Tracks the port resources using extents metaphor. For * devices that don't implement extents (i.e. * has_extents == FALSE), the code models each resource as * a single large extent. */ struct { uint32_t number; /* number of extents */ uint32_t size; /* number of elements in each extent */ uint32_t n_alloc;/* number of elements allocated */ uint32_t *base; ocs_bitmap_t *use_map;/* bitmap showing resources in use */ uint32_t map_size;/* number of bits in bitmap */ } extent[SLI_RSRC_MAX]; sli4_features_t features; uint32_t has_extents:1, auto_reg:1, auto_xfer_rdy:1, hdr_template_req:1, perf_hint:1, perf_wq_id_association:1, cq_create_version:2, mq_create_version:2, high_login_mode:1, sgl_pre_registered:1, sgl_pre_registration_required:1, t10_dif_inline_capable:1, t10_dif_separate_capable:1; uint32_t sge_supported_length; uint32_t sgl_page_sizes; uint32_t max_sgl_pages; sli4_sgl_chaining_params_t sgl_chaining_params; size_t wqe_size; } config; /* * Callback functions */ int32_t (*link)(void *, void *); void *link_arg; int32_t (*fip)(void *, void *); void *fip_arg; ocs_dma_t bmbx; #if defined(OCS_INCLUDE_DEBUG) /* Save pointer to physical memory descriptor for non-embedded SLI_CONFIG * commands for BMBX dumping purposes */ ocs_dma_t *bmbx_non_emb_pmd; #endif struct { ocs_dma_t data; uint32_t length; } vpd; } sli4_t; /** * Get / set parameter functions */ static inline uint32_t sli_get_max_rsrc(sli4_t *sli4, sli4_resource_e rsrc) { if (rsrc >= SLI_RSRC_MAX) { return 0; } return sli4->config.extent[rsrc].size; } static inline uint32_t sli_get_max_queue(sli4_t *sli4, sli4_qtype_e qtype) { if (qtype >= SLI_QTYPE_MAX) { return 0; } return sli4->config.max_qcount[qtype]; } static inline uint32_t sli_get_max_qentries(sli4_t *sli4, sli4_qtype_e qtype) { return sli4->config.max_qentries[qtype]; } static inline uint32_t sli_get_max_sge(sli4_t *sli4) { return sli4->config.sge_supported_length; } static inline uint32_t sli_get_max_sgl(sli4_t *sli4) { if (sli4->config.sgl_page_sizes != 1) { ocs_log_test(sli4->os, "unsupported SGL page sizes %#x\n", sli4->config.sgl_page_sizes); return 0; } return ((sli4->config.max_sgl_pages * SLI_PAGE_SIZE) / sizeof(sli4_sge_t)); } static inline sli4_link_medium_e sli_get_medium(sli4_t *sli4) { switch (sli4->config.topology) { case SLI4_READ_CFG_TOPO_FCOE: return SLI_LINK_MEDIUM_ETHERNET; case SLI4_READ_CFG_TOPO_FC: case SLI4_READ_CFG_TOPO_FC_DA: case SLI4_READ_CFG_TOPO_FC_AL: return SLI_LINK_MEDIUM_FC; default: return SLI_LINK_MEDIUM_MAX; } } static inline void sli_skh_chain_sge_build(sli4_t *sli4, sli4_sge_t *sge, uint32_t xri_index, uint32_t frag_num, uint32_t offset) { sli4_sgl_chaining_params_t *cparms = &sli4->config.sgl_chaining_params; ocs_memset(sge, 0, sizeof(*sge)); sge->sge_type = SLI4_SGE_TYPE_CHAIN; sge->buffer_address_high = (uint32_t)cparms->chain_sge_initial_value_hi; sge->buffer_address_low = (uint32_t)((cparms->chain_sge_initial_value_lo | (((uintptr_t)(xri_index & cparms->sgl_index_field_mask)) << cparms->sgl_index_field_offset) | (((uintptr_t)(frag_num & cparms->frag_num_field_mask)) << cparms->frag_num_field_offset) | offset) >> 3); } static inline uint32_t sli_get_sli_rev(sli4_t *sli4) { return sli4->sli_rev; } static inline uint32_t sli_get_sli_family(sli4_t *sli4) { return sli4->sli_family; } 
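+
+/*
+ * Editorial usage sketch for the persistent-topology additions (the pt/tf/ptv
+ * config bits above, SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, and
+ * sli_config_persistent_topology() below). Illustrative only: the mailbox
+ * buffer handling is hypothetical, and the 2-bit persistent_topo encoding is
+ * adapter-defined, so the value 1 below is a placeholder.
+ *
+ *	sli4_req_common_set_features_persistent_topo_param_t param = { 0 };
+ *	uint8_t mqe[SLI4_BMBX_SIZE];	// assumes the bootstrap mailbox size
+ *
+ *	param.persistent_topo = 1;	// requested topology (placeholder value)
+ *	param.topo_failover = 0;	// no loop/P2P failover
+ *	sli_cmd_common_set_features(&sli4, mqe, sizeof(mqe),
+ *	    SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, sizeof(param), &param);
+ *	// ...post the mailbox command; on success, mirror the request into
+ *	// the config state:
+ *	sli_config_persistent_topology(&sli4, &param);
+ *
+ * For FC-AL requests, sli_fcal_is_speed_supported() (also below) should be
+ * checked first, since loop operation is not defined at 16G and above.
+ */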
static inline uint32_t sli_get_if_type(sli4_t *sli4) { return sli4->if_type; } static inline void * sli_get_wwn_port(sli4_t *sli4) { return sli4->config.wwpn; } static inline void * sli_get_wwn_node(sli4_t *sli4) { return sli4->config.wwnn; } static inline void * sli_get_vpd(sli4_t *sli4) { return sli4->vpd.data.virt; } static inline uint32_t sli_get_vpd_len(sli4_t *sli4) { return sli4->vpd.length; } static inline uint32_t sli_get_fw_revision(sli4_t *sli4, uint32_t which) { return sli4->config.fw_rev[which]; } static inline void * sli_get_fw_name(sli4_t *sli4, uint32_t which) { return sli4->config.fw_name[which]; } static inline char * sli_get_ipl_name(sli4_t *sli4) { return sli4->config.ipl_name; } static inline uint32_t sli_get_hw_revision(sli4_t *sli4, uint32_t which) { return sli4->config.hw_rev[which]; } static inline uint32_t sli_get_auto_xfer_rdy_capable(sli4_t *sli4) { return sli4->config.auto_xfer_rdy; } static inline uint32_t sli_get_dif_capable(sli4_t *sli4) { return sli4->config.features.flag.dif; } static inline uint32_t sli_is_dif_inline_capable(sli4_t *sli4) { return sli_get_dif_capable(sli4) && sli4->config.t10_dif_inline_capable; } static inline uint32_t sli_is_dif_separate_capable(sli4_t *sli4) { return sli_get_dif_capable(sli4) && sli4->config.t10_dif_separate_capable; } static inline uint32_t sli_get_is_dual_ulp_capable(sli4_t *sli4) { return sli4->config.dual_ulp_capable; } static inline uint32_t sli_get_is_sgl_chaining_capable(sli4_t *sli4) { return sli4->config.sgl_chaining_params.chaining_capable; } static inline uint32_t sli_get_is_ulp_enabled(sli4_t *sli4, uint16_t ulp) { return sli4->config.is_ulp_fc[ulp]; } static inline uint32_t sli_get_hlm_capable(sli4_t *sli4) { return sli4->config.features.flag.hlm; } static inline int32_t sli_set_hlm(sli4_t *sli4, uint32_t value) { if (value && !sli4->config.features.flag.hlm) { ocs_log_test(sli4->os, "HLM not supported\n"); return -1; } sli4->config.high_login_mode = value != 0 ? TRUE : FALSE; return 0; } static inline uint32_t sli_get_hlm(sli4_t *sli4) { return sli4->config.high_login_mode; } static inline uint32_t sli_get_sgl_preregister_required(sli4_t *sli4) { return sli4->config.sgl_pre_registration_required; } static inline uint32_t sli_get_sgl_preregister(sli4_t *sli4) { return sli4->config.sgl_pre_registered; } static inline int32_t sli_set_sgl_preregister(sli4_t *sli4, uint32_t value) { if ((value == 0) && sli4->config.sgl_pre_registration_required) { ocs_log_test(sli4->os, "SGL pre-registration required\n"); return -1; } sli4->config.sgl_pre_registered = value != 0 ? 
TRUE : FALSE; return 0; } static inline sli4_asic_type_e sli_get_asic_type(sli4_t *sli4) { return sli4->asic_type; } static inline sli4_asic_rev_e sli_get_asic_rev(sli4_t *sli4) { return sli4->asic_rev; } static inline int32_t sli_set_topology(sli4_t *sli4, uint32_t value) { int32_t rc = 0; switch (value) { case SLI4_READ_CFG_TOPO_FCOE: case SLI4_READ_CFG_TOPO_FC: case SLI4_READ_CFG_TOPO_FC_DA: case SLI4_READ_CFG_TOPO_FC_AL: sli4->config.topology = value; break; default: ocs_log_test(sli4->os, "unsupported topology %#x\n", value); rc = -1; } return rc; } +static inline void +sli_config_persistent_topology(sli4_t *sli4, sli4_req_common_set_features_persistent_topo_param_t *req) +{ + sli4->config.pt = req->persistent_topo; + sli4->config.tf = req->topo_failover; +} + static inline uint16_t sli_get_link_module_type(sli4_t *sli4) { return sli4->config.link_module_type; } static inline char * sli_get_portnum(sli4_t *sli4) { return sli4->config.port_name; } static inline char * sli_get_bios_version_string(sli4_t *sli4) { return sli4->config.bios_version_string; } static inline uint32_t sli_convert_mask_to_count(uint32_t method, uint32_t mask) { uint32_t count = 0; if (method) { count = 1 << ocs_lg2(mask); count *= 16; } else { count = mask; } return count; } +static inline bool +sli_fcal_is_speed_supported(uint32_t link_speed) +{ + if ((link_speed == FC_LINK_SPEED_16G) || + (link_speed == FC_LINK_SPEED_32G) || + (link_speed >= FC_LINK_SPEED_AUTO_32_16)) { + ocs_log_err(NULL, "unsupported FC-AL speed (speed_code: %d)\n", link_speed); + return FALSE; + } + + return TRUE; +} + /** * @brief Common Create Queue function prototype */ typedef int32_t (*sli4_create_q_fn_t)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t); /** * @brief Common Destroy Queue function prototype */ typedef int32_t (*sli4_destroy_q_fn_t)(sli4_t *, void *, size_t, uint16_t); /**************************************************************************** * Function prototypes */ extern int32_t sli_cmd_config_auto_xfer_rdy(sli4_t *, void *, size_t, uint32_t); extern int32_t sli_cmd_config_auto_xfer_rdy_hp(sli4_t *, void *, size_t, uint32_t, uint32_t, uint32_t); extern int32_t sli_cmd_config_link(sli4_t *, void *, size_t); extern int32_t sli_cmd_down_link(sli4_t *, void *, size_t); extern int32_t sli_cmd_dump_type4(sli4_t *, void *, size_t, uint16_t); extern int32_t sli_cmd_common_read_transceiver_data(sli4_t *, void *, size_t, uint32_t, ocs_dma_t *); extern int32_t sli_cmd_read_link_stats(sli4_t *, void *, size_t,uint8_t, uint8_t, uint8_t); extern int32_t sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size, uint8_t clear_counters); extern int32_t sli_cmd_init_link(sli4_t *, void *, size_t, uint32_t, uint8_t); extern int32_t sli_cmd_init_vfi(sli4_t *, void *, size_t, uint16_t, uint16_t, uint16_t); extern int32_t sli_cmd_init_vpi(sli4_t *, void *, size_t, uint16_t, uint16_t); extern int32_t sli_cmd_post_xri(sli4_t *, void *, size_t, uint16_t, uint16_t); extern int32_t sli_cmd_release_xri(sli4_t *, void *, size_t, uint8_t); extern int32_t sli_cmd_read_sparm64(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t); extern int32_t sli_cmd_read_topology(sli4_t *, void *, size_t, ocs_dma_t *); extern int32_t sli_cmd_read_nvparms(sli4_t *, void *, size_t); extern int32_t sli_cmd_write_nvparms(sli4_t *, void *, size_t, uint8_t *, uint8_t *, uint8_t, uint32_t); typedef struct { uint16_t rq_id; uint8_t r_ctl_mask; uint8_t r_ctl_match; uint8_t type_mask; uint8_t type_match; } sli4_cmd_rq_cfg_t; extern int32_t sli_cmd_reg_fcfi(sli4_t 
*, void *, size_t, uint16_t, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t); extern int32_t sli_cmd_reg_fcfi_mrq(sli4_t *, void *, size_t, uint8_t, uint16_t, uint16_t, uint8_t, uint8_t , uint16_t, sli4_cmd_rq_cfg_t *); extern int32_t sli_cmd_reg_rpi(sli4_t *, void *, size_t, uint32_t, uint16_t, uint16_t, ocs_dma_t *, uint8_t, uint8_t); extern int32_t sli_cmd_reg_vfi(sli4_t *, void *, size_t, ocs_domain_t *); extern int32_t sli_cmd_reg_vpi(sli4_t *, void *, size_t, ocs_sli_port_t *, uint8_t); extern int32_t sli_cmd_sli_config(sli4_t *, void *, size_t, uint32_t, ocs_dma_t *); extern int32_t sli_cmd_unreg_fcfi(sli4_t *, void *, size_t, uint16_t); extern int32_t sli_cmd_unreg_rpi(sli4_t *, void *, size_t, uint16_t, sli4_resource_e, uint32_t); extern int32_t sli_cmd_unreg_vfi(sli4_t *, void *, size_t, ocs_domain_t *, uint32_t); extern int32_t sli_cmd_unreg_vpi(sli4_t *, void *, size_t, uint16_t, uint32_t); extern int32_t sli_cmd_common_nop(sli4_t *, void *, size_t, uint64_t); extern int32_t sli_cmd_common_get_resource_extent_info(sli4_t *, void *, size_t, uint16_t); extern int32_t sli_cmd_common_get_sli4_parameters(sli4_t *, void *, size_t); extern int32_t sli_cmd_common_write_object(sli4_t *, void *, size_t, uint16_t, uint16_t, uint32_t, uint32_t, char *, ocs_dma_t *); extern int32_t sli_cmd_common_delete_object(sli4_t *, void *, size_t, char *); extern int32_t sli_cmd_common_read_object(sli4_t *, void *, size_t, uint32_t, uint32_t, char *, ocs_dma_t *); extern int32_t sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *cmd, ocs_dma_t *resp); extern int32_t sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size, uint8_t query, uint8_t is_buffer_list, ocs_dma_t *buffer, uint8_t fdb); extern int32_t sli_cmd_common_set_features(sli4_t *, void *, size_t, uint32_t, uint32_t, void*); extern int32_t sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size, uint32_t start_profile_index, ocs_dma_t *dma); extern int32_t sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size); extern int32_t sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size, uint32_t fd, uint32_t active_profile_id); extern int32_t sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma); extern int32_t sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint32_t fd, uint32_t active_link_config_id); extern int32_t sli_cmd_common_get_function_config(sli4_t *sli4, void *buf, size_t size); extern int32_t sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma); extern int32_t sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint8_t profile_id, uint32_t descriptor_count, uint8_t isap); extern int32_t sli_cqe_mq(void *); extern int32_t sli_cqe_async(sli4_t *, void *); extern int32_t sli_setup(sli4_t *, ocs_os_handle_t, sli4_port_type_e); extern void sli_calc_max_qentries(sli4_t *sli4); extern int32_t sli_init(sli4_t *); extern int32_t sli_reset(sli4_t *); extern int32_t sli_fw_reset(sli4_t *); extern int32_t sli_teardown(sli4_t *); extern int32_t sli_callback(sli4_t *, sli4_callback_e, void *, void *); extern int32_t sli_bmbx_command(sli4_t *); extern int32_t __sli_queue_init(sli4_t *, sli4_queue_t *, uint32_t, size_t, uint32_t, uint32_t); extern int32_t __sli_create_queue(sli4_t *, sli4_queue_t *); extern int32_t sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, 
uint32_t delay_mult); extern int32_t sli_queue_alloc(sli4_t *, uint32_t, sli4_queue_t *, uint32_t, sli4_queue_t *, uint16_t); extern int32_t sli_cq_alloc_set(sli4_t *, sli4_queue_t *qs[], uint32_t, uint32_t, sli4_queue_t *eqs[]); extern int32_t sli_get_queue_entry_size(sli4_t *, uint32_t); extern int32_t sli_queue_free(sli4_t *, sli4_queue_t *, uint32_t, uint32_t); extern int32_t sli_queue_reset(sli4_t *, sli4_queue_t *); extern int32_t sli_queue_is_empty(sli4_t *, sli4_queue_t *); extern int32_t sli_queue_eq_arm(sli4_t *, sli4_queue_t *, uint8_t); extern int32_t sli_queue_arm(sli4_t *, sli4_queue_t *, uint8_t); extern int32_t _sli_queue_write(sli4_t *, sli4_queue_t *, uint8_t *); extern int32_t sli_queue_write(sli4_t *, sli4_queue_t *, uint8_t *); extern int32_t sli_queue_read(sli4_t *, sli4_queue_t *, uint8_t *); extern int32_t sli_queue_index(sli4_t *, sli4_queue_t *); extern int32_t _sli_queue_poke(sli4_t *, sli4_queue_t *, uint32_t, uint8_t *); extern int32_t sli_queue_poke(sli4_t *, sli4_queue_t *, uint32_t, uint8_t *); extern int32_t sli_resource_alloc(sli4_t *, sli4_resource_e, uint32_t *, uint32_t *); extern int32_t sli_resource_free(sli4_t *, sli4_resource_e, uint32_t); extern int32_t sli_resource_reset(sli4_t *, sli4_resource_e); extern int32_t sli_eq_parse(sli4_t *, uint8_t *, uint16_t *); extern int32_t sli_cq_parse(sli4_t *, sli4_queue_t *, uint8_t *, sli4_qentry_e *, uint16_t *); extern int32_t sli_raise_ue(sli4_t *, uint8_t); extern int32_t sli_dump_is_ready(sli4_t *); extern int32_t sli_dump_is_present(sli4_t *); extern int32_t sli_reset_required(sli4_t *); extern int32_t sli_fw_error_status(sli4_t *); extern int32_t sli_fw_ready(sli4_t *); extern uint32_t sli_reg_read(sli4_t *, sli4_regname_e); extern void sli_reg_write(sli4_t *, sli4_regname_e, uint32_t); extern int32_t sli_link_is_configurable(sli4_t *); #include "ocs_fcp.h" /** * @brief Maximum value for an FCFI * * Note that although most commands provide a 16 bit field for the FCFI, * the FC/FCoE Asynchronous Received CQE format only provides 6 bits for * the returned FCFI. Thus, effectively, the FCFI cannot be larger than * 1 << 6, or 64. */ #define SLI4_MAX_FCFI 64 /** * @brief Maximum value for FCF index * * The SLI-4 specification uses a 16 bit field in most places for the FCF * index, but practically, this value will be much smaller. Arbitrarily * limit the max FCF index to match the max FCFI value. */ #define SLI4_MAX_FCF_INDEX SLI4_MAX_FCFI /************************************************************************* * SLI-4 FC/FCoE mailbox command formats and definitions. */ /** * FC/FCoE opcode (OPC) values. */ #define SLI4_OPC_FCOE_WQ_CREATE 0x1 #define SLI4_OPC_FCOE_WQ_DESTROY 0x2 #define SLI4_OPC_FCOE_POST_SGL_PAGES 0x3 #define SLI4_OPC_FCOE_RQ_CREATE 0x5 #define SLI4_OPC_FCOE_RQ_DESTROY 0x6 #define SLI4_OPC_FCOE_READ_FCF_TABLE 0x8 #define SLI4_OPC_FCOE_POST_HDR_TEMPLATES 0xb #define SLI4_OPC_FCOE_REDISCOVER_FCF 0x10 /* Use the default CQ associated with the WQ */ #define SLI4_CQ_DEFAULT 0xffff typedef struct sli4_physical_page_descriptor_s { uint32_t low; uint32_t high; } sli4_physical_page_descriptor_t; /** * @brief FCOE_WQ_CREATE * * Create a Work Queue for FC/FCoE use.
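+ *
+ * For sizing: with the usual 4 KiB pages, each page holds 4096 /
+ * SLI4_WQE_BYTES (64) = 64 entries, so the V0 limit of 4 pages caps a
+ * WQ at 256 WQEs; deeper queues need the V1 form below.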
*/ #define SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES 4 typedef struct sli4_req_fcoe_wq_create_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:8, dua:1, :7, cq_id:16; sli4_physical_page_descriptor_t page_physical_address[SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES]; uint32_t bqu:1, :7, ulp:8, :16; #else #error big endian version not defined #endif } sli4_req_fcoe_wq_create_t; /** * @brief FCOE_WQ_CREATE_V1 * * Create a version 1 Work Queue for FC/FCoE use. */ typedef struct sli4_req_fcoe_wq_create_v1_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, cq_id:16; uint32_t page_size:8, wqe_size:4, :4, wqe_count:16; uint32_t rsvd6; sli4_physical_page_descriptor_t page_physical_address[8]; #else #error big endian version not defined #endif } sli4_req_fcoe_wq_create_v1_t; #define SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES 8 /** * @brief FCOE_WQ_DESTROY * * Destroy an FC/FCoE Work Queue. */ typedef struct sli4_req_fcoe_wq_destroy_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t wq_id:16, :16; #else #error big endian version not defined #endif } sli4_req_fcoe_wq_destroy_t; /** * @brief FCOE_POST_SGL_PAGES * * Register the scatter gather list (SGL) memory and associate it with an XRI. */ typedef struct sli4_req_fcoe_post_sgl_pages_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t xri_start:16, xri_count:16; struct { uint32_t page0_low; uint32_t page0_high; uint32_t page1_low; uint32_t page1_high; } page_set[10]; #else #error big endian version not defined #endif } sli4_req_fcoe_post_sgl_pages_t; /** * @brief FCOE_RQ_CREATE * * Create a Receive Queue for FC/FCoE use. */ typedef struct sli4_req_fcoe_rq_create_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, dua:1, bqu:1, :6, ulp:8; uint32_t :16, rqe_count:4, :12; uint32_t rsvd6; uint32_t buffer_size:16, cq_id:16; uint32_t rsvd8; sli4_physical_page_descriptor_t page_physical_address[8]; #else #error big endian version not defined #endif } sli4_req_fcoe_rq_create_t; #define SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES 8 #define SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE 128 #define SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE 2048 /** * @brief FCOE_RQ_CREATE_V1 * * Create a version 1 Receive Queue for FC/FCoE use. */ typedef struct sli4_req_fcoe_rq_create_v1_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, :13, dim:1, dfd:1, dnb:1; uint32_t page_size:8, rqe_size:4, :4, rqe_count:16; uint32_t rsvd6; uint32_t :16, cq_id:16; uint32_t buffer_size; sli4_physical_page_descriptor_t page_physical_address[8]; #else #error big endian version not defined #endif } sli4_req_fcoe_rq_create_v1_t; /** * @brief FCOE_RQ_CREATE_V2 * * Create a version 2 Receive Queue for FC/FCoE use. 
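+ *
+ * Unlike V0/V1, one V2 request can create a set of rq_count queues that
+ * share a starting CQ (base_cq_id) and split header and payload buffers
+ * (hdr_buffer_size / payload_buffer_size); this is the form suited to
+ * registering multiple receive queues (see sli_cmd_reg_fcfi_mrq() above).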
*/ typedef struct sli4_req_fcoe_rq_create_v2_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t num_pages:16, rq_count:8, :5, dim:1, dfd:1, dnb:1; uint32_t page_size:8, rqe_size:4, :4, rqe_count:16; uint32_t hdr_buffer_size:16, payload_buffer_size:16; uint32_t base_cq_id:16, :16; uint32_t rsvd; sli4_physical_page_descriptor_t page_physical_address[0]; #else #error big endian version not defined #endif } sli4_req_fcoe_rq_create_v2_t; #define SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES 8 #define SLI4_FCOE_RQ_CREATE_V1_MIN_BUF_SIZE 64 #define SLI4_FCOE_RQ_CREATE_V1_MAX_BUF_SIZE 2048 #define SLI4_FCOE_RQE_SIZE_8 0x2 #define SLI4_FCOE_RQE_SIZE_16 0x3 #define SLI4_FCOE_RQE_SIZE_32 0x4 #define SLI4_FCOE_RQE_SIZE_64 0x5 #define SLI4_FCOE_RQE_SIZE_128 0x6 #define SLI4_FCOE_RQ_PAGE_SIZE_4096 0x1 #define SLI4_FCOE_RQ_PAGE_SIZE_8192 0x2 #define SLI4_FCOE_RQ_PAGE_SIZE_16384 0x4 #define SLI4_FCOE_RQ_PAGE_SIZE_32768 0x8 #define SLI4_FCOE_RQ_PAGE_SIZE_64536 0x10 #define SLI4_FCOE_RQE_SIZE 8 /** * @brief FCOE_RQ_DESTROY * * Destroy an FC/FCoE Receive Queue. */ typedef struct sli4_req_fcoe_rq_destroy_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rq_id:16, :16; #else #error big endian version not defined #endif } sli4_req_fcoe_rq_destroy_t; /** * @brief FCOE_READ_FCF_TABLE * * Retrieve a FCF database (also known as a table) entry created by the SLI Port * during FIP discovery. */ typedef struct sli4_req_fcoe_read_fcf_table_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcf_index:16, :16; #else #error big endian version not defined #endif } sli4_req_fcoe_read_fcf_table_t; /* A FCF index of -1 on the request means return the first valid entry */ #define SLI4_FCOE_FCF_TABLE_FIRST (UINT16_MAX) /** * @brief FCF table entry * * This is the information returned by the FCOE_READ_FCF_TABLE command. */ typedef struct sli4_fcf_entry_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t max_receive_size; uint32_t fip_keep_alive; uint32_t fip_priority; uint8_t fcf_mac_address[6]; uint8_t fcf_available; uint8_t mac_address_provider; uint8_t fabric_name_id[8]; uint8_t fc_map[3]; uint8_t val:1, fc:1, :5, sol:1; uint32_t fcf_index:16, fcf_state:16; uint8_t vlan_bitmap[512]; uint8_t switch_name[8]; #else #error big endian version not defined #endif } sli4_fcf_entry_t; /** * @brief FCOE_READ_FCF_TABLE response. */ typedef struct sli4_res_fcoe_read_fcf_table_s { sli4_res_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t event_tag; uint32_t next_index:16, :16; sli4_fcf_entry_t fcf_entry; #else #error big endian version not defined #endif } sli4_res_fcoe_read_fcf_table_t; /* A next FCF index of -1 in the response means this is the last valid entry */ #define SLI4_FCOE_FCF_TABLE_LAST (UINT16_MAX) /** * @brief FCOE_POST_HDR_TEMPLATES */ typedef struct sli4_req_fcoe_post_hdr_templates_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rpi_offset:16, page_count:16; sli4_physical_page_descriptor_t page_descriptor[0]; #else #error big endian version not defined #endif } sli4_req_fcoe_post_hdr_templates_t; #define SLI4_FCOE_HDR_TEMPLATE_SIZE 64 /** * @brief FCOE_REDISCOVER_FCF */ typedef struct sli4_req_fcoe_rediscover_fcf_s { sli4_req_hdr_t hdr; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcf_count:16, :16; uint32_t rsvd5; uint16_t fcf_index[16]; #else #error big endian version not defined #endif } sli4_req_fcoe_rediscover_fcf_t; /** * Work Queue Entry (WQE) types. 
*/ #define SLI4_WQE_ABORT 0x0f #define SLI4_WQE_ELS_REQUEST64 0x8a #define SLI4_WQE_FCP_IBIDIR64 0xac #define SLI4_WQE_FCP_IREAD64 0x9a #define SLI4_WQE_FCP_IWRITE64 0x98 #define SLI4_WQE_FCP_ICMND64 0x9c #define SLI4_WQE_FCP_TRECEIVE64 0xa1 #define SLI4_WQE_FCP_CONT_TRECEIVE64 0xe5 #define SLI4_WQE_FCP_TRSP64 0xa3 #define SLI4_WQE_FCP_TSEND64 0x9f #define SLI4_WQE_GEN_REQUEST64 0xc2 #define SLI4_WQE_SEND_FRAME 0xe1 #define SLI4_WQE_XMIT_BCAST64 0X84 #define SLI4_WQE_XMIT_BLS_RSP 0x97 #define SLI4_WQE_ELS_RSP64 0x95 #define SLI4_WQE_XMIT_SEQUENCE64 0x82 #define SLI4_WQE_REQUEUE_XRI 0x93 /** * WQE command types. */ #define SLI4_CMD_FCP_IREAD64_WQE 0x00 #define SLI4_CMD_FCP_ICMND64_WQE 0x00 #define SLI4_CMD_FCP_IWRITE64_WQE 0x01 #define SLI4_CMD_FCP_TRECEIVE64_WQE 0x02 #define SLI4_CMD_FCP_TRSP64_WQE 0x03 #define SLI4_CMD_FCP_TSEND64_WQE 0x07 #define SLI4_CMD_GEN_REQUEST64_WQE 0x08 #define SLI4_CMD_XMIT_BCAST64_WQE 0x08 #define SLI4_CMD_XMIT_BLS_RSP64_WQE 0x08 #define SLI4_CMD_ABORT_WQE 0x08 #define SLI4_CMD_XMIT_SEQUENCE64_WQE 0x08 #define SLI4_CMD_REQUEUE_XRI_WQE 0x0A #define SLI4_CMD_SEND_FRAME_WQE 0x0a #define SLI4_WQE_SIZE 0x05 #define SLI4_WQE_EXT_SIZE 0x06 #define SLI4_WQE_BYTES (16 * sizeof(uint32_t)) #define SLI4_WQE_EXT_BYTES (32 * sizeof(uint32_t)) /* Mask for ccp (CS_CTL) */ #define SLI4_MASK_CCP 0xfe /* Upper 7 bits of CS_CTL is priority */ /** * @brief Generic WQE */ typedef struct sli4_generic_wqe_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t cmd_spec0_5[6]; uint32_t xri_tag:16, context_tag:16; uint32_t :2, ct:2, :4, command:8, class:3, :1, pu:2, :2, timer:8; uint32_t abort_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; #else #error big endian version not defined #endif } sli4_generic_wqe_t; /** * @brief WQE used to abort exchanges. */ typedef struct sli4_abort_wqe_s { #if BYTE_ORDER == LITTLE_ENDIAN uint32_t rsvd0; uint32_t rsvd1; uint32_t ext_t_tag; uint32_t ia:1, ir:1, :6, criteria:8, :16; uint32_t ext_t_mask; uint32_t t_mask; uint32_t xri_tag:16, context_tag:16; uint32_t :2, ct:2, :4, command:8, class:3, :1, pu:2, :2, timer:8; uint32_t t_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, :1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; #else #error big endian version not defined #endif } sli4_abort_wqe_t; #define SLI4_ABORT_CRITERIA_XRI_TAG 0x01 #define SLI4_ABORT_CRITERIA_ABORT_TAG 0x02 #define SLI4_ABORT_CRITERIA_REQUEST_TAG 0x03 #define SLI4_ABORT_CRITERIA_EXT_ABORT_TAG 0x04 typedef enum { SLI_ABORT_XRI, SLI_ABORT_ABORT_ID, SLI_ABORT_REQUEST_ID, SLI_ABORT_MAX, /* must be last */ } sli4_abort_type_e; /** * @brief WQE used to create an ELS request. 
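+ *
+ * The els_id field tells the port which ELS is being sent so it can apply
+ * command-specific handling (the SLI4_ELS_REQUEST64_LOGO/_FDISC/_FLOGIN/
+ * _PLOGI values below), and cmd_type distinguishes generic, fabric, and
+ * non-fabric processing (_CMD_GEN/_CMD_FABRIC/_CMD_NON_FABRIC).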
*/ typedef struct sli4_els_request64_wqe_s { sli4_bde_t els_request_payload; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t els_request_payload_length; uint32_t sid:24, sp:1, :7; uint32_t remote_id:24, :8; uint32_t xri_tag:16, context_tag:16; uint32_t :2, ct:2, :4, command:8, class:3, ar:1, pu:2, :2, timer:8; uint32_t abort_tag; uint32_t request_tag:16, temporary_rpi:16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, els_id:3, wqec:1, :8, cq_id:16; sli4_bde_t els_response_payload_bde; uint32_t max_response_payload_length; #else #error big endian version not defined #endif } sli4_els_request64_wqe_t; #define SLI4_ELS_REQUEST64_CONTEXT_RPI 0x0 #define SLI4_ELS_REQUEST64_CONTEXT_VPI 0x1 #define SLI4_ELS_REQUEST64_CONTEXT_VFI 0x2 #define SLI4_ELS_REQUEST64_CONTEXT_FCFI 0x3 #define SLI4_ELS_REQUEST64_CLASS_2 0x1 #define SLI4_ELS_REQUEST64_CLASS_3 0x2 #define SLI4_ELS_REQUEST64_DIR_WRITE 0x0 #define SLI4_ELS_REQUEST64_DIR_READ 0x1 #define SLI4_ELS_REQUEST64_OTHER 0x0 #define SLI4_ELS_REQUEST64_LOGO 0x1 #define SLI4_ELS_REQUEST64_FDISC 0x2 #define SLI4_ELS_REQUEST64_FLOGIN 0x3 #define SLI4_ELS_REQUEST64_PLOGI 0x4 #define SLI4_ELS_REQUEST64_CMD_GEN 0x08 #define SLI4_ELS_REQUEST64_CMD_NON_FABRIC 0x0c #define SLI4_ELS_REQUEST64_CMD_FABRIC 0x0d /** * @brief WQE used to create an FCP initiator no data command. */ typedef struct sli4_fcp_icmnd64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t payload_offset_length:16, fcp_cmd_buffer_length:16; uint32_t rsvd4; uint32_t remote_n_port_id:24, :8; uint32_t xri_tag:16, context_tag:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, :1, pu:2, erp:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t rsvd12; uint32_t rsvd13; uint32_t rsvd14; uint32_t rsvd15; #else #error big endian version not defined #endif } sli4_fcp_icmnd64_wqe_t; /** * @brief WQE used to create an FCP initiator read. */ typedef struct sli4_fcp_iread64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t payload_offset_length:16, fcp_cmd_buffer_length:16; uint32_t total_transfer_length; uint32_t remote_n_port_id:24, :8; uint32_t xri_tag:16, context_tag:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, :1, pu:2, erp:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t rsvd12; #else #error big endian version not defined #endif sli4_bde_t first_data_bde; /* reserved if performance hints disabled */ } sli4_fcp_iread64_wqe_t; /** * @brief WQE used to create an FCP initiator write. 
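+ *
+ * Besides total_transfer_length, this WQE carries initial_transfer_length,
+ * which, judging by the field name, bounds the data sent before the first
+ * XFER_RDY when first-burst is in effect.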
*/ typedef struct sli4_fcp_iwrite64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t payload_offset_length:16, fcp_cmd_buffer_length:16; uint32_t total_transfer_length; uint32_t initial_transfer_length; uint32_t xri_tag:16, context_tag:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, :1, pu:2, erp:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t remote_n_port_id:24, :8; #else #error big endian version not defined #endif sli4_bde_t first_data_bde; } sli4_fcp_iwrite64_wqe_t; typedef struct sli4_fcp_128byte_wqe_s { uint32_t dw[32]; } sli4_fcp_128byte_wqe_t; /** * @brief WQE used to create an FCP target receive, and FCP target * receive continue. */ typedef struct sli4_fcp_treceive64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t payload_offset_length; uint32_t relative_offset; /** * DWord 5 can either be the task retry identifier (HLM=0) or * the remote N_Port ID (HLM=1), or if implementing the Skyhawk * T10-PI workaround, the secondary xri tag */ union { uint32_t sec_xri_tag:16, :16; uint32_t dword; } dword5; uint32_t xri_tag:16, context_tag:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, ar:1, pu:2, conf:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, remote_xid:16; uint32_t ebde_cnt:4, :1, app_id_valid:1, :1, len_loc:2, qosd:1, wchn:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, sr:1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t fcp_data_receive_length; #else #error big endian version not defined #endif sli4_bde_t first_data_bde; /* For performance hints */ } sli4_fcp_treceive64_wqe_t; /** * @brief WQE used to create an FCP target response. */ typedef struct sli4_fcp_trsp64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t fcp_response_length; uint32_t rsvd4; /** * DWord 5 can either be the task retry identifier (HLM=0) or * the remote N_Port ID (HLM=1) */ uint32_t dword5; uint32_t xri_tag:16, rpi:16; uint32_t :2, ct:2, dnrx:1, :3, command:8, class:3, ag:1, pu:2, conf:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, remote_xid:16; uint32_t ebde_cnt:4, :1, app_id_valid:1, :1, len_loc:2, qosd:1, wchn:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, sr:1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t rsvd12; uint32_t rsvd13; uint32_t rsvd14; uint32_t rsvd15; #else #error big endian version not defined #endif } sli4_fcp_trsp64_wqe_t; /** * @brief WQE used to create an FCP target send (DATA IN). 
*/ typedef struct sli4_fcp_tsend64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t payload_offset_length; uint32_t relative_offset; /** * DWord 5 can either be the task retry identifier (HLM=0) or * the remote N_Port ID (HLM=1) */ uint32_t dword5; uint32_t xri_tag:16, rpi:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, ar:1, pu:2, conf:1, lnk:1, timer:8; uint32_t abort_tag; uint32_t request_tag:16, remote_xid:16; uint32_t ebde_cnt:4, :1, app_id_valid:1, :1, len_loc:2, qosd:1, wchn:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, sr:1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t fcp_data_transmit_length; #else #error big endian version not defined #endif sli4_bde_t first_data_bde; /* For performance hints */ } sli4_fcp_tsend64_wqe_t; #define SLI4_IO_CONTINUATION BIT(0) /** The XRI associated with this IO is already active */ #define SLI4_IO_AUTO_GOOD_RESPONSE BIT(1) /** Automatically generate a good RSP frame */ #define SLI4_IO_NO_ABORT BIT(2) #define SLI4_IO_DNRX BIT(3) /** Set the DNRX bit because no auto xfer rdy buffer is posted */ /* WQE DIF field contents */ #define SLI4_DIF_DISABLED 0 #define SLI4_DIF_PASS_THROUGH 1 #define SLI4_DIF_STRIP 2 #define SLI4_DIF_INSERT 3 /** * @brief WQE used to create a general request. */ typedef struct sli4_gen_request64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t request_payload_length; uint32_t relative_offset; uint32_t :8, df_ctl:8, type:8, r_ctl:8; uint32_t xri_tag:16, context_tag:16; uint32_t :2, ct:2, :4, command:8, class:3, :1, pu:2, :2, timer:8; uint32_t abort_tag; uint32_t request_tag:16, :16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t remote_n_port_id:24, :8; uint32_t rsvd13; uint32_t rsvd14; uint32_t max_response_payload_length; #else #error big endian version not defined #endif } sli4_gen_request64_wqe_t; /** * @brief WQE used to create a send frame request. */ typedef struct sli4_send_frame_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t frame_length; uint32_t fc_header_0_1[2]; uint32_t xri_tag:16, context_tag:16; uint32_t :2, ct:2, :4, command:8, class:3, :1, pu:2, :2, timer:8; uint32_t abort_tag; uint32_t request_tag:16, eof:8, sof:8; uint32_t ebde_cnt:4, :3, lenloc:2, qosd:1, wchn:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, :1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t fc_header_2_5[4]; #else #error big endian version not defined #endif } sli4_send_frame_wqe_t; /** * @brief WQE used to create a transmit sequence. */ typedef struct sli4_xmit_sequence64_wqe_s { sli4_bde_t bde; #if BYTE_ORDER == LITTLE_ENDIAN uint32_t remote_n_port_id:24, :8; uint32_t relative_offset; uint32_t :2, si:1, ft:1, :2, xo:1, ls:1, df_ctl:8, type:8, r_ctl:8; uint32_t xri_tag:16, context_tag:16; uint32_t dif:2, ct:2, bs:3, :1, command:8, class:3, :1, pu:2, :2, timer:8; uint32_t abort_tag; uint32_t request_tag:16, remote_xid:16; uint32_t ebde_cnt:4, :3, len_loc:2, qosd:1, :1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1, pri:3, pv:1, eat:1, xc:1, sr:1, ccpe:1, ccp:8; uint32_t cmd_type:4, :3, wqec:1, :8, cq_id:16; uint32_t sequence_payload_len; uint32_t rsvd13; uint32_t rsvd14; uint32_t rsvd15; #else #error big endian version not defined #endif } sli4_xmit_sequence64_wqe_t; /** * @brief WQE used to unblock the specified XRI and release it to the SLI Port's free pool.
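+ *
+ * This pairs with the SLI4_IO_DNRX flag above: when a target response is
+ * posted with DNRX because no auto xfer rdy buffer was available, the XRI
+ * can later be handed back to the port's free pool with this WQE.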
/**
 * @brief WQE used to unblock the specified XRI and to release it to the SLI Port's free pool.
 */
typedef struct sli4_requeue_xri_wqe_s {
	uint32_t	rsvd0;
	uint32_t	rsvd1;
	uint32_t	rsvd2;
	uint32_t	rsvd3;
	uint32_t	rsvd4;
	uint32_t	rsvd5;
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	xri_tag:16,
			context_tag:16;
	uint32_t	:2, ct:2, :4,
			command:8, class:3, :1,
			pu:2, :2, timer:8;
	uint32_t	rsvd8;
	uint32_t	request_tag:16,
			:16;
	uint32_t	ebde_cnt:4, :3, len_loc:2, qosd:1,
			wchn:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1,
			pri:3, pv:1, eat:1, xc:1, :1,
			ccpe:1, ccp:8;
	uint32_t	cmd_type:4, :3, wqec:1, :8,
			cq_id:16;
	uint32_t	rsvd12;
	uint32_t	rsvd13;
	uint32_t	rsvd14;
	uint32_t	rsvd15;
#else
#error big endian version not defined
#endif
} sli4_requeue_xri_wqe_t;

/**
 * @brief WQE used to send a single frame sequence to the broadcast address.
 */
typedef struct sli4_xmit_bcast64_wqe_s {
	sli4_bde_t	sequence_payload;
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	sequence_payload_length;
	uint32_t	rsvd4;
	uint32_t	:8, df_ctl:8, type:8, r_ctl:8;
	uint32_t	xri_tag:16,
			context_tag:16;
	uint32_t	:2, ct:2, :4,
			command:8, class:3, :1,
			pu:2, :2, timer:8;
	uint32_t	abort_tag;
	uint32_t	request_tag:16,
			temporary_rpi:16;
	uint32_t	ebde_cnt:4, :3, len_loc:2, qosd:1,
			:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1,
			pri:3, pv:1, eat:1, xc:1, :1,
			ccpe:1, ccp:8;
	uint32_t	cmd_type:4, :3, wqec:1, :8,
			cq_id:16;
	uint32_t	rsvd12;
	uint32_t	rsvd13;
	uint32_t	rsvd14;
	uint32_t	rsvd15;
#else
#error big endian version not defined
#endif
} sli4_xmit_bcast64_wqe_t;

/**
 * @brief WQE used to create a BLS response.
 */
typedef struct sli4_xmit_bls_rsp_wqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	payload_word0;
	uint32_t	rx_id:16,
			ox_id:16;
	uint32_t	high_seq_cnt:16,
			low_seq_cnt:16;
	uint32_t	rsvd3;
	uint32_t	local_n_port_id:24,
			:8;
	uint32_t	remote_id:24, :6,
			ar:1, xo:1;
	uint32_t	xri_tag:16,
			context_tag:16;
	uint32_t	:2, ct:2, :4,
			command:8, class:3, :1,
			pu:2, :2, timer:8;
	uint32_t	abort_tag;
	uint32_t	request_tag:16,
			:16;
	uint32_t	ebde_cnt:4, :3, len_loc:2, qosd:1,
			:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1,
			pri:3, pv:1, eat:1, xc:1, :1,
			ccpe:1, ccp:8;
	uint32_t	cmd_type:4, :3, wqec:1, :8,
			cq_id:16;
	uint32_t	temporary_rpi:16,
			:16;
	uint32_t	rsvd13;
	uint32_t	rsvd14;
	uint32_t	rsvd15;
#else
#error big endian version not defined
#endif
} sli4_xmit_bls_rsp_wqe_t;

typedef enum {
	SLI_BLS_ACC,
	SLI_BLS_RJT,
	SLI_BLS_MAX
} sli_bls_type_e;

typedef struct sli_bls_payload_s {
	sli_bls_type_e	type;
	uint16_t	ox_id;
	uint16_t	rx_id;
	union {
		struct {
			uint32_t	seq_id_validity:8,
					seq_id_last:8,
					:16;
			uint16_t	ox_id;
			uint16_t	rx_id;
			uint16_t	low_seq_cnt;
			uint16_t	high_seq_cnt;
		} acc;
		struct {
			uint32_t	vendor_unique:8,
					reason_explanation:8,
					reason_code:8,
					:8;
		} rjt;
	} u;
} sli_bls_payload_t;

/**
 * @brief WQE used to create an ELS response.
 */
typedef struct sli4_xmit_els_rsp64_wqe_s {
	sli4_bde_t	els_response_payload;
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	els_response_payload_length;
	uint32_t	s_id:24, sp:1, :7;
	uint32_t	remote_id:24,
			:8;
	uint32_t	xri_tag:16,
			context_tag:16;
	uint32_t	:2, ct:2, :4,
			command:8, class:3, :1,
			pu:2, :2, timer:8;
	uint32_t	abort_tag;
	uint32_t	request_tag:16,
			ox_id:16;
	uint32_t	ebde_cnt:4, :3, len_loc:2, qosd:1,
			:1, xbl:1, hlm:1, iod:1, dbde:1, wqes:1,
			pri:3, pv:1, eat:1, xc:1, :1,
			ccpe:1, ccp:8;
	uint32_t	cmd_type:4, :3, wqec:1, :8,
			cq_id:16;
	uint32_t	temporary_rpi:16,
			:16;
	uint32_t	rsvd13;
	uint32_t	rsvd14;
	uint32_t	rsvd15;
#else
#error big endian version not defined
#endif
} sli4_xmit_els_rsp64_wqe_t;
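/*
 * Illustrative sketch (not part of the driver): filling a
 * sli_bls_payload_t for a BA_ACC before passing it to
 * sli_xmit_bls_rsp64_wqe(). The exchange IDs would come from the ABTS
 * being answered; the helper name and the high_seq_cnt value are
 * assumptions for illustration only.
 */
static inline void
sli_example_bls_acc_fill(sli_bls_payload_t *bls, uint16_t ox_id, uint16_t rx_id)
{
	ocs_memset(bls, 0, sizeof(*bls));
	bls->type = SLI_BLS_ACC;
	bls->ox_id = ox_id;
	bls->rx_id = rx_id;
	bls->u.acc.ox_id = ox_id;
	bls->u.acc.rx_id = rx_id;
	bls->u.acc.low_seq_cnt = 0;
	bls->u.acc.high_seq_cnt = UINT16_MAX;	/* accept the whole sequence range */
}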
/**
 * @brief Asynchronous Event: Link State ACQE.
 */
typedef struct sli4_link_state_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	link_number:6,
			link_type:2,
			port_link_status:8,
			port_duplex:8,
			port_speed:8;
	uint32_t	port_fault:8,
			:8,
			logical_link_speed:16;
	uint32_t	event_tag;
	uint32_t	:8,
			event_code:8,
			event_type:8,	/** values are protocol specific */
			:6,
			ae:1,		/** async event - this is an ACQE */
			val:1;		/** valid - contents of CQE are valid */
#else
#error big endian version not defined
#endif
} sli4_link_state_t;

#define SLI4_LINK_ATTN_TYPE_LINK_UP		0x01
#define SLI4_LINK_ATTN_TYPE_LINK_DOWN		0x02
#define SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA	0x03

#define SLI4_LINK_ATTN_P2P			0x01
#define SLI4_LINK_ATTN_FC_AL			0x02
#define SLI4_LINK_ATTN_INTERNAL_LOOPBACK	0x03
#define SLI4_LINK_ATTN_SERDES_LOOPBACK		0x04

#define SLI4_LINK_ATTN_1G			0x01
#define SLI4_LINK_ATTN_2G			0x02
#define SLI4_LINK_ATTN_4G			0x04
#define SLI4_LINK_ATTN_8G			0x08
#define SLI4_LINK_ATTN_10G			0x0a
#define SLI4_LINK_ATTN_16G			0x10

#define SLI4_LINK_TYPE_ETHERNET			0x0
#define SLI4_LINK_TYPE_FC			0x1

/**
 * @brief Asynchronous Event: FC Link Attention Event.
 */
typedef struct sli4_link_attention_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	link_number:8,
			attn_type:8,
			topology:8,
			port_speed:8;
	uint32_t	port_fault:8,
			shared_link_status:8,
			logical_link_speed:16;
	uint32_t	event_tag;
	uint32_t	:8,
			event_code:8,
			event_type:8,	/** values are protocol specific */
			:6,
			ae:1,		/** async event - this is an ACQE */
			val:1;		/** valid - contents of CQE are valid */
#else
#error big endian version not defined
#endif
} sli4_link_attention_t;

/**
 * @brief FC/FCoE event types.
 */
#define SLI4_LINK_STATE_PHYSICAL		0x00
#define SLI4_LINK_STATE_LOGICAL			0x01

#define SLI4_FCOE_FIP_FCF_DISCOVERED		0x01
#define SLI4_FCOE_FIP_FCF_TABLE_FULL		0x02
#define SLI4_FCOE_FIP_FCF_DEAD			0x03
#define SLI4_FCOE_FIP_FCF_CLEAR_VLINK		0x04
#define SLI4_FCOE_FIP_FCF_MODIFIED		0x05

#define SLI4_GRP5_QOS_SPEED			0x01

#define SLI4_FC_EVENT_LINK_ATTENTION		0x01
#define SLI4_FC_EVENT_SHARED_LINK_ATTENTION	0x02

#define SLI4_PORT_SPEED_NO_LINK			0x0
#define SLI4_PORT_SPEED_10_MBPS			0x1
#define SLI4_PORT_SPEED_100_MBPS		0x2
#define SLI4_PORT_SPEED_1_GBPS			0x3
#define SLI4_PORT_SPEED_10_GBPS			0x4

#define SLI4_PORT_DUPLEX_NONE			0x0
#define SLI4_PORT_DUPLEX_HWF			0x1
#define SLI4_PORT_DUPLEX_FULL			0x2

#define SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN	0x0
#define SLI4_PORT_LINK_STATUS_PHYSICAL_UP	0x1
#define SLI4_PORT_LINK_STATUS_LOGICAL_DOWN	0x2
#define SLI4_PORT_LINK_STATUS_LOGICAL_UP	0x3

/**
 * @brief Asynchronous Event: FCoE/FIP ACQE.
 */
typedef struct sli4_fcoe_fip_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	event_information;
	uint32_t	fcf_count:16,
			fcoe_event_type:16;
	uint32_t	event_tag;
	uint32_t	:8,
			event_code:8,
			event_type:8,	/** values are protocol specific */
			:6,
			ae:1,		/** async event - this is an ACQE */
			val:1;		/** valid - contents of CQE are valid */
#else
#error big endian version not defined
#endif
} sli4_fcoe_fip_t;

/**
 * @brief FC/FCoE WQ completion queue entry.
 */
typedef struct sli4_fc_wcqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	hw_status:8,
			status:8,
			request_tag:16;
	uint32_t	wqe_specific_1;
	uint32_t	wqe_specific_2;
	uint32_t	:15, qx:1, code:8,
			pri:3, pv:1, xb:1, :2, vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_wcqe_t;
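/*
 * Illustrative sketch (not part of the driver): classifying an FC link
 * attention ACQE with the SLI4_LINK_ATTN_TYPE_* values above; real
 * handling lives in sli_fc_process_link_attention().
 */
static inline const char *
sli4_example_attn_type_str(const sli4_link_attention_t *acqe)
{
	switch (acqe->attn_type) {
	case SLI4_LINK_ATTN_TYPE_LINK_UP:	return "link up";
	case SLI4_LINK_ATTN_TYPE_LINK_DOWN:	return "link down";
	case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA:	return "no hard ALPA";
	default:				return "unknown";
	}
}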
/**
 * @brief FC/FCoE WQ consumed CQ queue entry.
 */
typedef struct sli4_fc_wqec_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:32;
	uint32_t	:32;
	uint32_t	wqe_index:16,
			wq_id:16;
	uint32_t	:16, code:8, :7, vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_wqec_t;

/**
 * @brief FC/FCoE Completion Status Codes.
 */
#define SLI4_FC_WCQE_STATUS_SUCCESS		0x00
#define SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE	0x01
#define SLI4_FC_WCQE_STATUS_REMOTE_STOP		0x02
#define SLI4_FC_WCQE_STATUS_LOCAL_REJECT	0x03
#define SLI4_FC_WCQE_STATUS_NPORT_RJT		0x04
#define SLI4_FC_WCQE_STATUS_FABRIC_RJT		0x05
#define SLI4_FC_WCQE_STATUS_NPORT_BSY		0x06
#define SLI4_FC_WCQE_STATUS_FABRIC_BSY		0x07
#define SLI4_FC_WCQE_STATUS_LS_RJT		0x09
#define SLI4_FC_WCQE_STATUS_CMD_REJECT		0x0b
#define SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK	0x0c
#define SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED	0x11
#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED 0x12
#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC	0x13
#define SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE	0x14
#define SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE	0x15
#define SLI4_FC_WCQE_STATUS_DI_ERROR		0x16
#define SLI4_FC_WCQE_STATUS_BA_RJT		0x17
#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED 0x18
#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC	0x19
#define SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT	0x1a
#define SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST	0x1b

/* Driver-generated status codes; these must not overlap the SLI Port's status codes! */
#define SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT	0xff
#define SLI4_FC_WCQE_STATUS_SHUTDOWN		0xfe
#define SLI4_FC_WCQE_STATUS_DISPATCH_ERROR	0xfd

/**
 * @brief DI_ERROR Extended Status
 */
#define SLI4_FC_DI_ERROR_GE	(1 << 0) /* Guard Error */
#define SLI4_FC_DI_ERROR_AE	(1 << 1) /* Application Tag Error */
#define SLI4_FC_DI_ERROR_RE	(1 << 2) /* Reference Tag Error */
#define SLI4_FC_DI_ERROR_TDPV	(1 << 3) /* Total Data Placed Valid */
#define SLI4_FC_DI_ERROR_UDB	(1 << 4) /* Uninitialized DIF Block */
#define SLI4_FC_DI_ERROR_EDIR	(1 << 5) /* Error direction */
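/*
 * Illustrative sketch (not part of the driver): the DI_ERROR extended
 * status is a bit mask, so a single completion can report several
 * conditions at once. A hypothetical check for the T10-PI tag errors:
 */
static inline int
sli4_example_di_error_is_tag_error(uint32_t ext_status)
{
	return (ext_status & (SLI4_FC_DI_ERROR_GE | SLI4_FC_DI_ERROR_AE |
	    SLI4_FC_DI_ERROR_RE)) != 0;
}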
/**
 * @brief Local Reject Reason Codes.
 */
#define SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE		0x01
#define SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT		0x02
#define SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR		0x03
#define SLI4_FC_LOCAL_REJECT_INVALID_RPI		0x04
#define SLI4_FC_LOCAL_REJECT_NO_XRI			0x05
#define SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND		0x06
#define SLI4_FC_LOCAL_REJECT_XCHG_DROPPED		0x07
#define SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD		0x08
#define SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH		0x0c
#define SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED		0x0d
#define SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED		0x0e
#define SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME		0x0f
#define SLI4_FC_LOCAL_REJECT_NO_RESOURCES		0x11
#define SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE		0x12
#define SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH		0x13
#define SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE	0x14
#define SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS		0x15
#define SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED		0x16
#define SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT		0x17
#define SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE		0x18
#define SLI4_FC_LOCAL_REJECT_LINK_DOWN			0x1a
#define SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA		0x1b
#define SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI		0x1c
#define SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_DATA		0x1d
#define SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_ACK		0x1e
#define SLI4_FC_LOCAL_REJECT_DUP_FRAME			0x1f
#define SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME		0x20
#define SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS		0x21
#define SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER		0x23
#define SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED	0x24
#define SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED	0x25
#define SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE		0x28
#define SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING		0x29
#define SLI4_FC_LOCAL_REJECT_INVALID_VPI		0x2e
#define SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF		0x30
#define SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET		0x40
#define SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET		0x41
#define SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE		0x42
#define SLI4_FC_LOCAL_REJECT_MISSING_SI			0x43
#define SLI4_FC_LOCAL_REJECT_MISSING_ES			0x44
#define SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER		0x45
#define SLI4_FC_LOCAL_REJECT_SLER_FAILURE		0x46
#define SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE	0x47
#define SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR		0x48
#define SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR	0x49
#define SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR		0x4a
#define SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR		0x4c
#define SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR		0x4d
#define SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR		0x4e

typedef struct sli4_fc_async_rcqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:8,
			status:8,
			rq_element_index:12,
			:4;
	uint32_t	rsvd1;
	uint32_t	fcfi:6,
			rq_id:10,
			payload_data_placement_length:16;
	uint32_t	sof_byte:8,
			eof_byte:8,
			code:8,
			header_data_placement_length:6,
			:1,
			vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_async_rcqe_t;

typedef struct sli4_fc_async_rcqe_v1_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:8,
			status:8,
			rq_element_index:12,
			:4;
	uint32_t	fcfi:6,
			:26;
	uint32_t	rq_id:16,
			payload_data_placement_length:16;
	uint32_t	sof_byte:8,
			eof_byte:8,
			code:8,
			header_data_placement_length:6,
			:1,
			vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_async_rcqe_v1_t;

#define SLI4_FC_ASYNC_RQ_SUCCESS		0x10
#define SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED	0x11
#define SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED	0x12
#define SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC	0x13
#define SLI4_FC_ASYNC_RQ_DMA_FAILURE		0x14
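/*
 * Illustrative sketch (not part of the driver): the v0 and v1 async
 * RCQE layouts place the RQ ID differently (a 10-bit field packed next
 * to the FCFI vs. a full 16-bit field), which is why callers such as
 * sli_fc_rqe_rqid_and_index() must know which layout they hold. A
 * simplified accessor, with the v0/v1 choice supplied by the caller:
 */
static inline uint16_t
sli4_example_rcqe_rq_id(const void *cqe, int is_v1)
{
	if (is_v1)
		return ((const sli4_fc_async_rcqe_v1_t *)cqe)->rq_id;
	return ((const sli4_fc_async_rcqe_t *)cqe)->rq_id;
}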
typedef struct sli4_fc_coalescing_rcqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:8,
			status:8,
			rq_element_index:12,
			:4;
	uint32_t	rsvd1;
	uint32_t	rq_id:16,
			sequence_reporting_placement_length:16;
	uint32_t	:16, code:8, :7, vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_coalescing_rcqe_t;

#define SLI4_FC_COALESCE_RQ_SUCCESS		0x10
#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED	0x18

typedef struct sli4_fc_optimized_write_cmd_cqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:8,
			status:8,
			rq_element_index:15,
			iv:1;
	uint32_t	fcfi:6,
			:8,
			oox:1,
			agxr:1,
			xri:16;
	uint32_t	rq_id:16,
			payload_data_placement_length:16;
	uint32_t	rpi:16,
			code:8,
			header_data_placement_length:6,
			:1,
			vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_optimized_write_cmd_cqe_t;

typedef struct sli4_fc_optimized_write_data_cqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	hw_status:8,
			status:8,
			xri:16;
	uint32_t	total_data_placed;
	uint32_t	extended_status;
	uint32_t	:16, code:8,
			pri:3, pv:1, xb:1, rha:1, :1, vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_optimized_write_data_cqe_t;

typedef struct sli4_fc_xri_aborted_cqe_s {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint32_t	:8,
			status:8,
			:16;
	uint32_t	extended_status;
	uint32_t	xri:16,
			remote_xid:16;
	uint32_t	:16, code:8,
			xr:1, :3, eo:1, br:1, ia:1, vld:1;
#else
#error big endian version not defined
#endif
} sli4_fc_xri_aborted_cqe_t;

/**
 * Code definitions applicable to all FC/FCoE CQE types.
 */
#define SLI4_CQE_CODE_OFFSET			14

#define SLI4_CQE_CODE_WORK_REQUEST_COMPLETION	0x01
#define SLI4_CQE_CODE_RELEASE_WQE		0x02
#define SLI4_CQE_CODE_RQ_ASYNC			0x04
#define SLI4_CQE_CODE_XRI_ABORTED		0x05
#define SLI4_CQE_CODE_RQ_COALESCING		0x06
#define SLI4_CQE_CODE_RQ_CONSUMPTION		0x07
#define SLI4_CQE_CODE_MEASUREMENT_REPORTING	0x08
#define SLI4_CQE_CODE_RQ_ASYNC_V1		0x09
#define SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD	0x0B
#define SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA	0x0C

extern int32_t sli_fc_process_link_state(sli4_t *, void *);
extern int32_t sli_fc_process_link_attention(sli4_t *, void *);
extern int32_t sli_fc_cqe_parse(sli4_t *, sli4_queue_t *, uint8_t *, sli4_qentry_e *, uint16_t *);
extern uint32_t sli_fc_response_length(sli4_t *, uint8_t *);
extern uint32_t sli_fc_io_length(sli4_t *, uint8_t *);
extern int32_t sli_fc_els_did(sli4_t *, uint8_t *, uint32_t *);
extern uint32_t sli_fc_ext_status(sli4_t *, uint8_t *);
extern int32_t sli_fc_rqe_rqid_and_index(sli4_t *, uint8_t *, uint16_t *, uint32_t *);
extern int32_t sli_fc_process_fcoe(sli4_t *, void *);
extern int32_t sli_cmd_fcoe_wq_create(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t);
extern int32_t sli_cmd_fcoe_wq_create_v1(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t);
extern int32_t sli_cmd_fcoe_wq_destroy(sli4_t *, void *, size_t, uint16_t);
extern int32_t sli_cmd_fcoe_post_sgl_pages(sli4_t *, void *, size_t, uint16_t, uint32_t, ocs_dma_t **, ocs_dma_t **, ocs_dma_t *);
extern int32_t sli_cmd_fcoe_rq_create(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
extern int32_t sli_cmd_fcoe_rq_create_v1(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
extern int32_t sli_cmd_fcoe_rq_destroy(sli4_t *, void *, size_t, uint16_t);
extern int32_t sli_cmd_fcoe_read_fcf_table(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t);
extern int32_t sli_cmd_fcoe_post_hdr_templates(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, ocs_dma_t *);
extern int32_t sli_cmd_fcoe_rediscover_fcf(sli4_t *, void *, size_t, uint16_t);
extern int32_t sli_fc_rq_alloc(sli4_t *, sli4_queue_t *, uint32_t, uint32_t, sli4_queue_t *, uint16_t, uint8_t);
extern int32_t sli_fc_rq_set_alloc(sli4_t *, uint32_t, sli4_queue_t *[],
		uint32_t, uint32_t, uint32_t, uint32_t, uint16_t);
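/*
 * Illustrative sketch (not part of the driver): every FC/FCoE CQE
 * carries its code in byte SLI4_CQE_CODE_OFFSET, which is how
 * sli_fc_rqe_fcfi() below (and sli_fc_cqe_parse()) identify the entry
 * type before casting to a specific layout:
 */
static inline uint8_t
sli4_example_cqe_code(const void *cqe)
{
	return ((const uint8_t *)cqe)[SLI4_CQE_CODE_OFFSET];
}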
extern uint32_t sli_fc_get_rpi_requirements(sli4_t *, uint32_t);
extern int32_t sli_abort_wqe(sli4_t *, void *, size_t, sli4_abort_type_e, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t);
extern int32_t sli_els_request64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint8_t, uint32_t, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *);
extern int32_t sli_fcp_iread64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
extern int32_t sli_fcp_iwrite64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
extern int32_t sli_fcp_icmnd64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t);
extern int32_t sli_fcp_treceive64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
extern int32_t sli_fcp_trsp64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint32_t);
extern int32_t sli_fcp_tsend64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
extern int32_t sli_fcp_cont_treceive64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
extern int32_t sli_gen_request64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
extern int32_t sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr, ocs_dma_t *payload, uint32_t req_len, uint8_t timeout, uint16_t xri, uint16_t req_tag);
extern int32_t sli_xmit_sequence64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
extern int32_t sli_xmit_bcast64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
extern int32_t sli_xmit_bls_rsp64_wqe(sli4_t *, void *, size_t, sli_bls_payload_t *, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint32_t);
extern int32_t sli_xmit_els_rsp64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint32_t, uint32_t);
extern int32_t sli_requeue_xri_wqe(sli4_t *, void *, size_t, uint16_t, uint16_t, uint16_t);
extern void sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout);
+extern bool sli_persist_topology_enabled(sli4_t *sli4);
+
/**
 * @ingroup sli_fc
 * @brief Retrieve the received header and payload length.
 *
 * @param sli4 SLI context.
 * @param cqe Pointer to the CQ entry.
 * @param len_hdr Pointer where the header length is written.
 * @param len_data Pointer where the payload length is written.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static inline int32_t
sli_fc_rqe_length(sli4_t *sli4, void *cqe, uint32_t *len_hdr, uint32_t *len_data)
{
	sli4_fc_async_rcqe_t	*rcqe = cqe;

	*len_hdr = *len_data = 0;

	if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
		*len_hdr  = rcqe->header_data_placement_length;
		*len_data = rcqe->payload_data_placement_length;
		return 0;
	} else {
		return -1;
	}
}

/**
 * @ingroup sli_fc
 * @brief Retrieve the received FCFI.
 *
 * @param sli4 SLI context.
 * @param cqe Pointer to the CQ entry.
 *
 * @return Returns the FCFI in the CQE, or UINT8_MAX if the CQE code is invalid.
 */
static inline uint8_t
sli_fc_rqe_fcfi(sli4_t *sli4, void *cqe)
{
	uint8_t code = ((uint8_t *)cqe)[SLI4_CQE_CODE_OFFSET];
	uint8_t fcfi = UINT8_MAX;

	switch (code) {
	case SLI4_CQE_CODE_RQ_ASYNC: {
		sli4_fc_async_rcqe_t *rcqe = cqe;
		fcfi = rcqe->fcfi;
		break;
	}
	case SLI4_CQE_CODE_RQ_ASYNC_V1: {
		sli4_fc_async_rcqe_v1_t *rcqev1 = cqe;
		fcfi = rcqev1->fcfi;
		break;
	}
	case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: {
		sli4_fc_optimized_write_cmd_cqe_t *opt_wr = cqe;
		fcfi = opt_wr->fcfi;
		break;
	}
	}

	return fcfi;
}

extern const char *sli_fc_get_status_string(uint32_t status);

#endif /* !_SLI4_H */