diff --git a/sys/dev/ocs_fc/ocs_device.c b/sys/dev/ocs_fc/ocs_device.c
index 7f0c5526b1c3..d9c283541d3c 100644
--- a/sys/dev/ocs_fc/ocs_device.c
+++ b/sys/dev/ocs_fc/ocs_device.c
@@ -1,1915 +1,1915 @@
/*-
* Copyright (c) 2017 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* Implement remote device state machine for target and initiator.
*/
/*!
@defgroup device_sm Node State Machine: Remote Device States
*/
#include "ocs.h"
#include "ocs_device.h"
#include "ocs_fabric.h"
#include "ocs_els.h"
static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);
/**
* @ingroup device_sm
* @brief Send response to PRLI.
*
*
 * Description
* For device nodes, this function sends a PRLI response.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id OX_ID of PRLI
*
* @return Returns None.
*/
void
ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
{
	ocs_t *ocs = io->ocs;
	ocs_node_t *node = io->node;

	/* If the back-end doesn't support the fc-type, we send an LS_RJT */
	if (ocs->fc_type != node->fc_type) {
		node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
		ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
				FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		/* Fix: must stop here. Previously control fell through to the
		 * initiator-validation check below; if that check passed, a
		 * PRLI ACC was sent on top of the LS_RJT and the node was
		 * re-transitioned to the ready state after shutdown had
		 * already been initiated. */
		return;
	}

	/* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
	if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
		node_printf(node, "PRLI rejected by target-server\n");
		ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
				FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
	} else {
		/*sm: process PRLI payload, send PRLI acc */
		ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);

		/* Immediately go to ready state to avoid window where we're
		 * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
		 */
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
	}
}
/**
* @ingroup device_sm
* @brief Device node state machine: Initiate node shutdown
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER: {
		int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */

		/* block any further SCSI IO allocation against this node */
		ocs_scsi_io_alloc_disable(node);

		/* make necessary delete upcall(s) */
		if (node->init && !node->targ) {
			/* initiator-only: delete upcall to the target-server */
			ocs_log_debug(node->ocs,
				"[%s] delete (initiator) WWPN %s WWNN %s\n",
				node->display_name, node->wwpn, node->wwnn);
			/* transition BEFORE the upcall: the backend may post
			 * DEL_INI_COMPLETE synchronously from within the call */
			ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
			if (node->sport->enable_tgt) {
				rc = ocs_scsi_del_initiator(node,
						OCS_SCSI_INITIATOR_DELETED);
			}
			/* rc is still CALL_COMPLETE when no upcall was made or
			 * the backend finished synchronously; post completion
			 * ourselves so the wait state can advance */
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
					OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
			}
		} else if (node->targ && !node->init) {
			/* target-only: delete upcall to the initiator-client */
			ocs_log_debug(node->ocs,
				"[%s] delete (target) WWPN %s WWNN %s\n",
				node->display_name, node->wwpn, node->wwnn);
			ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
			if (node->sport->enable_ini) {
				rc = ocs_scsi_del_target(node,
						OCS_SCSI_TARGET_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
					OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
			}
		} else if (node->init && node->targ) {
			/* I+T node: both upcalls are needed; wait state expects
			 * both DEL_INI_COMPLETE and DEL_TGT_COMPLETE */
			ocs_log_debug(node->ocs,
				"[%s] delete (initiator+target) WWPN %s WWNN %s\n",
				node->display_name, node->wwpn, node->wwnn);
			ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
			if (node->sport->enable_tgt) {
				rc = ocs_scsi_del_initiator(node,
						OCS_SCSI_INITIATOR_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
					OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
			}
			/* rc is reset: the result of the initiator delete must
			 * not leak into the target-delete decision below */
			rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
			if (node->sport->enable_ini) {
				rc = ocs_scsi_del_target(node,
						OCS_SCSI_TARGET_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
					OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
			}
		}

		/* we've initiated the upcalls as needed, now kick off the node
		 * detach to precipitate the aborting of outstanding exchanges
		 * associated with said node
		 *
		 * Beware: if we've made upcall(s), we've already transitioned
		 * to a new state by the time we execute this.
		 * TODO: consider doing this before the upcalls...
		 */
		if (node->attached) {
			/* issue hw node free; don't care if succeeds right away
			 * or sometime later, will check node->attached later in
			 * shutdown process
			 */
			rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
			/* release the remote node group if the HW flagged it */
			if (node->rnode.free_group) {
				ocs_remote_node_group_free(node->node_group);
				node->node_group = NULL;
				node->rnode.free_group = FALSE;
			}
			if (rc != OCS_HW_RTN_SUCCESS &&
				rc != OCS_HW_RTN_SUCCESS_SYNC) {
				node_printf(node,
					"Failed freeing HW node, rc=%d\n", rc);
			}
		}

		/* if neither initiator nor target, proceed to cleanup */
		if (!node->init && !node->targ){
			/*
			 * node has either been detached or is in the process
			 * of being detached, call common node's initiate
			 * cleanup function.
			 */
			ocs_node_initiate_cleanup(node);
		}
		break;
	}
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* Ignore, this can happen if an ELS is aborted,
		 * while in a delay/retry state */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Common device event handler.
*
* Description
* For device nodes, this event handler manages default and common events.
*
* @param funcname Function name text.
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
static void *
__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_t *node = NULL;
	ocs_t *ocs = NULL;

	ocs_assert(ctx, NULL);
	node = ctx->app;
	ocs_assert(node, NULL);
	ocs = node->ocs;
	ocs_assert(ocs, NULL);

	switch (evt) {
	/*
	 * All three shutdown flavors are handled identically apart from the
	 * recorded reason: log, tag the node, and enter the shutdown state.
	 */
	case OCS_EVT_SHUTDOWN:
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name,
			funcname, ocs_sm_event_name(evt));
		if (evt == OCS_EVT_SHUTDOWN_EXPLICIT_LOGO) {
			node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
		} else if (evt == OCS_EVT_SHUTDOWN_IMPLICIT_LOGO) {
			node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
		} else {
			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		}
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	default:
		/* anything else goes to the handler common to all nodes */
		__ocs_node_common(funcname, ctx, evt, arg);
		break;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for a domain-attach completion in loop topology.
*
* Description
* State waits for a domain-attached completion while in loop topology.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch (evt) {
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* domain is now attached; bring the device node up (a PLOGI
		 * is sent automatically when we have initiator capability) */
		ocs_node_init_device(node, TRUE);
		break;

	case OCS_EVT_ENTER:
		/* park unsolicited frames until the domain attach finishes */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief state: wait for node resume event
*
* State is entered when a node is in I+T mode and sends a delete initiator/target
* call to the target-server/initiator-client and needs to wait for that work to complete.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg per event optional argument
*
* @return returns NULL
*/
void *
__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		/* Fall through */

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case OCS_EVT_NODE_DEL_INI_COMPLETE:
	case OCS_EVT_NODE_DEL_TGT_COMPLETE:
		/* first of the two delete upcalls has completed; wait for the
		 * remaining one in the single-delete wait state */
		ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as outstanding ELS IOs complete */
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */

	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief state: Wait for node resume event.
*
* State is entered when a node sends a delete initiator/target call to the
* target-server/initiator-client and needs to wait for that work to complete.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		/* Fall through */

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case OCS_EVT_NODE_DEL_INI_COMPLETE:
	case OCS_EVT_NODE_DEL_TGT_COMPLETE:
		/*
		 * node has either been detached or is in the process of being detached,
		 * call common node's initiate cleanup function
		 */
		ocs_node_initiate_cleanup(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as outstanding ELS IOs complete */
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */

	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @brief Save the OX_ID for sending LS_ACC sometime later.
*
* Description
* When deferring the response to an ELS request, the OX_ID of the request
* is saved using this function.
*
* @param io Pointer to a SCSI IO object.
* @param hdr Pointer to the FC header.
* @param ls Defines the type of ELS to send: LS_ACC, LS_ACC for PLOGI;
* or LSS_ACC for PRLI.
*
* @return None.
*/
void
ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
{
	ocs_node_t *node = io->node;

	/* a deferred LS_ACC must not already be pending on this node */
	ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);

	/* stash everything needed to build the LS_ACC once attached */
	node->ls_acc_io = io;
	node->ls_acc_oxid = ocs_be16toh(hdr->ox_id);
	node->ls_acc_did = fc_be24toh(hdr->d_id);
	node->send_ls_acc = ls;
}
/**
* @brief Process the PRLI payload.
*
* Description
* The PRLI payload is processed; the initiator/target capabilities of the
* remote node are extracted and saved in the node object.
*
* @param node Pointer to the node object.
* @param prli Pointer to the PRLI payload.
*
* @return None.
*/
void
ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
{
	/* decode the service-parameter word once instead of per flag */
	uint16_t sparams = ocs_be16toh(prli->service_params);

	node->init = (sparams & FC_PRLI_INITIATOR_FUNCTION) != 0;
	node->targ = (sparams & FC_PRLI_TARGET_FUNCTION) != 0;
	node->fcp2device = (sparams & FC_PRLI_RETRY) != 0;
	node->fc_type = prli->type;
}
/**
* @brief Process the ABTS.
*
* Description
* Common code to process a received ABTS. If an active IO can be found
* that matches the OX_ID of the ABTS request, a call is made to the
* backend. Otherwise, a BA_ACC is returned to the initiator.
*
* @param io Pointer to a SCSI IO object.
* @param hdr Pointer to the FC header.
*
* @return Returns 0 on success, or a negative error value on failure.
*/
static int32_t
ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	uint16_t ox_id = ocs_be16toh(hdr->ox_id);
	uint16_t rx_id = ocs_be16toh(hdr->rx_id);
	ocs_io_t *abortio;

	/* look up the active target IO matching the exchange being aborted */
	abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);

	/* If an IO was found, attempt to take a reference on it */
	if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
		/* Got a reference on the IO. Hold it until backend is notified below */
		node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
			    ox_id, rx_id);

		/*
		 * Save the ox_id for the ABTS as the init_task_tag in our manufactured
		 * TMF IO object
		 */
		io->display_name = "abts";
		io->init_task_tag = ox_id;
		/* don't set tgt_task_tag, don't want to confuse with XRI */

		/*
		 * Save the rx_id from the ABTS as it is needed for the BLS response,
		 * regardless of the IO context's rx_id
		 */
		io->abort_rx_id = rx_id;

		/* Call target server command abort */
		io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
		ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);

		/*
		 * Backend will have taken an additional reference on the IO if needed;
		 * done with current reference.
		 */
		ocs_ref_put(&abortio->ref); /* ocs_ref_get(): same function */
	} else {
		/*
		 * Either IO was not found or it has been freed between finding it
		 * and attempting to get the reference,
		 */
		node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
			    ox_id, (abortio != NULL));

		/* Send a BA_ACC; per FC-FS the ABTS is acknowledged even when
		 * no matching exchange remains */
		ocs_bls_send_acc_hdr(io, hdr);
	}
	return 0;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for the PLOGI accept to complete.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch (evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	/* completion (good or bad) of the PLOGI LS_ACC we sent */
	case OCS_EVT_SRRS_ELS_CMPL_OK:
	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		if (evt == OCS_EVT_SRRS_ELS_CMPL_OK) {
			/* ACC delivered; the remote port is now logged in */
			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
		} else {
			/* could not deliver the ACC; tear the node down */
			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
			ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		}
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for the LOGO response.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch (evt) {
	case OCS_EVT_ENTER:
		/* TODO: may want to remove this;
		 * if we'll want to know about PLOGI */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* any completion of the outstanding LOGO (accepted, rejected
		 * or failed) means we're done with this node: shut it down */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO,
		    __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		node_printf(node, "LOGO sent (evt=%s), shutdown node\n",
		    ocs_sm_event_name(evt));
		/* sm: post explicit logout */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;

	/* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for the PRLO response.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* any PRLO completion (good or bad) returns us to the
		 * port-logged-in state; the PLOGI session remains valid */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
		break;

	default:
		/* NOTE(review): every sibling device state routes unhandled
		 * events through __ocs_d_common; this one uses
		 * __ocs_node_common, so shutdown events here take the generic
		 * node path instead of the device path — confirm intentional */
		__ocs_node_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @brief Initialize device node.
*
* Initialize device node. If a node is an initiator, then send a PLOGI and transition
* to __ocs_d_wait_plogi_rsp, otherwise transition to __ocs_d_init.
*
* @param node Pointer to the node object.
* @param send_plogi Boolean indicating to send PLOGI command or not.
*
* @return none
*/
void
ocs_node_init_device(ocs_node_t *node, int send_plogi)
{
	int pause_new_nodes;

	node->send_plogi = send_plogi;

	/* new non-domain-controller nodes may be administratively paused */
	pause_new_nodes = (node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) != 0;
	if (!pause_new_nodes || FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
		ocs_node_transition(node, __ocs_d_init, NULL);
	} else {
		/* paused: remember where to resume once released */
		node->nodedb_state = __ocs_d_init;
		ocs_node_transition(node, __ocs_node_paused, NULL);
	}
}
/**
* @ingroup device_sm
* @brief Device node state machine: Initial node state for an initiator or a target.
*
* Description
* This state is entered when a node is instantiated, either having been
* discovered from a name services query, or having received a PLOGI/FLOGI.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
* - OCS_EVT_ENTER: (uint8_t *) - 1 to send a PLOGI on
* entry (initiator-only); 0 indicates a PLOGI is
* not sent on entry (initiator-only). Not applicable for a target.
*
* @return Returns NULL.
*/
void *
__ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* check if we need to send PLOGI */
		if (node->send_plogi) {
			/* only send if we have initiator capability, and domain is attached */
			if (node->sport->enable_ini && node->sport->domain->attached) {
				ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
						OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
				ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
			} else {
				node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
					    node->sport->enable_ini, node->sport->domain->attached);
			}
		}
		break;
	case OCS_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		fc_header_t *hdr = cbdata->header->dma.virt;
		uint32_t d_id = fc_be24toh(hdr->d_id);

		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		/* defer the LS_ACC until the node attach completes */
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* domain already attached */
		if (node->sport->domain->attached) {
			rc = ocs_node_attach(node);
			ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
			if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
				ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
			}
			break;
		}

		/* domain not attached; several possibilities: */
		switch (node->sport->topology) {
		case OCS_SPORT_TOPOLOGY_P2P:
			/* we're not attached and sport is p2p, need to attach */
			ocs_domain_attach(node->sport->domain, d_id);
			ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
			break;
		case OCS_SPORT_TOPOLOGY_FABRIC:
			/* we're not attached and sport is fabric, domain attach should have
			 * already been requested as part of the fabric state machine, wait for it
			 */
			ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
			break;
		case OCS_SPORT_TOPOLOGY_UNKNOWN:
			/* Two possibilities:
			 * 1. received a PLOGI before our FLOGI has completed (possible since
			 *    completion comes in on another CQ), thus we don't know what we're
			 *    connected to yet; transition to a state to wait for the fabric
			 *    node to tell us;
			 * 2. PLOGI received before link went down and we haven't performed
			 *    domain attach yet.
			 * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
			 * was received after link back up.
			 */
			node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
			ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
			break;
		default:
			/* typo fixed: "unexpectd" -> "unexpected" */
			node_printf(node, "received PLOGI, with unexpected topology %d\n",
				    node->sport->topology);
			ocs_assert(FALSE, NULL);
			break;
		}
		break;
	}

	case OCS_EVT_FDISC_RCVD: {
		__ocs_d_common(__func__, ctx, evt, arg);
		break;
	}

	case OCS_EVT_FLOGI_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* this better be coming from an NPort */
		ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);

		/* sm: save sparams, send FLOGI acc */
		ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);

		/* send FC LS_ACC response, override s_id */
		ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
		ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
		if (ocs_p2p_setup(node->sport)) {
			node_printf(node, "p2p setup failed, shutting down node\n");
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		} else {
			ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
		}
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case OCS_EVT_PRLI_RCVD:
	case OCS_EVT_PRLO_RCVD:
	case OCS_EVT_PDISC_RCVD:
	case OCS_EVT_ADISC_RCVD:
	case OCS_EVT_RSCN_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}
		/* these ELS requests require a prior N_Port login; reject */
		node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
			NULL, NULL);
		break;
	}

	case OCS_EVT_FCP_CMD_RCVD: {
		/* note: problem, we're now expecting an ELS REQ completion
		 * from both the LOGO and PLOGI */
		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}

		/* Send LOGO */
		node_printf(node, "FCP_CMND received, send LOGO\n");
		if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
			/* failed to send LOGO, go ahead and cleanup node anyways */
			node_printf(node, "Failed to send LOGO\n");
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		} else {
			/* sent LOGO, wait for response */
			ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
		}
		break;
	}
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait on a response for a sent PLOGI.
*
* Description
* State is entered when an initiator-capable node has sent
* a PLOGI and is waiting for a response.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		/* received PLOGI with svc parms, go ahead and attach node
		 * when PLOGI that was sent ultimately completes, it'll be a no-op
		 */

		/* TODO: there is an outstanding PLOGI sent, can we set a flag
		 * to indicate that we don't want to retry it if it times out? */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		/* defer the LS_ACC until the node attach completes */
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
		/* sm: domain->attached / ocs_node_attach */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			/* attach finished synchronously; post completion now */
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;
	}

	case OCS_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
		if (ocs->fc_type == node->fc_type) {
			ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
			ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
		} else {
			/* TODO this need to be looked at. What do we do here ?
			 * (currently the PRLI is silently dropped) */
		}
		break;

	/* TODO this need to be looked at. we could very well be logged in */
	case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
	case OCS_EVT_PRLO_RCVD:
	case OCS_EVT_PDISC_RCVD:
	case OCS_EVT_FDISC_RCVD:
	case OCS_EVT_ADISC_RCVD:
	case OCS_EVT_RSCN_RCVD:
	case OCS_EVT_SCR_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* login not complete yet; reject anything requiring it */
		node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
			NULL, NULL);
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: save sparams, ocs_node_attach */
		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
		/* +4 skips the ELS command code preceding the service params */
		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
		/* PLOGI failed, shutdown the node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;

	case OCS_EVT_SRRS_ELS_REQ_RJT:	/* Our PLOGI was rejected, this is ok in some cases */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	case OCS_EVT_FCP_CMD_RCVD: {
		/* not logged in yet and outstanding PLOGI so don't send LOGO,
		 * just drop
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Waiting on a response for a
* sent PLOGI.
*
* Description
* State is entered when an initiator-capable node has sent
* a PLOGI and is waiting for a response. Before receiving the
* response, a PRLI was received, implying that the PLOGI was
* successful.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach and then we can send the LS_ACC for the PRLI. If,
		 * during this time, we receive FCP_CMNDs (which is possible
		 * since we've already sent a PRLI and our peer may have accepted).
		 * At this time, we are not waiting on any other unsolicited
		 * frames to continue with the login process. Thus, it will not
		 * hurt to hold frames here.
		 */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: save sparams, ocs_node_attach */
		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
		/* +4 skips the ELS command code preceding the service params */
		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			/* attach finished synchronously; post completion now */
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case OCS_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for a domain attach.
*
* Description
* Waits for a domain-attach complete ok event.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Hold unsolicited frames until the domain attach completes */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK:
		ocs_assert(node->sport->domain->attached, NULL);
		/* sm: ocs_node_attach */
		/* Transition first, then post: a synchronous attach completion
		 * must be delivered in the __ocs_d_wait_node_attach state.
		 */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for topology
* notification
*
* Description
* Waits for topology notification from fabric node, then
* attaches domain and node.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Hold unsolicited frames while the topology is unresolved */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
		ocs_sport_topology_e topology = (ocs_sport_topology_e)(uintptr_t)arg;

		/* Domain must not be attached yet, and we must still owe a
		 * PLOGI LS_ACC (set when the PLOGI was received).
		 */
		ocs_assert(!node->sport->domain->attached, NULL);
		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
		node_printf(node, "topology notification, topology=%d\n", topology);

		/* At the time the PLOGI was received, the topology was unknown,
		 * so we didn't know which node would perform the domain attach:
		 * 1. The node from which the PLOGI was sent (p2p) or
		 * 2. The node to which the FLOGI was sent (fabric).
		 */
		if (topology == OCS_SPORT_TOPOLOGY_P2P) {
			/* if this is p2p, need to attach to the domain using the
			 * d_id from the PLOGI received
			 */
			ocs_domain_attach(node->sport->domain, node->ls_acc_did);
		}
		/* else, if this is fabric, the domain attach should be performed
		 * by the fabric node (node sending FLOGI); just wait for attach
		 * to complete
		 */
		ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
		break;
	}

	case OCS_EVT_DOMAIN_ATTACH_OK:
		ocs_assert(node->sport->domain->attached, NULL);
		node_printf(node, "domain attach ok\n");
		/* sm: ocs_node_attach */
		/* Transition before posting so a synchronous attach completion
		 * is handled in the new state.
		 */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for a node attach when found by a remote node.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Hold unsolicited frames until the node attach completes */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_NODE_ATTACH_OK:
		node->attached = TRUE;
		/* Deliver whichever LS_ACC was deferred until the attach
		 * completed; send_ls_acc records which ELS is owed a response.
		 */
		switch (node->send_ls_acc) {
		case OCS_NODE_SEND_LS_ACC_PLOGI: {
			/* sm: send_plogi_acc is set / send PLOGI acc */
			/* Normal case for T, or I+T */
			ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
			ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
			/* Clear the saved LS_ACC context; the IO is now owned by
			 * the send path.
			 */
			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case OCS_NODE_SEND_LS_ACC_PRLI: {
			ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case OCS_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
			break;
		}
		break;

	case OCS_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = FALSE;
		node_printf(node, "node attach failed\n");
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	/* Handle shutdown events: record the reason, but wait for the
	 * in-flight attach to complete before shutting down.
	 */
	case OCS_EVT_SHUTDOWN:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for a node/domain
* attach then shutdown node.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Hold frames; we only wait here for the attach to finish */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	/* wait for any of these attach events and then shutdown */
	case OCS_EVT_NODE_ATTACH_OK:
		node->attached = TRUE;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	case OCS_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = FALSE;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Port is logged in.
*
* Description
* This state is entered when a remote port has completed port login (PLOGI).
*
* @param ctx Remote node state machine context.
* @param evt Event to process
* @param arg Per event optional argument
*
* @return Returns NULL.
*/
void *
__ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	/* TODO: I+T: what if PLOGI response not yet received ? */

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Normal case for I or I+T */
		if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
				&& !node->sent_prli) {
			/* sm: if enable_ini / send PRLI */
			ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
			/* sent_prli guards against sending PRLI twice if we
			 * re-enter this state.
			 */
			node->sent_prli = TRUE;
			/* can now expect ELS_REQ_OK/FAIL/RJT */
		}
		break;

	case OCS_EVT_FCP_CMD_RCVD: {
		/* For target functionality send PRLO and drop the CMD frame. */
		if (node->sport->enable_tgt) {
			if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
				ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
			}
		}
		break;
	}

	case OCS_EVT_PRLI_RCVD: {
		/* Normal for T or I+T: peer wants a process login */
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
		ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_OK: {	/* PRLI response */
		/* Normal case for I or I+T */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: process PRLI payload */
		ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_FAIL: {	/* PRLI response failed */
		/* I, I+T, assume some link failure, shutdown node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_RJT: {	/* PRLI rejected by remote */
		/* Normal for I, I+T (connected to an I) */
		/* Node doesn't want to be a target, stay here and wait for a PRLI from the remote node
		 * if it really wants to connect to us as target */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;
	}

	case OCS_EVT_SRRS_ELS_CMPL_OK: {
		/* Normal T, I+T, target-server rejected the process login */
		/* This would be received only in the case where we sent LS_RJT for the PRLI, so
		 * do nothing. (note: as T only we could shutdown the node)
		 */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		break;
	}

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		/* I, T, I+T: peer logged us out; ACC it then shut down */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for a LOGO accept.
*
* Description
* Waits for a LOGO accept completion.
*
* @param ctx Remote node state machine context.
* @param evt Event to process
* @param arg Per event optional argument
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Hold frames while the LOGO ACC is on the wire */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK:
	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		/* sm: / post explicit logout */
		/* Whether the ACC completed or failed, the remote logged us
		 * out, so shut the node down with an explicit logout.
		 */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Device is ready.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	/* Don't trace FCP commands; they are the steady-state traffic and
	 * would flood the trace log.
	 */
	if (evt != OCS_EVT_FCP_CMD_RCVD) {
		node_sm_trace();
	}

	switch(evt) {
	case OCS_EVT_ENTER:
		node->fcp_enabled = TRUE;
		/* Announce the node to the SCSI layer per the role(s) it
		 * reported in its PRLI.
		 */
		if (node->init) {
			device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
				node->wwpn, node->wwnn);
			if (node->sport->enable_tgt)
				ocs_scsi_new_initiator(node);
		}
		if (node->targ) {
			device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
				node->wwpn, node->wwnn);
			if (node->sport->enable_ini)
				ocs_scsi_new_target(node);
		}
		break;

	case OCS_EVT_EXIT:
		node->fcp_enabled = FALSE;
		break;

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_PDISC_RCVD: {
		/* PDISC is answered like a PLOGI ACC, without re-login */
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_PRLI_RCVD: {
		/* T, I+T: remote initiator is slow to get started */
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_process_prli_payload(node, cbdata->payload->dma.virt);

		/* sm: send PRLI acc/reject */
		if (ocs->fc_type == node->fc_type)
			ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
		else
			ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
				FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		break;
	}

	case OCS_EVT_PRLO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;
		fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;

		/* sm: send PRLO acc/reject */
		if (ocs->fc_type == prlo->type)
			ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
		else
			ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
				FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		/*TODO: need implicit logout */
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case OCS_EVT_ADISC_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_RRQ_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* Send LS_ACC */
		ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_ABTS_RCVD:
		ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
		break;

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		break;

	case OCS_EVT_NODE_REFOUND:
		/* Already ready; nothing to do on a refound notification */
		break;

	case OCS_EVT_NODE_MISSING:
		if (node->sport->enable_rscn) {
			ocs_node_transition(node, __ocs_d_device_gone, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK:
		/* T, or I+T, PRLI accept completed ok */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		break;

	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		/* T, or I+T, PRLI accept failed to complete */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		node_printf(node, "Failed to send PRLI LS_ACC\n");
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Node is gone (absent from GID_PT).
*
* Description
* State entered when a node is detected as being gone (absent from GID_PT).
*
* @param ctx Remote node state machine context.
* @param evt Event to process
* @param arg Per event optional argument
*
* @return Returns NULL.
*/
void *
__ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc = OCS_SCSI_CALL_COMPLETE;
	int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER: {
		/* Index is (targ << 1) | init, matching the label order below */
		const char *labels[] = {"none", "initiator", "target", "initiator+target"};

		device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
			labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);

		/* Tear down the SCSI-layer view of this node according to
		 * which role(s) are enabled/active. If every del call
		 * completed synchronously, post SHUTDOWN now; otherwise the
		 * SCSI layer completes asynchronously.
		 */
		switch(ocs_node_get_enable(node)) {
		case OCS_NODE_ENABLE_T_TO_T:
		case OCS_NODE_ENABLE_I_TO_T:
		case OCS_NODE_ENABLE_IT_TO_T:
			rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		case OCS_NODE_ENABLE_T_TO_I:
		case OCS_NODE_ENABLE_I_TO_I:
		case OCS_NODE_ENABLE_IT_TO_I:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			break;

		case OCS_NODE_ENABLE_T_TO_IT:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			break;

		case OCS_NODE_ENABLE_I_TO_IT:
			rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		case OCS_NODE_ENABLE_IT_TO_IT:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		default:
			rc = OCS_SCSI_CALL_COMPLETE;
			break;
		}

		if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		}
		break;
	}

	case OCS_EVT_NODE_REFOUND:
		/* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */

		/* reauthenticate with PLOGI/PRLI */
		/* ocs_node_transition(node, __ocs_d_discovered, NULL); */

		/* reauthenticate with ADISC
		 * sm: send ADISC */
		ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
		break;

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_FCP_CMD_RCVD: {
		/* most likely a stale frame (received prior to link down), if attempt
		 * to send LOGO, will probably timeout and eat up 20s; thus, drop FCP_CMND
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		/* sm: send LOGO acc */
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
* @ingroup device_sm
* @brief Device node state machine: Wait for the ADISC response.
*
* Description
* Waits for the ADISC response from the remote node.
*
* @param ctx Remote node state machine context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_SRRS_ELS_REQ_OK:
		/* ADISC accepted: the remote port is still who we thought it
		 * was, so the node is ready again.
		 */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
		break;

	case OCS_EVT_SRRS_ELS_REQ_RJT:
		/* received an LS_RJT, in this case, send shutdown (explicit logo)
		 * event which will unregister the node, and start over with PLOGI
		 */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: post explicit logout */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;

	case OCS_EVT_LOGO_RCVD: {
		/* In this case, we have the equivalent of an LS_RJT for the ADISC,
		 * so we need to abort the ADISC, and re-login with PLOGI
		 */
		/* sm: request abort, send LOGO acc */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
diff --git a/sys/dev/ocs_fc/ocs_els.c b/sys/dev/ocs_fc/ocs_els.c
index c62f71d4eb4f..cf4f01477f69 100644
--- a/sys/dev/ocs_fc/ocs_els.c
+++ b/sys/dev/ocs_fc/ocs_els.c
@@ -1,2761 +1,2761 @@
/*-
* Copyright (c) 2017 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* Functions to build and send ELS/CT/BLS commands and responses.
*/
/*!
@defgroup els_api ELS/BLS/CT Command and Response Functions
*/
#include "ocs.h"
#include "ocs_els.h"
#include "ocs_scsi.h"
#include "ocs_device.h"
#define ELS_IOFMT "[i:%04x t:%04x h:%04x]"
#define ELS_IOFMT_ARGS(els) els->init_task_tag, els->tgt_task_tag, els->hw_tag
#define node_els_trace() \
do { \
if (OCS_LOG_ENABLE_ELS_TRACE(ocs)) \
ocs_log_info(ocs, "[%s] %-20s\n", node->display_name, __func__); \
} while (0)
#define els_io_printf(els, fmt, ...) \
ocs_log_debug(els->node->ocs, "[%s]" ELS_IOFMT " %-8s " fmt, els->node->display_name, ELS_IOFMT_ARGS(els), els->display_name, ##__VA_ARGS__);
static int32_t ocs_els_send(ocs_io_t *els, uint32_t reqlen, uint32_t timeout_sec, ocs_hw_srrs_cb_t cb);
static int32_t ocs_els_send_rsp(ocs_io_t *els, uint32_t rsplen);
static int32_t ocs_els_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg);
static ocs_io_t *ocs_bls_send_acc(ocs_io_t *io, uint32_t s_id, uint16_t ox_id, uint16_t rx_id);
static int32_t ocs_bls_send_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
int32_t status, uint32_t ext_status, void *app);
static void ocs_io_transition(ocs_io_t *els, ocs_sm_function_t state, void *data);
static ocs_io_t *ocs_els_abort_io(ocs_io_t *els, int send_abts);
static void _ocs_els_io_free(void *arg);
static void ocs_els_delay_timer_cb(void *arg);
/**
* @ingroup els_api
* @brief ELS state machine transition wrapper.
*
* Description
* This function is the transition wrapper for the ELS state machine. It grabs
* the node lock prior to making the transition to protect
* against multiple threads accessing a particular ELS. For example,
* one thread transitioning from __els_init to
* __ocs_els_wait_resp and another thread (tasklet) handling the
* completion of that ELS request.
*
* @param els Pointer to the IO context.
* @param state State to transition to.
* @param data Data to pass in with the transition.
*
* @return None.
*/
static void
ocs_io_transition(ocs_io_t *els, ocs_sm_function_t state, void *data)
{
	/* protect ELS events with node lock */
	ocs_node_t *node = els->node;

	/* Serialize against concurrent threads driving this ELS's state
	 * machine (e.g. submitter vs. completion tasklet).
	 */
	ocs_node_lock(node);
		ocs_sm_transition(&els->els_sm, state, data);
	ocs_node_unlock(node);
}
/**
* @ingroup els_api
* @brief ELS state machine post event wrapper.
*
* Description
* Post an event wrapper for the ELS state machine. This function grabs
* the node lock prior to posting the event.
*
* @param els Pointer to the IO context.
* @param evt Event to process.
* @param data Data to pass in with the transition.
*
* @return None.
*/
void
ocs_els_post_event(ocs_io_t *els, ocs_sm_event_t evt, void *data)
{
	/* protect ELS events with node lock */
	ocs_node_t *node = els->node;

	ocs_node_lock(node);
		/* Track nesting depth so the IO is only freed once the
		 * outermost event dispatch has unwound.
		 */
		els->els_evtdepth ++;
		ocs_sm_post_event(&els->els_sm, evt, data);
		els->els_evtdepth --;
	ocs_node_unlock(node);

	/* Deferred free: a handler may have requested the free (els_req_free)
	 * while events were still being dispatched.
	 */
	if (els->els_evtdepth == 0 && els->els_req_free) {
		ocs_els_io_free(els);
	}
}
/**
* @ingroup els_api
* @brief Allocate an IO structure for an ELS IO context.
*
* Description
* Allocate an IO for an ELS context. Uses OCS_ELS_RSP_LEN as response size.
*
* @param node node to associate ELS IO with
* @param reqlen Length of ELS request
* @param role Role of ELS (originator/responder)
*
* @return pointer to IO structure allocated
*/
ocs_io_t *
ocs_els_io_alloc(ocs_node_t *node, uint32_t reqlen, ocs_els_role_e role)
{
	/* Convenience wrapper: use the default response buffer size */
	return ocs_els_io_alloc_size(node, reqlen, OCS_ELS_RSP_LEN, role);
}
/**
* @ingroup els_api
* @brief Allocate an IO structure for an ELS IO context.
*
* Description
* Allocate an IO for an ELS context, allowing the caller to specify the size of the response.
*
* @param node node to associate ELS IO with
* @param reqlen Length of ELS request
* @param rsplen Length of ELS response
* @param role Role of ELS (originator/responder)
*
* @return pointer to IO structure allocated
*/
ocs_io_t *
ocs_els_io_alloc_size(ocs_node_t *node, uint32_t reqlen, uint32_t rsplen, ocs_els_role_e role)
{
	ocs_t *ocs;
	ocs_xport_t *xport;
	ocs_io_t *els;

	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);
	ocs = node->ocs;
	ocs_assert(ocs->xport, NULL);
	xport = ocs->xport;

	ocs_lock(&node->active_ios_lock);
		/* Refuse allocation while the node is shutting down */
		if (!node->io_alloc_enabled) {
			ocs_log_debug(ocs, "called with io_alloc_enabled = FALSE\n");
			ocs_unlock(&node->active_ios_lock);
			return NULL;
		}

		els = ocs_io_alloc(ocs);
		if (els == NULL) {
			ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
			ocs_unlock(&node->active_ios_lock);
			return NULL;
		}

		/* initialize refcount; _ocs_els_io_free runs when it drops to zero */
		ocs_ref_init(&els->ref, _ocs_els_io_free, els);

		switch (role) {
		case OCS_ELS_ROLE_ORIGINATOR:
			els->cmd_ini = TRUE;
			els->cmd_tgt = FALSE;
			break;
		case OCS_ELS_ROLE_RESPONDER:
			els->cmd_ini = FALSE;
			els->cmd_tgt = TRUE;
			break;
		}

		/* IO should not have an associated HW IO yet. Assigned below. */
		if (els->hio != NULL) {
			ocs_log_err(ocs, "assertion failed. HIO is not null\n");
			ocs_io_free(ocs, els);
			ocs_unlock(&node->active_ios_lock);
			return NULL;
		}

		/* populate generic io fields */
		els->ocs = ocs;
		els->node = node;

		/* set type and ELS-specific fields */
		els->io_type = OCS_IO_TYPE_ELS;
		els->display_name = "pending";

		/* Validate the requested buffer sizes against what the pool
		 * actually provides.
		 */
		if (reqlen > OCS_ELS_REQ_LEN) {
			ocs_log_err(ocs, "ELS command request len greater than allocated\n");
			ocs_io_free(ocs, els);
			ocs_unlock(&node->active_ios_lock);
			return NULL;
		}

		if (rsplen > OCS_ELS_GID_PT_RSP_LEN) {
			ocs_log_err(ocs, "ELS command response len: %d "
				"greater than allocated\n", rsplen);
			ocs_io_free(ocs, els);
			ocs_unlock(&node->active_ios_lock);
			return NULL;
		}

		els->els_req.size = reqlen;
		els->els_rsp.size = rsplen;

		/* els is known non-NULL here (the NULL case returned above), so
		 * the former "if (els != NULL)" guard was dead code and has been
		 * removed.
		 */
		ocs_memset(&els->els_sm, 0, sizeof(els->els_sm));
		els->els_sm.app = els;

		/* initialize fields */
		els->els_retries_remaining = OCS_FC_ELS_DEFAULT_RETRIES;
		els->els_evtdepth = 0;
		els->els_pend = 0;
		els->els_active = 0;

		/* add els structure to ELS IO pending list */
		ocs_list_add_tail(&node->els_io_pend_list, els);
		els->els_pend = 1;
	ocs_unlock(&node->active_ios_lock);
	return els;
}
/**
* @ingroup els_api
* @brief Free IO structure for an ELS IO context.
*
 * Description
 * Free the IO for an ELS IO context.
*
* @param els ELS IO structure for which IO is allocated
*
* @return None
*/
void
ocs_els_io_free(ocs_io_t *els)
{
	/* Drop a reference; _ocs_els_io_free (set in ocs_ref_init) performs
	 * the actual teardown when the count reaches zero.
	 */
	ocs_ref_put(&els->ref);
}
/**
* @ingroup els_api
* @brief Free IO structure for an ELS IO context.
*
 * Description
 * Refcount release callback that frees the IO for an ELS IO context.
*
* @param arg ELS IO structure for which IO is allocated
*
* @return None
*/
static void
_ocs_els_io_free(void *arg)
{
	ocs_io_t *els = (ocs_io_t *)arg;
	ocs_t *ocs;
	ocs_node_t *node;
	int send_empty_event = FALSE;

	ocs_assert(els);
	ocs_assert(els->node);
	ocs_assert(els->node->ocs);
	ocs = els->node->ocs;

	node = els->node;
	/* NOTE(review): redundant re-assignment — ocs was already set to
	 * els->node->ocs above; same value both times.
	 */
	ocs = node->ocs;

	ocs_lock(&node->active_ios_lock);
		if (els->els_active) {
			/* if active, remove from active list and check empty */
			ocs_list_remove(&node->els_io_active_list, els);

			/* Send list empty event if the IO allocator is disabled, and the list is empty
			 * If node->io_alloc_enabled was not checked, the event would be posted continually
			 */
			send_empty_event = (!node->io_alloc_enabled) && ocs_list_empty(&node->els_io_active_list);
			els->els_active = 0;
		} else if (els->els_pend) {
			/* if pending, remove from pending list; node shutdown isn't
			 * gated off the pending list (only the active list), so no
			 * need to check if pending list is empty
			 */
			ocs_list_remove(&node->els_io_pend_list, els);
			els->els_pend = 0;
		} else {
-			ocs_log_err(ocs, "assertion failed: niether els->els_pend nor els->active set\n");
+			ocs_log_err(ocs, "assertion failed: neither els->els_pend nor els->active set\n");
			ocs_unlock(&node->active_ios_lock);
			return;
		}
	ocs_unlock(&node->active_ios_lock);

	ocs_io_free(ocs, els);

	/* Post outside the lock: tells the node SM the last active child
	 * IO is gone so shutdown can proceed.
	 */
	if (send_empty_event) {
		ocs_node_post_event(node, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
	}

	ocs_scsi_check_pending(ocs);
}
/**
* @ingroup els_api
* @brief Make ELS IO active
*
* @param els Pointer to the IO context to make active.
*
* @return None.
*/
static void
ocs_els_make_active(ocs_io_t *els)
{
	ocs_node_t *node = els->node;

	/* move ELS from pending list to active list */
	ocs_lock(&node->active_ios_lock);
		if (els->els_pend) {
			if (els->els_active) {
				/* Invariant violation: an IO must never be on
				 * both lists; bail out rather than corrupt them.
				 */
				ocs_log_err(node->ocs, "assertion failed: both els->els_pend and els->active set\n");
				ocs_unlock(&node->active_ios_lock);
				return;
			} else {
				/* remove from pending list */
				ocs_list_remove(&node->els_io_pend_list, els);
				els->els_pend = 0;

				/* add els structure to ELS IO active list */
				ocs_list_add_tail(&node->els_io_active_list, els);
				els->els_active = 1;
			}
		} else {
			/* must be retrying; make sure it's already active */
			if (!els->els_active) {
-				ocs_log_err(node->ocs, "assertion failed: niether els->els_pend nor els->active set\n");
+				ocs_log_err(node->ocs, "assertion failed: neither els->els_pend nor els->active set\n");
			}
		}
	ocs_unlock(&node->active_ios_lock);
}
/**
* @ingroup els_api
* @brief Send the ELS command.
*
* Description
* The command, given by the \c els IO context, is sent to the node that the IO was
* configured with, using ocs_hw_srrs_send(). Upon completion,
* the \c cb callback is invoked,
* with the application-specific argument set to the \c els IO context.
*
* @param els Pointer to the IO context.
* @param reqlen Byte count in the payload to send.
* @param timeout_sec Command timeout, in seconds (0 -> 2*R_A_TOV).
* @param cb Completion callback.
*
* @return Returns 0 on success; or a negative error code value on failure.
*/
static int32_t
ocs_els_send(ocs_io_t *els, uint32_t reqlen, uint32_t timeout_sec, ocs_hw_srrs_cb_t cb)
{
	ocs_node_t *node = els->node;

	/* update ELS request counter; the node SM decrements it when the
	 * request completes.
	 */
	node->els_req_cnt++;

	/* move ELS from pending list to active list */
	ocs_els_make_active(els);

	els->wire_len = reqlen;
	return ocs_scsi_io_dispatch(els, cb);
}
/**
* @ingroup els_api
* @brief Send the ELS response.
*
* Description
* The ELS response, given by the \c els IO context, is sent to the node
* that the IO was configured with, using ocs_hw_srrs_send().
*
* @param els Pointer to the IO context.
* @param rsplen Byte count in the payload to send.
*
* @return Returns 0 on success; or a negative error value on failure.
*/
static int32_t
ocs_els_send_rsp(ocs_io_t *els, uint32_t rsplen)
{
	ocs_node_t *node = els->node;

	/* increment ELS completion counter; the node SM decrements it when
	 * the accept/response completes.
	 */
	node->els_cmpl_cnt++;

	/* move ELS from pending list to active list */
	ocs_els_make_active(els);

	els->wire_len = rsplen;
	/* Responses always complete through ocs_els_acc_cb */
	return ocs_scsi_io_dispatch(els, ocs_els_acc_cb);
}
/**
* @ingroup els_api
* @brief Handle ELS IO request completions.
*
* Description
* This callback is used for several ELS send operations.
*
* @param hio Pointer to the HW IO context that completed.
* @param rnode Pointer to the remote node.
* @param length Length of the returned payload data.
* @param status Status of the completion.
* @param ext_status Extended status of the completion.
* @param arg Application-specific argument (generally a pointer to the ELS IO context).
*
* @return Returns 0 on success; or a negative error value on failure.
*/
static int32_t
ocs_els_req_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
{
	ocs_io_t *els;
	ocs_node_t *node;
	ocs_t *ocs;
	ocs_node_cb_t cbdata;
	ocs_io_t *io;

	/* arg carries the ELS IO context supplied at submission time. */
	ocs_assert(arg, -1);
	io = arg;
	els = io;
	ocs_assert(els, -1);
	ocs_assert(els->node, -1);
	node = els->node;
	ocs_assert(node->ocs, -1);
	ocs = node->ocs;
	ocs_assert(io->hio, -1);
	ocs_assert(hio == io->hio, -1);

	if (status != 0) {
		els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
	}

	/* Record how many response bytes arrived on the link. */
	els->els_rsp.len = length;

	cbdata.status = status;
	cbdata.ext_status = ext_status;
	cbdata.header = NULL;
	cbdata.els = els;

	/* FW returns the number of bytes received on the link in
	 * the WCQE, not the amount placed in the buffer; use this info to
	 * check if there was an overrun.
	 */
	if (length > els->els_rsp.size) {
		ocs_log_warn(ocs, "ELS response returned len=%d > buflen=%zu\n",
				length, els->els_rsp.size);
		ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
		return 0;
	}

	/* Translate the WCQE completion status into an ELS IO event. */
	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_OK, &cbdata);
		break;
	case SLI4_FC_WCQE_STATUS_LS_RJT:
		ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_RJT, &cbdata);
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		/* Distinguish timeout/abort from other local rejects. */
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
			ocs_els_post_event(els, OCS_EVT_ELS_REQ_TIMEOUT, &cbdata);
			break;
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
			ocs_els_post_event(els, OCS_EVT_ELS_REQ_ABORTED, &cbdata);
			break;
		default:
			ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
			break;
		}
		break;
	default:
		ocs_log_warn(ocs, "els req complete: failed status x%x, ext_status x%x\n", status, ext_status);
		ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
		break;
	}
	return 0;
}
/**
* @ingroup els_api
* @brief Handle ELS IO accept/response completions.
*
* Description
* This callback is used for several ELS send operations.
*
* @param hio Pointer to the HW IO context that completed.
* @param rnode Pointer to the remote node.
* @param length Length of the returned payload data.
* @param status Status of the completion.
* @param ext_status Extended status of the completion.
* @param arg Application-specific argument (generally a pointer to the ELS IO context).
*
* @return Returns 0 on success; or a negative error value on failure.
*/
static int32_t
ocs_els_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
{
	ocs_io_t *els;
	ocs_node_t *node;
	ocs_t *ocs;
	ocs_node_cb_t cbdata;
	ocs_io_t *io;

	/* arg carries the ELS IO context supplied at submission time. */
	ocs_assert(arg, -1);
	io = arg;
	els = io;
	ocs_assert(els, -1);
	ocs_assert(els->node, -1);
	node = els->node;
	ocs_assert(node->ocs, -1);
	ocs = node->ocs;
	ocs_assert(io->hio, -1);
	ocs_assert(hio == io->hio, -1);

	cbdata.status = status;
	cbdata.ext_status = ext_status;
	cbdata.header = NULL;
	cbdata.els = els;

	/* Post node event (note: unlike ocs_els_req_cb, this posts to the
	 * node state machine, not the ELS IO state machine). */
	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		ocs_node_post_event(node, OCS_EVT_SRRS_ELS_CMPL_OK, &cbdata);
		break;
	default:
		/* Log once with node/ELS names; a second, redundant warn
		 * for the same event was removed. */
		ocs_log_warn(ocs, "[%s] %-8s failed status x%x, ext_status x%x\n",
				node->display_name, els->display_name, status, ext_status);
		ocs_node_post_event(node, OCS_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
		break;
	}

	/* If this IO has a callback, invoke it */
	if (els->els_callback) {
		(*els->els_callback)(node, &cbdata, els->els_callback_arg);
	}
	ocs_els_io_free(els);
	return 0;
}
/**
* @ingroup els_api
* @brief Format and send a PLOGI ELS command.
*
* Description
* Construct a PLOGI payload using the domain SLI port service parameters,
* and send to the \c node.
*
* @param node Node to which the PLOGI is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_plogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	void (*cb)(ocs_node_t *node, ocs_node_cb_t *cbdata, void *arg), void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_plogi_payload_t *plogi;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*plogi), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "plogi";

	/* PLOGI payload: the sport service parameters with the command
	 * code patched in. */
	plogi = els->els_req.virt;
	ocs_memcpy(plogi, node->sport->service_params, sizeof(*plogi));
	plogi->command_code = FC_ELS_CMD_PLOGI;
	plogi->resv1 = 0;

	ocs_display_sparams(node->display_name, "plogi send req", 0, NULL, plogi->common_service_parameters);

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Format and send a FLOGI ELS command.
*
* Description
* Construct an FLOGI payload, and send to the \c node.
*
* @param node Node to which the FLOGI is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_flogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs;
	fc_plogi_payload_t *flogi;
	ocs_io_t *els;

	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);
	ocs_assert(node->sport, NULL);
	ocs = node->ocs;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*flogi), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "flogi";

	/* FLOGI payload: the sport service parameters with the command
	 * code patched in. */
	flogi = els->els_req.virt;
	ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));
	flogi->command_code = FC_ELS_CMD_FLOGI;
	flogi->resv1 = 0;

	/* Advertise priority tagging support (word 1, bit 23). */
	flogi->common_service_parameters[1] |= ocs_htobe32(1U << 23);

	ocs_display_sparams(node->display_name, "flogi send req", 0, NULL, flogi->common_service_parameters);

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Format and send a FDISC ELS command.
*
* Description
* Construct an FDISC payload, and send to the \c node.
*
* @param node Node to which the FDISC is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_fdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs;
	fc_plogi_payload_t *fdisc;
	ocs_io_t *els;

	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);
	ocs = node->ocs;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*fdisc), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "fdisc";

	/* FDISC payload: the sport service parameters with the command
	 * code patched in. */
	fdisc = els->els_req.virt;
	ocs_memcpy(fdisc, node->sport->service_params, sizeof(*fdisc));
	fdisc->command_code = FC_ELS_CMD_FDISC;
	fdisc->resv1 = 0;

	ocs_display_sparams(node->display_name, "fdisc send req", 0, NULL, fdisc->common_service_parameters);

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send a PRLI ELS command.
*
* Description
* Construct a PRLI ELS command, and send to the \c node.
*
* @param node Node to which the PRLI is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_prli(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_prli_payload_t *prli;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*prli), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "prli";

	/* Build the FCP PRLI service-parameter page. */
	prli = els->els_req.virt;
	ocs_memset(prli, 0, sizeof(*prli));
	prli->command_code = FC_ELS_CMD_PRLI;
	prli->page_length = 16;
	prli->payload_length = ocs_htobe16(sizeof(fc_prli_payload_t));
	prli->type = FC_TYPE_FCP;
	prli->type_ext = 0;
	prli->flags = ocs_htobe16(FC_PRLI_ESTABLISH_IMAGE_PAIR);

	/* Advertise initiator/target roles per the sport configuration. */
	prli->service_params = ocs_htobe16(FC_PRLI_READ_XRDY_DISABLED |
		(node->sport->enable_ini ? FC_PRLI_INITIATOR_FUNCTION : 0) |
		(node->sport->enable_tgt ? FC_PRLI_TARGET_FUNCTION : 0));

	/* For Tape Drive support */
	prli->service_params |= ocs_htobe16(FC_PRLI_CONFIRMED_COMPLETION | FC_PRLI_RETRY |
		FC_PRLI_TASK_RETRY_ID_REQ| FC_PRLI_REC_SUPPORT);

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send a PRLO ELS command.
*
* Description
* Construct a PRLO ELS command, and send to the \c node.
*
* @param node Node to which the PRLO is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_prlo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_prlo_payload_t *prlo;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*prlo), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "prlo";

	/* Build the FCP PRLO page. */
	prlo = els->els_req.virt;
	ocs_memset(prlo, 0, sizeof(*prlo));
	prlo->command_code = FC_ELS_CMD_PRLO;
	prlo->page_length = 16;
	prlo->payload_length = ocs_htobe16(sizeof(fc_prlo_payload_t));
	prlo->type = FC_TYPE_FCP;
	prlo->type_ext = 0;

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send a LOGO ELS command.
*
* Description
* Format a LOGO, and send to the \c node.
*
* @param node Node to which the LOGO is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_logo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_logo_payload_t *logo;
	fc_plogi_payload_t *sparams;
	ocs_io_t *els;

	node_els_trace();

	/* Our port name comes from the sport service parameters. */
	sparams = (fc_plogi_payload_t*) node->sport->service_params;

	els = ocs_els_io_alloc(node, sizeof(*logo), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "logo";

	/* Build the LOGO payload: our FC ID and port name. */
	logo = els->els_req.virt;
	ocs_memset(logo, 0, sizeof(*logo));
	logo->command_code = FC_ELS_CMD_LOGO;
	logo->resv1 = 0;
	logo->port_id = fc_htobe24(node->rnode.sport->fc_id);
	logo->port_name_hi = sparams->port_name_hi;
	logo->port_name_lo = sparams->port_name_lo;

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send an ADISC ELS command.
*
* Description
* Construct an ADISC ELS command, and send to the \c node.
*
* @param node Node to which the ADISC is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_adisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_io_t *els;
	ocs_t *ocs;
	fc_adisc_payload_t *adisc;
	fc_plogi_payload_t *sparams;
	ocs_sport_t *sport = node->sport;
	ocs = node->ocs;

	node_els_trace();

	/* Our names/address come from the sport service parameters
	 * (single assignment; a redundant duplicate was removed). */
	sparams = (fc_plogi_payload_t*) node->sport->service_params;

	els = ocs_els_io_alloc(node, sizeof(*adisc), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
	} else {
		els->els_timeout_sec = timeout_sec;
		els->els_retries_remaining = retries;
		els->els_callback = cb;
		els->els_callback_arg = cbarg;
		els->display_name = "adisc";

		/* Build ADISC request: hard address, port/node names, port ID. */
		adisc = els->els_req.virt;
		ocs_memset(adisc, 0, sizeof(*adisc));
		adisc->command_code = FC_ELS_CMD_ADISC;
		adisc->hard_address = fc_htobe24(sport->fc_id);
		adisc->port_name_hi = sparams->port_name_hi;
		adisc->port_name_lo = sparams->port_name_lo;
		adisc->node_name_hi = sparams->node_name_hi;
		adisc->node_name_lo = sparams->node_name_lo;
		adisc->port_id = fc_htobe24(node->rnode.sport->fc_id);

		els->hio_type = OCS_HW_ELS_REQ;
		els->iparam.els.timeout = timeout_sec;
		ocs_io_transition(els, __ocs_els_init, NULL);
	}
	return els;
}
/**
* @ingroup els_api
* @brief Send a PDISC ELS command.
*
* Description
* Construct a PDISC ELS command, and send to the \c node.
*
* @param node Node to which the PDISC is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_pdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_plogi_payload_t *pdisc;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*pdisc), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "pdisc";

	/* PDISC payload: the sport service parameters with the command
	 * code patched in. */
	pdisc = els->els_req.virt;
	ocs_memcpy(pdisc, node->sport->service_params, sizeof(*pdisc));
	pdisc->command_code = FC_ELS_CMD_PDISC;
	pdisc->resv1 = 0;

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send an SCR ELS command.
*
* Description
* Format an SCR, and send to the \c node.
*
* @param node Node to which the SCR is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function
* @param cbarg Callback function arg
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_scr(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_scr_payload_t *req;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*req), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "scr";

	/* SCR payload: register for full state-change notifications. */
	req = els->els_req.virt;
	ocs_memset(req, 0, sizeof(*req));
	req->command_code = FC_ELS_CMD_SCR;
	req->function = FC_SCR_REG_FULL;

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send an RRQ ELS command.
*
* Description
* Format an RRQ, and send to the \c node.
*
* @param node Node to which the RRQ is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function
* @param cbarg Callback function arg
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_rrq(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_io_t *els;
	ocs_t *ocs = node->ocs;
	fc_scr_payload_t *req;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*req), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
	} else {
		els->els_timeout_sec = timeout_sec;
		els->els_retries_remaining = retries;
		els->els_callback = cb;
		els->els_callback_arg = cbarg;
		/* Use the correct display name for logging; this previously
		 * read "scr" (copy/paste from ocs_send_scr). */
		els->display_name = "rrq";

		/* NOTE(review): the RRQ is built on fc_scr_payload_t and sets
		 * the SCR 'function' byte -- confirm against the RRQ wire
		 * format defined by FC-LS. */
		req = els->els_req.virt;
		ocs_memset(req, 0, sizeof(*req));
		req->command_code = FC_ELS_CMD_RRQ;
		req->function = FC_SCR_REG_FULL;

		els->hio_type = OCS_HW_ELS_REQ;
		els->iparam.els.timeout = timeout_sec;
		ocs_io_transition(els, __ocs_els_init, NULL);
	}
	return els;
}
/**
* @ingroup els_api
* @brief Send an RSCN ELS command.
*
* Description
* Format an RSCN, and send to the \c node.
*
 * @param node Node to which the RSCN is sent.
* @param timeout_sec Command timeout, in seconds.
* @param retries Number of times to retry errors before reporting a failure.
* @param port_ids Pointer to port IDs
* @param port_ids_count Count of port IDs
* @param cb Callback function
* @param cbarg Callback function arg
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_rscn(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	void *port_ids, uint32_t port_ids_count, els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fc_rscn_payload_t *req;
	ocs_io_t *els;
	/* Total payload: fixed header plus one embedded page, plus
	 * (port_ids_count - 1) additional affected-port-ID pages. */
	uint32_t payload_length = sizeof(fc_rscn_affected_port_id_page_t)*(port_ids_count - 1) +
		sizeof(fc_rscn_payload_t);

	node_els_trace();

	els = ocs_els_io_alloc(node, payload_length, OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	els->els_timeout_sec = timeout_sec;
	els->els_retries_remaining = retries;
	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "rscn";

	req = els->els_req.virt;
	req->command_code = FC_ELS_CMD_RSCN;
	req->page_length = sizeof(fc_rscn_affected_port_id_page_t);
	req->payload_length = ocs_htobe16(sizeof(*req) +
		sizeof(fc_rscn_affected_port_id_page_t)*(port_ids_count-1));

	els->hio_type = OCS_HW_ELS_REQ;
	els->iparam.els.timeout = timeout_sec;

	/* copy in the payload */
	ocs_memcpy(req->port_list, port_ids, port_ids_count*sizeof(fc_rscn_affected_port_id_page_t));

	/* Submit the request */
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @brief Send an LS_RJT ELS response.
*
* Description
* Send an LS_RJT ELS response.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id Originator exchange ID being responded to.
* @param reason_code Reason code value for LS_RJT.
* @param reason_code_expl Reason code explanation value for LS_RJT.
* @param vendor_unique Vendor-unique value for LS_RJT.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_ls_rjt(ocs_io_t *io, uint32_t ox_id, uint32_t reason_code, uint32_t reason_code_expl,
	uint32_t vendor_unique, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_ls_rjt_payload_t *rjt;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "ls_rjt";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* NOTE(review): vendor_unique is accepted but never written into
	 * the payload -- confirm whether the payload should carry it. */
	rjt = io->els_req.virt;
	ocs_memset(rjt, 0, sizeof(*rjt));
	rjt->command_code = FC_ELS_CMD_RJT;
	rjt->reason_code = reason_code;
	rjt->reason_code_exp = reason_code_expl;

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*rjt)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a PLOGI accept response.
*
* Description
* Construct a PLOGI LS_ACC, and send to the \c node, using the originator exchange ID
* \c ox_id.
*
* @param io Pointer to a SCSI IO object.
 * @param ox_id Originator exchange ID being responded to.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_plogi_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_plogi_payload_t *plogi;
	fc_plogi_payload_t *req = (fc_plogi_payload_t *)node->service_params;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "plog_acc";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* LS_ACC payload: our port's service parameters with the command
	 * code patched in. */
	plogi = io->els_req.virt;
	ocs_memcpy(plogi, node->sport->service_params, sizeof(*plogi));
	plogi->command_code = FC_ELS_CMD_ACC;
	plogi->resv1 = 0;

	/* Echo application-header support (word 1, bit 24) if requested. */
	if (req->common_service_parameters[1] & ocs_htobe32(1U << 24)) {
		plogi->common_service_parameters[1] |= ocs_htobe32(1U << 24);
	}

	/* Echo priority-tagging support (word 1, bit 23) if requested. */
	if (req->common_service_parameters[1] & ocs_htobe32(1U << 23)) {
		plogi->common_service_parameters[1] |= ocs_htobe32(1U << 23);
	}

	ocs_display_sparams(node->display_name, "plogi send resp", 0, NULL, plogi->common_service_parameters);

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*plogi)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send an FLOGI accept response for point-to-point negotiation.
*
* Description
* Construct an FLOGI accept response, and send to the \c node using the originator
* exchange id \c ox_id. The \c s_id is used for the response frame source FC ID.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id Originator exchange ID for the response.
* @param s_id Source FC ID to be used in the response frame.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_flogi_p2p_acc(ocs_io_t *io, uint32_t ox_id, uint32_t s_id, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	int32_t rc;
	ocs_t *ocs = node->ocs;
	fc_plogi_payload_t *flogi;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "flogi_p2p_acc";
	io->init_task_tag = ox_id;

	/* Response carries an explicit source FC ID (OCS_HW_ELS_RSP_SID). */
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els_sid.ox_id = ox_id;
	io->iparam.els_sid.s_id = s_id;

	flogi = io->els_req.virt;

	/* copy our port's service parameters to payload */
	ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));
	flogi->command_code = FC_ELS_CMD_ACC;
	flogi->resv1 = 0;

	/* Zero the class 1-4 service parameters; each memset is sized by
	 * its own field (previously all four used sizeof class1). */
	ocs_memset(flogi->class1_service_parameters, 0, sizeof(flogi->class1_service_parameters));
	ocs_memset(flogi->class2_service_parameters, 0, sizeof(flogi->class2_service_parameters));
	ocs_memset(flogi->class3_service_parameters, 0, sizeof(flogi->class3_service_parameters));
	ocs_memset(flogi->class4_service_parameters, 0, sizeof(flogi->class4_service_parameters));

	io->hio_type = OCS_HW_ELS_RSP_SID;
	if ((rc = ocs_els_send_rsp(io, sizeof(*flogi)))) {
		ocs_els_io_free(io);
		io = NULL;
	}
	return io;
}
ocs_io_t *
ocs_send_flogi_acc(ocs_io_t *io, uint32_t ox_id, uint32_t is_fport, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	int32_t rc;
	ocs_t *ocs = node->ocs;
	fc_plogi_payload_t *flogi;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "flogi_acc";
	io->init_task_tag = ox_id;

	/* Response carries our sport FC ID as the source (OCS_HW_ELS_RSP_SID). */
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els_sid.ox_id = ox_id;
	io->iparam.els_sid.s_id = io->node->sport->fc_id;

	flogi = io->els_req.virt;

	/* copy our port's service parameters to payload */
	ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));

	/* Set F_port */
	if (is_fport) {
		/* Set F_PORT and Multiple N_PORT_ID Assignment in the
		 * big-endian word; use host->BE conversion (previously
		 * ocs_be32toh, which is byte-identical but semantically
		 * the reverse direction). */
		flogi->common_service_parameters[1] |= ocs_htobe32(3U << 28);
	}

	flogi->command_code = FC_ELS_CMD_ACC;
	flogi->resv1 = 0;

	ocs_display_sparams(node->display_name, "flogi send resp", 0, NULL, flogi->common_service_parameters);

	/* Zero the class 1-4 service parameters; each memset is sized by
	 * its own field (previously all four used sizeof class1). */
	ocs_memset(flogi->class1_service_parameters, 0, sizeof(flogi->class1_service_parameters));
	ocs_memset(flogi->class2_service_parameters, 0, sizeof(flogi->class2_service_parameters));
	ocs_memset(flogi->class3_service_parameters, 0, sizeof(flogi->class3_service_parameters));
	ocs_memset(flogi->class4_service_parameters, 0, sizeof(flogi->class4_service_parameters));

	io->hio_type = OCS_HW_ELS_RSP_SID;
	if ((rc = ocs_els_send_rsp(io, sizeof(*flogi)))) {
		ocs_els_io_free(io);
		io = NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a PRLI accept response
*
* Description
* Construct a PRLI LS_ACC response, and send to the \c node, using the originator
* \c ox_id exchange ID.
*
* @param io Pointer to a SCSI IO object.
 * @param ox_id Originator exchange ID.
 * @param fc_type FC-4 type placed in the PRLI accept payload.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_prli_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_prli_payload_t *prli;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "prli_acc";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* Build the PRLI accept page for the requested FC-4 type. */
	prli = io->els_req.virt;
	ocs_memset(prli, 0, sizeof(*prli));
	prli->command_code = FC_ELS_CMD_ACC;
	prli->page_length = 16;
	prli->payload_length = ocs_htobe16(sizeof(fc_prli_payload_t));
	prli->type = fc_type;
	prli->type_ext = 0;
	prli->flags = ocs_htobe16(FC_PRLI_ESTABLISH_IMAGE_PAIR | FC_PRLI_REQUEST_EXECUTED);

	/* Advertise initiator/target roles per the sport configuration. */
	prli->service_params = ocs_htobe16(FC_PRLI_READ_XRDY_DISABLED |
		(node->sport->enable_ini ? FC_PRLI_INITIATOR_FUNCTION : 0) |
		(node->sport->enable_tgt ? FC_PRLI_TARGET_FUNCTION : 0));

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*prli)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a PRLO accept response.
*
* Description
* Construct a PRLO LS_ACC response, and send to the \c node, using the originator
* exchange ID \c ox_id.
*
* @param io Pointer to a SCSI IO object.
 * @param ox_id Originator exchange ID.
 * @param fc_type FC-4 type placed in the PRLO accept payload.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_prlo_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_prlo_acc_payload_t *prlo_acc;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "prlo_acc";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* Build the PRLO accept page for the requested FC-4 type. */
	prlo_acc = io->els_req.virt;
	ocs_memset(prlo_acc, 0, sizeof(*prlo_acc));
	prlo_acc->command_code = FC_ELS_CMD_ACC;
	prlo_acc->page_length = 16;
	prlo_acc->payload_length = ocs_htobe16(sizeof(fc_prlo_acc_payload_t));
	prlo_acc->type = fc_type;
	prlo_acc->type_ext = 0;
	prlo_acc->response_code = FC_PRLO_REQUEST_EXECUTED;

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*prlo_acc)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a generic LS_ACC response without a payload.
*
* Description
* A generic LS_ACC response is sent to the \c node using the originator exchange ID
* \c ox_id.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id Originator exchange id.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_ls_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_acc_payload_t *acc;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "ls_acc";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* Minimal LS_ACC: only the command code is set. */
	acc = io->els_req.virt;
	ocs_memset(acc, 0, sizeof(*acc));
	acc->command_code = FC_ELS_CMD_ACC;

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*acc)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a LOGO accept response.
*
* Description
* Construct a LOGO LS_ACC response, and send to the \c node, using the originator
* exchange ID \c ox_id.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id Originator exchange ID.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_logo_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	fc_acc_payload_t *logo;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "logo_acc";
	io->init_task_tag = ox_id;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* Minimal LOGO LS_ACC: only the command code is set. */
	logo = io->els_req.virt;
	ocs_memset(logo, 0, sizeof(*logo));
	logo->command_code = FC_ELS_CMD_ACC;
	logo->resv1 = 0;

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*logo)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send an ADISC accept response.
*
* Description
 * Construct an ADISC LS_ACC, and send to the \c node, using the originator
* exchange id \c ox_id.
*
* @param io Pointer to a SCSI IO object.
* @param ox_id Originator exchange ID.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_send_adisc_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
{
	ocs_node_t *node = io->node;
	fc_adisc_payload_t *adisc;
	fc_plogi_payload_t *sparams;
	ocs_t *ocs;

	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);
	ocs = node->ocs;

	node_els_trace();

	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "adisc_acc";
	io->init_task_tag = ox_id;

	/* Go ahead and send the ELS_ACC */
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.els.ox_id = ox_id;

	/* Our names come from the sport service parameters. */
	sparams = (fc_plogi_payload_t*) node->sport->service_params;

	adisc = io->els_req.virt;
	ocs_memset(adisc, 0, sizeof(*adisc));
	adisc->command_code = FC_ELS_CMD_ACC;
	adisc->hard_address = 0;
	adisc->port_name_hi = sparams->port_name_hi;
	adisc->port_name_lo = sparams->port_name_lo;
	adisc->node_name_hi = sparams->node_name_hi;
	adisc->node_name_lo = sparams->node_name_lo;
	adisc->port_id = fc_htobe24(node->rnode.sport->fc_id);

	io->hio_type = OCS_HW_ELS_RSP;
	if (ocs_els_send_rsp(io, sizeof(*adisc)) != 0) {
		ocs_els_io_free(io);
		return NULL;
	}
	return io;
}
/**
* @ingroup els_api
* @brief Send a RFTID CT request.
*
* Description
* Construct an RFTID CT request, and send to the \c node.
*
* @param node Node to which the RFTID request is sent.
* @param timeout_sec Time, in seconds, to wait before timing out the ELS.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_ns_send_rftid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fcct_rftid_req_t *rftid;
	ocs_io_t *els;

	node_els_trace();

	els = ocs_els_io_alloc(node, sizeof(*rftid), OCS_ELS_ROLE_ORIGINATOR);
	if (els == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}

	/* This goes out as an FC-CT frame, not a raw ELS. */
	els->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
	els->iparam.fc_ct.type = FC_TYPE_GS;
	els->iparam.fc_ct.df_ctl = 0;
	els->iparam.fc_ct.timeout = timeout_sec;

	els->els_callback = cb;
	els->els_callback_arg = cbarg;
	els->display_name = "rftid";

	/* Build the RFT_ID request: register the FCP FC-4 type for our
	 * port ID with the name server. */
	rftid = els->els_req.virt;
	ocs_memset(rftid, 0, sizeof(*rftid));
	fcct_build_req_header(&rftid->hdr, FC_GS_NAMESERVER_RFT_ID, (OCS_ELS_RSP_LEN - sizeof(rftid->hdr)));
	rftid->port_id = ocs_htobe32(node->rnode.sport->fc_id);
	rftid->fc4_types[FC_GS_TYPE_WORD(FC_TYPE_FCP)] = ocs_htobe32(1 << FC_GS_TYPE_BIT(FC_TYPE_FCP));

	els->hio_type = OCS_HW_FC_CT;
	ocs_io_transition(els, __ocs_els_init, NULL);
	return els;
}
/**
* @ingroup els_api
* @brief Send a RFFID CT request.
*
* Description
* Construct an RFFID CT request, and send to the \c node.
*
* @param node Node to which the RFFID request is sent.
* @param timeout_sec Time, in seconds, to wait before timing out the ELS.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_ns_send_rffid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fcct_rffid_req_t *req;
	ocs_io_t *io;
	node_els_trace();
	/* Allocate an originator IO sized for the RFF_ID request payload */
	io = ocs_els_io_alloc(node, sizeof(*req), OCS_ELS_ROLE_ORIGINATOR);
	if (io == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}
	/* CT pass-through parameters */
	io->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
	io->iparam.fc_ct.type = FC_TYPE_GS;
	io->iparam.fc_ct.df_ctl = 0;
	io->iparam.fc_ct.timeout = timeout_sec;
	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "rffid";
	/* Build the RFF_ID request: advertise our FC-4 feature bits
	 * (initiator and/or target, per sport configuration) for FCP.
	 */
	req = io->els_req.virt;
	ocs_memset(req, 0, sizeof(*req));
	fcct_build_req_header(&req->hdr, FC_GS_NAMESERVER_RFF_ID, (OCS_ELS_RSP_LEN - sizeof(req->hdr)));
	req->port_id = ocs_htobe32(node->rnode.sport->fc_id);
	if (node->sport->enable_ini)
		req->fc4_feature_bits |= FC4_FEATURE_INITIATOR;
	if (node->sport->enable_tgt)
		req->fc4_feature_bits |= FC4_FEATURE_TARGET;
	req->type = FC_TYPE_FCP;
	io->hio_type = OCS_HW_FC_CT;
	/* Kick off the ELS IO state machine to submit the request */
	ocs_io_transition(io, __ocs_els_init, NULL);
	return io;
}
/**
* @ingroup els_api
* @brief Send a GIDPT CT request.
*
* Description
* Construct a GIDPT CT request, and send to the \c node.
*
* @param node Node to which the GIDPT request is sent.
* @param timeout_sec Time, in seconds, to wait before timing out the ELS.
* @param retries Number of times to retry errors before reporting a failure.
* @param cb Callback function.
* @param cbarg Callback function argument.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_ns_send_gidpt(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
	els_cb_t cb, void *cbarg)
{
	ocs_t *ocs = node->ocs;
	fcct_gidpt_req_t *req;
	ocs_io_t *io;
	node_els_trace();
	/* GID_PT responses can be large; allocate with an explicit response size */
	io = ocs_els_io_alloc_size(node, sizeof(*req), OCS_ELS_GID_PT_RSP_LEN, OCS_ELS_ROLE_ORIGINATOR);
	if (io == NULL) {
		ocs_log_err(ocs, "IO alloc failed\n");
		return NULL;
	}
	/* CT pass-through parameters */
	io->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
	io->iparam.fc_ct.type = FC_TYPE_GS;
	io->iparam.fc_ct.df_ctl = 0;
	io->iparam.fc_ct.timeout = timeout_sec;
	io->els_callback = cb;
	io->els_callback_arg = cbarg;
	io->display_name = "gidpt";
	/* Build the GID_PT request; port_type 0x7f presumably selects all
	 * port types (see FC-GS) — scopes are left unrestricted.
	 */
	req = io->els_req.virt;
	ocs_memset(req, 0, sizeof(*req));
	fcct_build_req_header(&req->hdr, FC_GS_NAMESERVER_GID_PT, (OCS_ELS_GID_PT_RSP_LEN - sizeof(req->hdr)) );
	req->domain_id_scope = 0;
	req->area_id_scope = 0;
	req->port_type = 0x7f;
	io->hio_type = OCS_HW_FC_CT;
	/* Kick off the ELS IO state machine to submit the request */
	ocs_io_transition(io, __ocs_els_init, NULL);
	return io;
}
/**
* @ingroup els_api
* @brief Send a BA_ACC given the request's FC header
*
* Description
* Using the S_ID/D_ID from the request's FC header, generate a BA_ACC.
*
* @param io Pointer to a SCSI IO object.
* @param hdr Pointer to the FC header.
*
* @return Returns pointer to IO object, or NULL if error.
*/
ocs_io_t *
ocs_bls_send_acc_hdr(ocs_io_t *io, fc_header_t *hdr)
{
	/* Extract the exchange IDs and destination ID from the wire-order
	 * (big-endian) FC header and hand off to ocs_bls_send_acc().
	 */
	return ocs_bls_send_acc(io, fc_be24toh(hdr->d_id),
				ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
}
/**
* @ingroup els_api
* @brief Send a BLS BA_ACC response.
*
* Description
* Construct a BLS BA_ACC response, and send to the \c node.
*
* @param io Pointer to a SCSI IO object.
* @param s_id S_ID to use for the response. If UINT32_MAX, then use our SLI port
* (sport) S_ID.
* @param ox_id Originator exchange ID.
* @param rx_id Responder exchange ID.
*
* @return Returns pointer to IO object, or NULL if error.
*/
static ocs_io_t *
ocs_bls_send_acc(ocs_io_t *io, uint32_t s_id, uint16_t ox_id, uint16_t rx_id)
{
	ocs_node_t *node = io->node;
	int32_t rc;
	fc_ba_acc_payload_t *acc;
	ocs_t *ocs;
	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);
	ocs = node->ocs;
	/* If the caller passed our own S_ID, substitute UINT32_MAX so the
	 * sport S_ID is used instead (per the contract documented above).
	 */
	if (node->rnode.sport->fc_id == s_id) {
		s_id = UINT32_MAX;
	}
	/* fill out generic fields */
	io->ocs = ocs;
	io->node = node;
	io->cmd_tgt = TRUE;
	/* fill out BLS Response-specific fields */
	io->io_type = OCS_IO_TYPE_BLS_RESP;
	io->display_name = "ba_acc";
	io->hio_type = OCS_HW_BLS_ACC_SID;
	io->init_task_tag = ox_id;
	/* fill out iparam fields */
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.bls_sid.s_id = s_id;
	io->iparam.bls_sid.ox_id = ox_id;
	io->iparam.bls_sid.rx_id = rx_id;
	/* BA_ACC payload echoes the exchange IDs; high_seq_cnt is set to
	 * all-ones (UINT16_MAX) — presumably "no sequence restriction";
	 * confirm against FC-FS BA_ACC format.
	 */
	acc = (void *)io->iparam.bls_sid.payload;
	ocs_memset(io->iparam.bls_sid.payload, 0, sizeof(io->iparam.bls_sid.payload));
	acc->ox_id = io->iparam.bls_sid.ox_id;
	acc->rx_id = io->iparam.bls_sid.rx_id;
	acc->high_seq_cnt = UINT16_MAX;
	/* Dispatch; on failure free the IO and return NULL */
	if ((rc = ocs_scsi_io_dispatch(io, ocs_bls_send_acc_cb))) {
		ocs_log_err(ocs, "ocs_scsi_io_dispatch() failed: %d\n", rc);
		ocs_scsi_io_free(io);
		io = NULL;
	}
	return io;
}
/**
* @brief Handle the BLS accept completion.
*
* Description
* Upon completion of sending a BA_ACC, this callback is invoked by the HW.
*
* @param hio Pointer to the HW IO object.
* @param rnode Pointer to the HW remote node.
* @param length Length of the response payload, in bytes.
* @param status Completion status.
* @param ext_status Extended completion status.
* @param app Callback private argument.
*
* @return Returns 0 on success; or a negative error value on failure.
*/
static int32_t
ocs_bls_send_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
{
	/* The BA_ACC has completed; all that remains is to release the IO. */
	ocs_io_t *bls_io = app;
	ocs_assert(bls_io, -1);
	ocs_scsi_io_free(bls_io);
	return 0;
}
/**
* @brief ELS abort callback.
*
* Description
* This callback is invoked by the HW when an ELS IO is aborted.
*
* @param hio Pointer to the HW IO object.
* @param rnode Pointer to the HW remote node.
* @param length Length of the response payload, in bytes.
* @param status Completion status.
* @param ext_status Extended completion status.
* @param app Callback private argument.
*
* @return Returns 0 on success; or a negative error value on failure.
*/
static int32_t
ocs_els_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
{
	ocs_io_t *abort_req = NULL;	/* IO structure that carried the abort */
	ocs_io_t *aborted_els;		/* the ELS request that was aborted */
	ocs_t *ocs;
	ocs_assert(app, -1);
	abort_req = app;
	aborted_els = abort_req->io_to_abort;
	ocs_assert(aborted_els->node, -1);
	ocs_assert(aborted_els->node->ocs, -1);
	ocs = aborted_els->node->ocs;
	if (status != 0) {
		ocs_log_warn(ocs, "status x%x ext x%x\n", status, ext_status);
	}
	/* The abort IO has served its purpose; release it */
	ocs_io_free(ocs, abort_req);
	/* Tell the ELS state machine that the abort process is complete.
	 * Note: The ELS SM will already be receiving ELS_REQ_OK/FAIL/RJT/ABORTED.
	 */
	ocs_els_post_event(aborted_els, OCS_EVT_ELS_ABORT_CMPL, NULL);
	/* Drop the reference taken by ocs_els_abort_io() */
	ocs_ref_put(&aborted_els->ref);
	return 0;
}
/**
* @brief Abort an ELS IO.
*
* Description
* The ELS IO is aborted by making a HW abort IO request,
* optionally requesting that an ABTS is sent.
*
* \b Note: This function allocates a HW IO, and associates the HW IO
* with the ELS IO that it is aborting. It does not associate
* the HW IO with the node directly, like for ELS requests. The
* abort completion is propagated up to the node once the
* original WQE and the abort WQE are complete (the original WQE
* completion is not propagated up to node).
*
* @param els Pointer to the ELS IO.
* @param send_abts Boolean to indicate if hardware will automatically generate an ABTS.
*
* @return Returns pointer to Abort IO object, or NULL if error.
*/
static ocs_io_t *
ocs_els_abort_io(ocs_io_t *els, int send_abts)
{
	ocs_t *ocs;
	ocs_xport_t *xport;
	int32_t rc;
	ocs_io_t *abort_io = NULL;
	ocs_assert(els, NULL);
	ocs_assert(els->node, NULL);
	ocs_assert(els->node->ocs, NULL);
	ocs = els->node->ocs;
	ocs_assert(ocs->xport, NULL);
	xport = ocs->xport;
	/* take a reference on IO being aborted; released by ocs_els_abort_cb()
	 * on success, or below on failure
	 */
	if ((ocs_ref_get_unless_zero(&els->ref) == 0)) {
		/* command no longer active */
		ocs_log_debug(ocs, "els no longer active\n");
		return NULL;
	}
	/* allocate IO structure to send abort */
	abort_io = ocs_io_alloc(ocs);
	if (abort_io == NULL) {
		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
	} else {
		ocs_assert(abort_io->hio == NULL, NULL);
		/* set generic fields */
		abort_io->ocs = ocs;
		abort_io->node = els->node;
		abort_io->cmd_ini = TRUE;
		/* set type and ABORT-specific fields */
		abort_io->io_type = OCS_IO_TYPE_ABORT;
		abort_io->display_name = "abort_els";
		abort_io->io_to_abort = els;
		abort_io->send_abts = send_abts;
		/* now dispatch IO */
		if ((rc = ocs_scsi_io_dispatch_abort(abort_io, ocs_els_abort_cb))) {
			/* Fix: log the function that actually failed (message
			 * previously named ocs_scsi_io_dispatch).
			 */
			ocs_log_err(ocs, "ocs_scsi_io_dispatch_abort failed: %d\n", rc);
			ocs_io_free(ocs, abort_io);
			abort_io = NULL;
		}
	}
	/* if something failed, put reference on ELS to abort */
	if (abort_io == NULL) {
		ocs_ref_put(&els->ref); /* ocs_ref_get(): same function */
	}
	return abort_io;
}
/*
* ELS IO State Machine
*/
/*
 * Declare and validate the standard locals (els, node, ocs) used by every
 * ELS state-machine handler. Asserts cause the handler to return NULL.
 */
#define std_els_state_decl(...) \
	ocs_io_t *els = NULL; \
	ocs_node_t *node = NULL; \
	ocs_t *ocs = NULL; \
	ocs_assert(ctx != NULL, NULL); \
	els = ctx->app; \
	ocs_assert(els != NULL, NULL); \
	node = els->node; \
	ocs_assert(node != NULL, NULL); \
	ocs = node->ocs; \
	ocs_assert(ocs != NULL, NULL);
/*
 * Trace an ELS state-machine event (node, ELS name, handler, event) when
 * ELS tracing is enabled for this ocs instance.
 */
#define els_sm_trace(...) \
	do { \
		if (OCS_LOG_ENABLE_ELS_TRACE(ocs)) \
			ocs_log_info(ocs, "[%s] %-8s %-20s %-20s\n", node->display_name, els->display_name, \
				__func__, ocs_sm_event_name(evt)); \
	} while (0)
/**
* @brief Cleanup an ELS IO
*
* Description
* Cleans up an ELS IO by posting the requested event to the owning node object;
* invoking the callback, if one is provided; and then freeing the
* ELS IO object.
*
* @param els Pointer to the ELS IO.
* @param node_evt Node SM event to post.
* @param arg Node SM event argument.
*
* @return None.
*/
void
ocs_els_io_cleanup(ocs_io_t *els, ocs_sm_event_t node_evt, void *arg)
{
	ocs_assert(els);
	/* don't want further events that could come; e.g. abort requests
	 * from the node state machine; thus, disable state machine
	 */
	ocs_sm_disable(&els->els_sm);
	/* Notify the owning node first, then the per-IO callback (if any) */
	ocs_node_post_event(els->node, node_evt, arg);
	/* If this IO has a callback, invoke it */
	if (els->els_callback) {
		(*els->els_callback)(els->node, arg, els->els_callback_arg);
	}
	/* Flag the ELS IO as free-able; the actual free is performed
	 * elsewhere by whoever consumes els_req_free (not visible here).
	 */
	els->els_req_free = 1;
}
/**
* @brief Common event handler for the ELS IO state machine.
*
* Description
* Provide handler for events for which default actions are desired.
*
* @param funcname Name of the calling function (for logging).
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	switch(evt) {
	case OCS_EVT_ENTER:
	case OCS_EVT_REENTER:
	case OCS_EVT_EXIT:
		/* state entry/re-entry/exit need no default action */
		break;
	/* If ELS_REQ_FAIL is not handled in state, then we'll terminate this ELS and
	 * pass the event to the node
	 */
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		ocs_log_warn(els->node->ocs, "[%s] %-20s %-20s not handled - terminating ELS\n", node->display_name, funcname,
			ocs_sm_event_name(evt));
		ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
		break;
	default:
		/* any other unhandled event is logged but otherwise ignored */
		ocs_log_warn(els->node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
			ocs_sm_event_name(evt));
		break;
	}
	return NULL;
}
/**
* @brief Initial ELS IO state
*
* Description
* This is the initial ELS IO state. Upon entry, the requested ELS/CT is submitted to
* the hardware.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc = 0;
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_ENTER: {
		/* Submit the pre-built ELS/CT request to the hardware */
		rc = ocs_els_send(els, els->els_req.size, els->els_timeout_sec, ocs_els_req_cb);
		if (rc) {
			/* Send failed: synthesize an all-ones failure status
			 * and terminate the ELS immediately.
			 */
			ocs_node_cb_t cbdata;
			cbdata.status = cbdata.ext_status = (~0);
			cbdata.els = els;
			ocs_log_err(ocs, "ocs_els_send failed: %d\n", rc);
			ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
		} else {
			/* Submitted; wait for the completion event */
			ocs_io_transition(els, __ocs_els_wait_resp, NULL);
		}
		break;
	}
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for the ELS request to complete.
*
* Description
* This is the ELS IO state that waits for the submitted ELS event to complete.
* If an error completion event is received, the requested ELS is aborted.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_wait_resp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_io_t *io;
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_SRRS_ELS_REQ_OK: {
		/* request completed successfully; clean up and notify the node */
		ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_OK, arg);
		break;
	}
	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
		ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
		break;
	}
	case OCS_EVT_ELS_REQ_TIMEOUT: {
		els_io_printf(els, "Timed out, retry (%d tries remaining)\n",
				els->els_retries_remaining-1);
		ocs_io_transition(els, __ocs_els_retry, NULL);
		break;
	}
	case OCS_EVT_SRRS_ELS_REQ_RJT: {
		ocs_node_cb_t *cbdata = arg;
		/* LS_RJT reason code lives in bits 23:16 of the extended status */
		uint32_t reason_code = (cbdata->ext_status >> 16) & 0xff;
		/* delay and retry if reason code is Logical Busy */
		switch (reason_code) {
		case FC_REASON_LOGICAL_BUSY:
			els->node->els_req_cnt--;
			els_io_printf(els, "LS_RJT Logical Busy response, delay and retry\n");
			ocs_io_transition(els, __ocs_els_delay_retry, NULL);
			break;
		default:
			ocs_els_io_cleanup(els, evt, arg);
			break;
		}
		break;
	}
	case OCS_EVT_ABORT_ELS: {
		/* request to abort this ELS without an ABTS */
		els_io_printf(els, "ELS abort requested\n");
		els->els_retries_remaining = 0;		/* Set retries to zero, we are done */
		io = ocs_els_abort_io(els, FALSE);
		if (io == NULL) {
			/* Fix: report the function that actually failed (message
			 * previously said "ocs_els_send failed").
			 */
			ocs_log_err(ocs, "ocs_els_abort_io failed\n");
			ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
		} else {
			ocs_io_transition(els, __ocs_els_aborting, NULL);
		}
		break;
	}
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for the ELS IO abort request to complete, and retry the ELS.
*
* Description
* This state is entered when waiting for an abort of an ELS
* request to complete so the request can be retried.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc = 0;
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_ENTER: {
		/* handle event for ABORT_XRI WQE
		 * once abort is complete, retry if retries left;
		 * don't need to wait for OCS_EVT_SRRS_ELS_REQ_* event because we got
		 * by receiving OCS_EVT_ELS_REQ_TIMEOUT
		 */
		ocs_node_cb_t node_cbdata;
		node_cbdata.status = node_cbdata.ext_status = (~0);
		node_cbdata.els = els;
		if (els->els_retries_remaining && --els->els_retries_remaining) {
			/* Use a different XRI for the retry (would like a new oxid),
			 * so free the HW IO (dispatch will allocate a new one). It's an
			 * optimization to only free the HW IO here and not the ocs_io_t;
			 * Freeing the ocs_io_t object would require copying all the necessary
			 * info from the old ocs_io_t object to the * new one; and allocating
			 * a new ocs_io_t could fail.
			 */
			ocs_assert(els->hio, NULL);
			ocs_hw_io_free(&ocs->hw, els->hio);
			els->hio = NULL;
			/* result isn't propagated up to node sm, need to decrement req cnt */
			ocs_assert(els->node->els_req_cnt, NULL);
			els->node->els_req_cnt--;
			rc = ocs_els_send(els, els->els_req.size, els->els_timeout_sec, ocs_els_req_cb);
			if (rc) {
				ocs_log_err(ocs, "ocs_els_send failed: %d\n", rc);
				ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &node_cbdata);
			}
			/* NOTE(review): this transition runs even when ocs_els_send()
			 * failed and the IO was just cleaned up above. ocs_els_io_cleanup()
			 * disables the ELS state machine, so this looks harmless in the
			 * failure path — confirm intended.
			 */
			ocs_io_transition(els, __ocs_els_wait_resp, NULL);
		} else {
			els_io_printf(els, "Retries exhausted\n");
			ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &node_cbdata);
		}
		break;
	}
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for a retry timer to expire having received an abort request
*
* Description
* This state is entered when waiting for a timer event, after having received
* an abort request, to avoid a race condition with the timer handler
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_aborted_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_ENTER:
		/* mod/resched the timer for a short duration so the pending
		 * timer callback fires promptly and we can tear down safely
		 */
		ocs_mod_timer(&els->delay_timer, 1);
		break;
	case OCS_EVT_TIMER_EXPIRED:
		/* Cancel the timer, skip post node event, and free the io */
		node->els_req_cnt++;
		ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
		break;
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for a retry timer to expire
*
* Description
* This state is entered when waiting for a timer event, so that
* the ELS request can be retried.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_ENTER:
		/* arm the retry delay timer (5000 — presumably milliseconds,
		 * i.e. a 5 s back-off; confirm ocs_setup_timer() units)
		 */
		ocs_setup_timer(ocs, &els->delay_timer, ocs_els_delay_timer_cb, els, 5000);
		break;
	case OCS_EVT_TIMER_EXPIRED:
		/* Retry delay timer expired, retry the ELS request, Free the HW IO so
		 * that a new oxid is used.
		 */
		if (els->hio != NULL) {
			ocs_hw_io_free(&ocs->hw, els->hio);
			els->hio = NULL;
		}
		ocs_io_transition(els, __ocs_els_init, NULL);
		break;
	case OCS_EVT_ABORT_ELS:
		/* abort arrived while the delay timer is pending; go resolve
		 * the race with the timer handler before tearing down
		 */
		ocs_io_transition(els, __ocs_els_aborted_delay_retry, NULL);
		break;
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for the ELS IO abort request to complete.
*
* Description
* This state is entered after we abort an ELS WQE and are
* waiting for either the original ELS WQE request or the abort
* to complete.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_aborting(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_ELS_REQ_TIMEOUT:
	case OCS_EVT_ELS_REQ_ABORTED: {
		/* completion for ELS received first, transition to wait for abort cmpl */
		els_io_printf(els, "request cmpl evt=%s\n", ocs_sm_event_name(evt));
		ocs_io_transition(els, __ocs_els_aborting_wait_abort_cmpl, NULL);
		break;
	}
	case OCS_EVT_ELS_ABORT_CMPL: {
		/* completion for abort was received first, transition to wait for req cmpl */
		els_io_printf(els, "abort cmpl evt=%s\n", ocs_sm_event_name(evt));
		ocs_io_transition(els, __ocs_els_aborting_wait_req_cmpl, NULL);
		break;
	}
	case OCS_EVT_ABORT_ELS:
		/* nothing we can do but wait */
		break;
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief cleanup ELS after abort
*
* @param els ELS IO to cleanup
*
* @return Returns None.
*/
static void
ocs_els_abort_cleanup(ocs_io_t *els)
{
	/* Whatever state the ELS happened to be in, propagate an "aborted"
	 * event up to the node state machine (in lieu of OCS_EVT_SRRS_ELS_*)
	 * with a zeroed status.
	 */
	ocs_node_cb_t cbdata;
	cbdata.els = els;
	cbdata.status = 0;
	cbdata.ext_status = 0;
	els_io_printf(els, "Request aborted\n");
	ocs_els_io_cleanup(els, OCS_EVT_ELS_REQ_ABORTED, &cbdata);
}
/**
* @brief Wait for the ELS IO abort request to complete.
*
* Description
* This state is entered after we abort an ELS WQE, we received
* the abort completion first and are waiting for the original
* ELS WQE request to complete.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_aborting_wait_req_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_ELS_REQ_TIMEOUT:
	case OCS_EVT_ELS_REQ_ABORTED: {
		/* completion for ELS that was aborted; both WQEs are now done,
		 * so finish the abort and tear down the IO
		 */
		ocs_els_abort_cleanup(els);
		break;
	}
	case OCS_EVT_ABORT_ELS:
		/* nothing we can do but wait */
		break;
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Wait for the ELS IO abort request to complete.
*
* Description
* This state is entered after we abort an ELS WQE, we received
* the original ELS WQE request completion first and are waiting
* for the abort to complete.
*
* @param ctx Remote node SM context.
* @param evt Event to process.
* @param arg Per event optional argument.
*
* @return Returns NULL.
*/
void *
__ocs_els_aborting_wait_abort_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_els_state_decl();
	els_sm_trace();
	switch(evt) {
	case OCS_EVT_ELS_ABORT_CMPL: {
		/* abort WQE is done too; finish the abort and tear down the IO */
		ocs_els_abort_cleanup(els);
		break;
	}
	case OCS_EVT_ABORT_ELS:
		/* nothing we can do but wait */
		break;
	default:
		__ocs_els_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}
/**
* @brief Generate ELS context ddump data.
*
* Description
* Generate the ddump data for an ELS context.
*
* @param textbuf Pointer to the text buffer.
* @param els Pointer to the ELS context.
*
* @return None.
*/
void
ocs_ddump_els(ocs_textbuf_t *textbuf, ocs_io_t *els)
{
	/* Emit an "els" section: the ELS-specific flags first, then the
	 * generic IO ddump.
	 */
	ocs_ddump_section(textbuf, "els", -1);
	ocs_ddump_value(textbuf, "req_free", "%d", els->els_req_free);
	ocs_ddump_value(textbuf, "evtdepth", "%d", els->els_evtdepth);
	ocs_ddump_value(textbuf, "pend", "%d", els->els_pend);
	ocs_ddump_value(textbuf, "active", "%d", els->els_active);
	ocs_ddump_io(textbuf, els);
	ocs_ddump_endsection(textbuf, "els", -1);
}
/**
* @brief return TRUE if given ELS list is empty (while taking proper locks)
*
* Test if given ELS list is empty while holding the node->active_ios_lock.
*
* @param node pointer to node object
* @param list pointer to list
*
* @return TRUE if els_io_list is empty
*/
int32_t
ocs_els_io_list_empty(ocs_node_t *node, ocs_list_t *list)
{
	int rc;
	/* Hold the node's active-IOs lock so the emptiness test is consistent */
	ocs_lock(&node->active_ios_lock);
	rc = ocs_list_empty(list);
	ocs_unlock(&node->active_ios_lock);
	return rc;
}
/**
* @brief Handle CT send response completion
*
* Called when CT response completes, free IO
*
* @param hio Pointer to the HW IO context that completed.
* @param rnode Pointer to the remote node.
* @param length Length of the returned payload data.
* @param status Status of the completion.
* @param ext_status Extended status of the completion.
* @param arg Application-specific argument (generally a pointer to the ELS IO context).
*
* @return returns 0
*/
static int32_t
ocs_ct_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
{
	/* The CT response has completed; release the ELS IO that carried it */
	ocs_io_t *ct_io = arg;
	ocs_els_io_free(ct_io);
	return 0;
}
/**
* @brief Send CT response
*
* Sends a CT response frame with payload
*
* @param io Pointer to the IO context.
* @param ox_id Originator exchange ID
* @param ct_hdr Pointer to the CT IU
* @param cmd_rsp_code CT response code
* @param reason_code Reason code
* @param reason_code_explanation Reason code explanation
*
* @return returns 0 for success, a negative error code value for failure.
*/
int32_t
ocs_send_ct_rsp(ocs_io_t *io, uint32_t ox_id, fcct_iu_header_t *ct_hdr, uint32_t cmd_rsp_code, uint32_t reason_code, uint32_t reason_code_explanation)
{
	fcct_iu_header_t *rsp = io->els_rsp.virt;
	/* Start from the request's CT header, overwrite the command/response
	 * code, then fill in the reason codes.
	 */
	*rsp = *ct_hdr;
	fcct_build_req_header(rsp, cmd_rsp_code, 0);
	rsp->reason_code = reason_code;
	rsp->reason_code_explanation = reason_code_explanation;
	io->display_name = "ct response";
	io->init_task_tag = ox_id;
	io->wire_len += sizeof(*rsp);
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	/* Fix: io_type was previously assigned twice (before and after the
	 * memset of iparam, which does not touch it); set it once here.
	 */
	io->io_type = OCS_IO_TYPE_CT_RESP;
	io->hio_type = OCS_HW_FC_CT_RSP;
	io->iparam.fc_ct_rsp.ox_id = ocs_htobe16(ox_id);
	io->iparam.fc_ct_rsp.r_ctl = 3;
	io->iparam.fc_ct_rsp.type = FC_TYPE_GS;
	io->iparam.fc_ct_rsp.df_ctl = 0;
	io->iparam.fc_ct_rsp.timeout = 5;
	/* Dispatch; on failure free the IO and report an error to the caller */
	if (ocs_scsi_io_dispatch(io, ocs_ct_acc_cb) < 0) {
		ocs_els_io_free(io);
		return -1;
	}
	return 0;
}
/**
* @brief Handle delay retry timeout
*
* Callback is invoked when the delay retry timer expires.
*
* @param arg pointer to the ELS IO object
*
* @return none
*/
static void
ocs_els_delay_timer_cb(void *arg)
{
	ocs_io_t *els = arg;
	ocs_node_t *node = els->node;
	/*
	 * There is a potential deadlock here since Linux executes timers
	 * in a soft IRQ context. The lock may already be held by the interrupt
	 * thread. Handle this case by attempting to take the node lock and
	 * rescheduling the timer if we fail to acquire the lock.
	 *
	 * Note: This code relies on the fact that the node lock is recursive.
	 */
	if (ocs_node_lock_try(node)) {
		ocs_els_post_event(els, OCS_EVT_TIMER_EXPIRED, NULL);
		ocs_node_unlock(node);
	} else {
		/* lock busy: retry shortly (timer argument 1 — same units as
		 * the 5000 used when arming the delay timer)
		 */
		ocs_setup_timer(els->ocs, &els->delay_timer, ocs_els_delay_timer_cb, els, 1);
	}
}
diff --git a/sys/dev/ocs_fc/ocs_gendump.c b/sys/dev/ocs_fc/ocs_gendump.c
index 83155d90c3a3..6a1abfefadfc 100644
--- a/sys/dev/ocs_fc/ocs_gendump.c
+++ b/sys/dev/ocs_fc/ocs_gendump.c
@@ -1,388 +1,388 @@
/*
* Copyright (c) 2021 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "ocs.h"
#include "ocs_gendump.h"
/* Reset all the functions associated with a bus/dev */
static int
ocs_gen_dump_reset(uint8_t bus, uint8_t dev)
{
	uint32_t index = 0;
	ocs_t *ocs;
	int rc = 0;
	/* Walk every ocs instance; only functions on the requested bus/dev
	 * are reset. Errors are recorded but do not stop the walk.
	 */
	while ((ocs = ocs_get_instance(index++)) != NULL) {
		uint8_t ocs_bus, ocs_dev, ocs_func;
		ocs_domain_t *domain;
		ocs_get_bus_dev_func(ocs, &ocs_bus, &ocs_dev, &ocs_func);
		if (!(ocs_bus == bus && ocs_dev == dev))
			continue;
		if (ocs_hw_reset(&ocs->hw, OCS_HW_RESET_FUNCTION)) {
			ocs_log_test(ocs, "failed to reset port\n");
			rc = -1;
			continue;
		}
		ocs_log_debug(ocs, "successfully reset port\n");
		/* Tear down any domains left over from before the reset */
		while ((domain = ocs_list_get_head(&ocs->domain_list)) != NULL) {
			ocs_log_debug(ocs, "free domain %p\n", domain);
			ocs_domain_force_free(domain);
		}
		/* now initialize hw so user can read the dump in */
		if (ocs_hw_init(&ocs->hw)) {
			ocs_log_err(ocs, "failed to initialize hw\n");
			rc = -1;
		} else {
			ocs_log_debug(ocs, "successfully initialized hw\n");
		}
	}
	return rc;
}
int
ocs_gen_dump(ocs_t *ocs)
{
	uint32_t reset_required;
	uint32_t dump_ready;
	uint32_t ms_waited;
	uint8_t bus, dev, func;
	int rc = 0;
	int index = 0, port_index = 0;
	ocs_t *nxt_ocs;
	uint8_t nxt_bus, nxt_dev, nxt_func;
	uint8_t prev_port_state[OCS_MAX_HBA_PORTS] = {0,};
	ocs_xport_stats_t link_status;
	ocs_get_bus_dev_func(ocs, &bus, &dev, &func);
	/* Drop link on all ports belonging to this HBA, remembering which
	 * ports were online so they can be restored afterwards.
	 */
	while ((nxt_ocs = ocs_get_instance(index++)) != NULL) {
		ocs_get_bus_dev_func(nxt_ocs, &nxt_bus, &nxt_dev, &nxt_func);
		if (!(bus == nxt_bus && dev == nxt_dev))
			continue;
		if ((port_index >= OCS_MAX_HBA_PORTS))
			continue;
		/* Check current link status and save for future use */
		if (ocs_xport_status(nxt_ocs->xport, OCS_XPORT_PORT_STATUS,
		    &link_status) == 0) {
			if (link_status.value == OCS_XPORT_PORT_ONLINE) {
				prev_port_state[port_index] = 1;
				ocs_xport_control(nxt_ocs->xport,
				    OCS_XPORT_PORT_OFFLINE);
			} else {
				prev_port_state[port_index] = 0;
			}
		}
		port_index++;
	}
	/* Wait until all ports have quiesced (up to 60 s each) */
	for (index = 0; (nxt_ocs = ocs_get_instance(index++)) != NULL; ) {
		ms_waited = 0;
		for (;;) {
			ocs_xport_stats_t status;
			ocs_xport_status(nxt_ocs->xport, OCS_XPORT_IS_QUIESCED,
			    &status);
			if (status.value) {
				ocs_log_debug(nxt_ocs, "port quiesced\n");
				break;
			}
			ocs_msleep(10);
			ms_waited += 10;
			if (ms_waited > 60000) {
				ocs_log_test(nxt_ocs,
				    "timed out waiting for port to quiesce\n");
				break;
			}
		}
	}
	/* Initiate dump */
	if (ocs_hw_raise_ue(&ocs->hw, 1) == OCS_HW_RTN_SUCCESS) {
		/* Wait for dump to complete (poll up to 30 s) */
		ocs_log_debug(ocs, "Dump requested, wait for completion.\n");
		dump_ready = 0;
		ms_waited = 0;
		while ((!dump_ready) && (ms_waited < 30000)) {
			ocs_hw_get(&ocs->hw, OCS_HW_DUMP_READY, &dump_ready);
			ocs_udelay(10000);
			ms_waited += 10;
		}
		if (!dump_ready) {
			ocs_log_test(ocs, "Failed to see dump after 30 secs\n");
			rc = -1;
		} else {
			ocs_log_debug(ocs, "successfully generated dump\n");
		}
		/* now reset port */
		ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required);
		ocs_log_debug(ocs, "reset required=%d\n", reset_required);
		if (reset_required) {
			if (ocs_gen_dump_reset(bus, dev) == 0) {
				ocs_log_debug(ocs, "all devices reset\n");
			} else {
				ocs_log_test(ocs, "all devices NOT reset\n");
			}
		}
	} else {
		ocs_log_test(ocs, "dump request to hw failed\n");
		rc = -1;
	}
	index = port_index = 0;
	nxt_ocs = NULL;
	/* Bring links on each HBA port back to their previous state.
	 * Fix: the bounds check must reject port_index == OCS_MAX_HBA_PORTS
	 * as well ('>=', matching the guard in the save loop above); the old
	 * '>' allowed a one-past-the-end read of prev_port_state[].
	 */
	while ((nxt_ocs = ocs_get_instance(index++)) != NULL) {
		ocs_get_bus_dev_func(nxt_ocs, &nxt_bus, &nxt_dev, &nxt_func);
		if (port_index >= OCS_MAX_HBA_PORTS) {
			ocs_log_err(NULL, "port index(%d) out of boundary\n",
			    port_index);
			rc = -1;
			break;
		}
		if ((bus == nxt_bus) && (dev == nxt_dev) &&
		    prev_port_state[port_index++]) {
			ocs_xport_control(nxt_ocs->xport, OCS_XPORT_PORT_ONLINE);
		}
	}
	return rc;
}
int
ocs_fdb_dump(ocs_t *ocs)
{
	uint32_t dump_ready;
	uint32_t ms_waited;
	int rc = 0;
#define FDB 2
	/* Initiate an FDB (type 2) dump */
	if (ocs_hw_raise_ue(&ocs->hw, FDB) == OCS_HW_RTN_SUCCESS) {
		/* Wait for dump to complete (poll up to 10 s) */
		ocs_log_debug(ocs, "Dump requested, wait for completion.\n");
		dump_ready = 0;
		ms_waited = 0;
		while ((!(dump_ready == FDB)) && (ms_waited < 10000)) {
			ocs_hw_get(&ocs->hw, OCS_HW_DUMP_READY, &dump_ready);
			ocs_udelay(10000);
			ms_waited += 10;
		}
		if (!dump_ready) {
			ocs_log_err(ocs, "Failed to see dump after 10 secs\n");
			return -1;
		}
		/* NOTE(review): the wait loop polls for dump_ready == FDB but
		 * this success path only requires dump_ready != 0; a non-FDB
		 * ready value would be reported as success — confirm intended.
		 */
		ocs_log_debug(ocs, "successfully generated dump\n");
	} else {
		ocs_log_err(ocs, "dump request to hw failed\n");
		rc = -1;
	}
	return rc;
}
/**
* @brief Create a Lancer dump into a memory buffer
* @par Description
* This function creates a DMA buffer to hold a Lancer dump,
* sets the dump location to point to that buffer, then calls
* ocs_gen_dump to cause a dump to be transferred to the buffer.
* After the dump is complete it copies the dump to the provided
* user space buffer.
*
* @param ocs Pointer to ocs structure
* @param buf User space buffer in which to store the dump
* @param buflen Length of the user buffer in bytes
*
* @return Returns 0 on success, non-zero on error.
*/
int
ocs_dump_to_host(ocs_t *ocs, void *buf, uint32_t buflen)
{
	int rc;
	uint32_t i, num_buffers;
	ocs_dma_t *dump_buffers;
	uint32_t rem_bytes, offset;

	if (buflen == 0) {
		ocs_log_test(ocs, "zero buffer length is invalid\n");
		return -1;
	}

	/* Number of OCS_MAX_DMA_ALLOC-sized chunks needed to cover buflen */
	num_buffers = ((buflen + OCS_MAX_DMA_ALLOC - 1) / OCS_MAX_DMA_ALLOC);

	dump_buffers = ocs_malloc(ocs, sizeof(ocs_dma_t) * num_buffers,
				  OCS_M_ZERO | OCS_M_NOWAIT);
	if (dump_buffers == NULL) {
		/* Fixed misleading message (was "Failed to dump buffers");
		 * now consistent with ocs_function_speciic_dump(). */
		ocs_log_err(ocs, "Failed to allocate dump buffers\n");
		return -1;
	}

	/* Allocate the DMA buffers to hold the dump */
	rem_bytes = buflen;
	for (i = 0; i < num_buffers; i++) {
		uint32_t num_bytes = MIN(rem_bytes, OCS_MAX_DMA_ALLOC);

		rc = ocs_dma_alloc(ocs, &dump_buffers[i], num_bytes,
				   OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(ocs, "Failed to allocate dump buffer\n");
			/* Free any previously allocated buffers; entries not
			 * yet allocated are zeroed (OCS_M_ZERO above). */
			goto free_and_return;
		}
		rem_bytes -= num_bytes;
	}

	/* Register the buffers as the chip dump location; the final 0 selects
	 * the full dump (cf. ocs_function_speciic_dump(), which passes 1). */
	rc = ocs_hw_set_dump_location(&ocs->hw, num_buffers, dump_buffers, 0);
	if (rc) {
		ocs_log_test(ocs, "ocs_hw_set_dump_location failed\n");
		goto free_and_return;
	}

	/* Generate the dump */
	rc = ocs_gen_dump(ocs);
	if (rc) {
		ocs_log_test(ocs, "ocs_gen_dump failed\n");
		goto free_and_return;
	}

	/* Copy the dump from the DMA buffers into the user buffer */
	offset = 0;
	for (i = 0; i < num_buffers; i++) {
		if (ocs_copy_to_user((uint8_t*)buf + offset,
		    dump_buffers[i].virt, dump_buffers[i].size)) {
			ocs_log_test(ocs, "ocs_copy_to_user failed\n");
			rc = -1;
		}
		offset += dump_buffers[i].size;
	}

free_and_return:
	/* Free the DMA buffers and the descriptor array, then return */
	for (i = 0; i < num_buffers; i++) {
		ocs_dma_free(ocs, &dump_buffers[i]);
	}
	ocs_free(ocs, dump_buffers, sizeof(ocs_dma_t) * num_buffers);
	return rc;
}
/* Create a function-specific firmware dump into a memory buffer and copy
 * it to the supplied user-space buffer.  Mirrors ocs_dump_to_host(), but
 * registers the buffers for a function-specific dump (last argument 1 to
 * ocs_hw_set_dump_location) and triggers it via ocs_fdb_dump().
 *
 * NOTE(review): the function name is misspelled ("speciic"), but it is the
 * exported symbol, so renaming would break callers.
 *
 * @param ocs    Pointer to ocs structure.
 * @param buf    User-space buffer in which to store the dump.
 * @param buflen Length of the user buffer in bytes.
 *
 * @return Returns 0 on success, non-zero on error. */
int
ocs_function_speciic_dump(ocs_t *ocs, void *buf, uint32_t buflen)
{
	int rc;
	uint32_t i, num_buffers;
	ocs_dma_t *dump_buffers;
	uint32_t rem_bytes, offset;

	if (buflen == 0) {
		ocs_log_err(ocs, "zero buffer length is invalid\n");
		return -1;
	}

	/* Number of OCS_MAX_DMA_ALLOC-sized chunks needed to cover buflen */
	num_buffers = ((buflen + OCS_MAX_DMA_ALLOC - 1) / OCS_MAX_DMA_ALLOC);

	dump_buffers = ocs_malloc(ocs, sizeof(ocs_dma_t) * num_buffers,
				  OCS_M_ZERO | OCS_M_NOWAIT);
	if (dump_buffers == NULL) {
		ocs_log_err(ocs, "Failed to allocate dump buffers\n");
		return -1;
	}

	/* Allocate a DMA buffers to hold the dump */
	rem_bytes = buflen;
	for (i = 0; i < num_buffers; i++) {
		uint32_t num_bytes = MIN(rem_bytes, OCS_MAX_DMA_ALLOC);
		rc = ocs_dma_alloc(ocs, &dump_buffers[i], num_bytes,
				   OCS_MIN_DMA_ALIGNMENT);
		if (rc) {
			ocs_log_err(ocs, "Failed to allocate dma buffer\n");
			/* Free any previously allocated buffers */
			goto free_and_return;
		}
		rem_bytes -= num_bytes;
	}

	/* register buffers for function specific dump */
	rc = ocs_hw_set_dump_location(&ocs->hw, num_buffers, dump_buffers, 1);
	if (rc) {
		ocs_log_err(ocs, "ocs_hw_set_dump_location failed\n");
		goto free_and_return;
	}

	/* Invoke dump by setting fdd=1 and ip=1 in sliport_control register */
	rc = ocs_fdb_dump(ocs);
	if (rc) {
		ocs_log_err(ocs, "ocs_gen_dump failed\n");
		goto free_and_return;
	}

	/* Copy the dump from the DMA buffer into the user buffer */
	offset = 0;
	for (i = 0; i < num_buffers; i++) {
		if (ocs_copy_to_user((uint8_t*)buf + offset,
		    dump_buffers[i].virt, dump_buffers[i].size)) {
			ocs_log_err(ocs, "ocs_copy_to_user failed\n");
			rc = -1;
		}
		offset += dump_buffers[i].size;
	}

free_and_return:
	/* Free the DMA buffer and return */
	for (i = 0; i < num_buffers; i++) {
		ocs_dma_free(ocs, &dump_buffers[i]);
	}
	ocs_free(ocs, dump_buffers, sizeof(ocs_dma_t) * num_buffers);
	return rc;
}
diff --git a/sys/dev/ocs_fc/ocs_ioctl.c b/sys/dev/ocs_fc/ocs_ioctl.c
index 71ba17d5f72a..d3cea434b2be 100644
--- a/sys/dev/ocs_fc/ocs_ioctl.c
+++ b/sys/dev/ocs_fc/ocs_ioctl.c
@@ -1,1240 +1,1240 @@
/*-
* Copyright (c) 2017 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "ocs.h"
#include "ocs_utils.h"
#include <sys/ioccom.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <sys/firmware.h>
/* Forward declarations of the character-device entry points. */
static d_open_t ocs_open;
static d_close_t ocs_close;
static d_ioctl_t ocs_ioctl;

/* Character-device switch for the per-adapter /dev/ocs_fc* control node;
 * management tools (e.g. elxsdkutil) drive the driver through d_ioctl. */
static struct cdevsw ocs_cdevsw = {
	.d_version = D_VERSION,
	.d_open = ocs_open,
	.d_close = ocs_close,
	.d_ioctl = ocs_ioctl,
	.d_name = "ocs_fc"
};

/* Defined below; also called from outside this file (non-static). */
int
ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status);
/* d_open handler for the control node: no per-open state, always succeeds. */
static int
ocs_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	return (0);
}
/* d_close handler for the control node: nothing to tear down. */
static int
ocs_close(struct cdev *cdev, int flag, int fmt, struct thread *td)
{
	return (0);
}
/* Mailbox completion callback for commands issued by ocs_process_mbx_ioctl().
 *
 * The empty lock/unlock pair guarantees the ioctl thread has reached its
 * msleep() (which atomically releases dbg_lock) before wakeup() runs, so
 * the wakeup cannot be lost even if the command completes immediately.
 * See the handshake description in ocs_process_mbx_ioctl().
 *
 * @param hw     Hardware context (unused here).
 * @param status Mailbox completion status (unused here).
 * @param mqe    Completed mailbox queue entry (unused here).
 * @param arg    The ocs softc; doubles as the sleep/wakeup channel.
 *
 * @return Always 0. */
static int32_t
__ocs_ioctl_mbox_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	struct ocs_softc *ocs = arg;

	/* wait for the ioctl to sleep before calling wakeup */
	mtx_lock(&ocs->dbg_lock);
	mtx_unlock(&ocs->dbg_lock);

	wakeup(arg);

	return 0;
}
/* Prepare DMA buffers for a SLI_CONFIG mailbox command from the ELXU ioctl.
 *
 * For embedded payloads, the opcode determines the data direction:
 * read-type opcodes get a freshly zeroed DMA buffer wired into the
 * command's host buffer descriptor (results copied back to user space by
 * the caller after completion); write-type opcodes additionally have the
 * user data copied into the DMA buffer here.  Non-embedded payloads are
 * staged in a single DMA buffer sized for the larger direction.
 *
 * On success, *dma owns any buffer allocated here and the caller is
 * responsible for freeing it.
 *
 * @return 0 on success, ENXIO on DMA allocation failure, or the errno
 *         from copyin(9). */
static int
ocs_process_sli_config (ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd, ocs_dma_t *dma)
{
	sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)mcmd->payload;
	int error;

	if (sli_config->emb) {
		/* Embedded payload: request header lives inside the command. */
		sli4_req_hdr_t *req = (sli4_req_hdr_t *)sli_config->payload.embed;

		switch (req->opcode) {
		case SLI4_OPC_COMMON_READ_OBJECT:
			if (mcmd->out_bytes) {
				sli4_req_common_read_object_t *rdobj =
					(sli4_req_common_read_object_t *)sli_config->payload.embed;

				if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
					device_printf(ocs->dev, "%s: COMMON_READ_OBJECT - %lld allocation failed\n",
						__func__, (unsigned long long)mcmd->out_bytes);
					return ENXIO;
				}
				memset(dma->virt, 0, mcmd->out_bytes);

				/* Point the command's 64-bit BDE at the DMA buffer. */
				rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
				rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
			}
			break;
		case SLI4_OPC_COMMON_WRITE_OBJECT:
		{
			sli4_req_common_write_object_t *wrobj =
				(sli4_req_common_write_object_t *)sli_config->payload.embed;

			if (ocs_dma_alloc(ocs, dma, wrobj->desired_write_length, 4096)) {
				device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - %d allocation failed\n",
					__func__, wrobj->desired_write_length);
				return ENXIO;
			}
			/* setup the descriptor */
			wrobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
			wrobj->host_buffer_descriptor[0].buffer_length = wrobj->desired_write_length;
			wrobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
			wrobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

			/* copy the data into the DMA buffer */
			error = copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes);
			if (error != 0) {
				device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - copyin failed: %d\n",
					__func__, error);
				ocs_dma_free(ocs, dma);
				return error;
			}
		}
			break;
		case SLI4_OPC_COMMON_DELETE_OBJECT:
			/* No data transfer; nothing to stage. */
			break;
		case SLI4_OPC_COMMON_READ_OBJECT_LIST:
			if (mcmd->out_bytes) {
				sli4_req_common_read_object_list_t *rdobj =
					(sli4_req_common_read_object_list_t *)sli_config->payload.embed;

				if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
					device_printf(ocs->dev, "%s: COMMON_READ_OBJECT_LIST - %lld allocation failed\n",
						__func__,(unsigned long long) mcmd->out_bytes);
					return ENXIO;
				}
				memset(dma->virt, 0, mcmd->out_bytes);

				/* Point the command's 64-bit BDE at the DMA buffer. */
				rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
				rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
				rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
			}
			break;
		case SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA:
			/* Embedded response; no external buffer needed. */
			break;
		default:
			/* Unknown embedded opcode: log and dump the command for
			 * diagnosis, but let it proceed unmodified. */
			device_printf(ocs->dev, "%s: in=%p (%lld) out=%p (%lld)\n", __func__,
				(void *)(uintptr_t)mcmd->in_addr, (unsigned long long)mcmd->in_bytes,
				(void *)(uintptr_t)mcmd->out_addr, (unsigned long long)mcmd->out_bytes);
			device_printf(ocs->dev, "%s: unknown (opc=%#x)\n", __func__,
				req->opcode);
			hexdump(mcmd, mcmd->size, NULL, 0);
			break;
		}
	} else {
		/* Non-embedded payload: single buffer sized for the larger of
		 * the input and output directions. */
		uint32_t max_bytes = max(mcmd->in_bytes, mcmd->out_bytes);
		if (ocs_dma_alloc(ocs, dma, max_bytes, 4096)) {
			device_printf(ocs->dev, "%s: non-embedded - %u allocation failed\n",
				__func__, max_bytes);
			return ENXIO;
		}

		error = copyin((void *)(uintptr_t)mcmd->in_addr, dma->virt, mcmd->in_bytes);
		if (error != 0) {
			device_printf(ocs->dev, "%s: non-embedded - copyin failed: %d\n",
				__func__, error);
			ocs_dma_free(ocs, dma);
			return error;
		}

		sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
		sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
		sli_config->payload.mem.length = max_bytes;
	}

	return 0;
}
/**
 * @brief Execute a mailbox command on behalf of the ELXU mailbox ioctl.
 *
 * Validates the request, stages DMA buffers for SLI_CONFIG commands,
 * issues the command, sleeps until the completion callback fires, and
 * copies any output back to user space.
 *
 * @param ocs  Pointer to the ocs softc.
 * @param mcmd The mailbox ioctl request (payload, magic, size, buffers).
 *
 * @return 0 on success, EINVAL/ENXIO, or a copyout(9) errno.
 */
static int
ocs_process_mbx_ioctl(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd)
{
	ocs_dma_t dma = { 0 };
	int error;

	error = 0;

	/* Reject requests with a bad magic or an unexpected structure size. */
	if ((ELXU_BSD_MAGIC != mcmd->magic) ||
	    (sizeof(ocs_ioctl_elxu_mbox_t) != mcmd->size)) {
		device_printf(ocs->dev, "%s: malformed command m=%08x s=%08x\n",
			__func__, mcmd->magic, mcmd->size);
		return EINVAL;
	}

	switch(((sli4_mbox_command_header_t *)mcmd->payload)->command) {
	case SLI4_MBOX_COMMAND_SLI_CONFIG:
		if (ENXIO == ocs_process_sli_config(ocs, mcmd, &dma))
			return ENXIO;
		break;

	case SLI4_MBOX_COMMAND_READ_REV:
	case SLI4_MBOX_COMMAND_READ_STATUS:
	case SLI4_MBOX_COMMAND_READ_LNK_STAT:
		break;

	default:
		device_printf(ocs->dev, "command %d\n",((sli4_mbox_command_header_t *)mcmd->payload)->command);
		device_printf(ocs->dev, "%s, command not supported\n", __func__);
		/* NOTE(review): this path returns error == 0 (success) for an
		 * unsupported command — confirm callers rely on this before
		 * changing it. */
		goto no_support;
		break;
	}

	/*
	 * The dbg_lock usage here insures the command completion code
	 * (__ocs_ioctl_mbox_cb), which calls wakeup(), does not run until
	 * after first calling msleep()
	 *
	 *  1. ioctl grabs dbg_lock
	 *  2. ioctl issues command
	 *       if the command completes before msleep(), the
	 *       command completion code (__ocs_ioctl_mbox_cb) will spin
	 *       on dbg_lock before calling wakeup()
	 *  3. ioctl calls msleep which releases dbg_lock before sleeping
	 *     and reacquires it before waking
	 *  4. command completion handler acquires the dbg_lock, immediately
	 *     releases it, and calls wakeup
	 *  5. msleep returns, re-acquiring the lock
	 *  6. ioctl code releases the lock
	 */
	mtx_lock(&ocs->dbg_lock);
	if (ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
	    __ocs_ioctl_mbox_cb, ocs)) {
		device_printf(ocs->dev, "%s: command- %x failed\n", __func__,
			((sli4_mbox_command_header_t *)mcmd->payload)->command);
	}
	msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
	mtx_unlock(&ocs->dbg_lock);

	/* For SLI_CONFIG commands with output, copy the DMA-staged results
	 * back to the caller's buffer. */
	if( SLI4_MBOX_COMMAND_SLI_CONFIG == ((sli4_mbox_command_header_t *)mcmd->payload)->command
	    && mcmd->out_bytes && dma.virt) {
		error = copyout(dma.virt, (void *)(uintptr_t)mcmd->out_addr, mcmd->out_bytes);
	}

no_support:
	ocs_dma_free(ocs, &dma);

	return error;
}
/**
* @brief perform requested Elx CoreDump helper function
*
* The Elx CoreDump facility used for BE3 diagnostics uses the OCS_IOCTL_CMD_ECD_HELPER
* ioctl function to execute requested "help" functions
*
* @param ocs pointer to ocs structure
* @param req pointer to helper function request
*
* @return returns 0 for success, a negative error code value for failure.
*/
/* Execute an Elx CoreDump (ECD) helper request: raw PCI config-space or
 * BAR register reads/writes on behalf of the diagnostics ioctl.
 *
 * BAR commands are validated up front (bar index in range and mapped);
 * config-space commands need no extra validation here.  Reads store the
 * value into req->data; writes take the value from req->data.
 *
 * @param ocs Pointer to the ocs softc.
 * @param req The helper request (command, bar, offset, data).
 *
 * @return 0 on success, -EFAULT for an invalid/unmapped BAR.  Unknown
 *         commands are logged but still return 0. */
static int
ocs_process_ecd_helper (ocs_t *ocs, ocs_ioctl_ecd_helper_t *req)
{
	int32_t rc = 0;
	uint8_t v8;
	uint16_t v16;
	uint32_t v32;

	/* Check the BAR read/write commands for valid bar */
	switch(req->cmd) {
	case OCS_ECD_HELPER_BAR_READ8:
	case OCS_ECD_HELPER_BAR_READ16:
	case OCS_ECD_HELPER_BAR_READ32:
	case OCS_ECD_HELPER_BAR_WRITE8:
	case OCS_ECD_HELPER_BAR_WRITE16:
	case OCS_ECD_HELPER_BAR_WRITE32:
		if (req->bar >= PCI_MAX_BAR) {
			device_printf(ocs->dev, "Error: bar %d out of range\n", req->bar);
			return -EFAULT;
		}
		if (ocs->reg[req->bar].res == NULL) {
			device_printf(ocs->dev, "Error: bar %d not defined\n", req->bar);
			return -EFAULT;
		}
		break;
	default:
		/* Config-space commands: no BAR validation needed. */
		break;
	}

	switch(req->cmd) {
	case OCS_ECD_HELPER_CFG_READ8:
		v8 = ocs_config_read8(ocs, req->offset);
		req->data = v8;
		break;
	case OCS_ECD_HELPER_CFG_READ16:
		v16 = ocs_config_read16(ocs, req->offset);
		req->data = v16;
		break;
	case OCS_ECD_HELPER_CFG_READ32:
		v32 = ocs_config_read32(ocs, req->offset);
		req->data = v32;
		break;
	case OCS_ECD_HELPER_CFG_WRITE8:
		ocs_config_write8(ocs, req->offset, req->data);
		break;
	case OCS_ECD_HELPER_CFG_WRITE16:
		ocs_config_write16(ocs, req->offset, req->data);
		break;
	case OCS_ECD_HELPER_CFG_WRITE32:
		ocs_config_write32(ocs, req->offset, req->data);
		break;
	case OCS_ECD_HELPER_BAR_READ8:
		req->data = ocs_reg_read8(ocs, req->bar, req->offset);
		break;
	case OCS_ECD_HELPER_BAR_READ16:
		req->data = ocs_reg_read16(ocs, req->bar, req->offset);
		break;
	case OCS_ECD_HELPER_BAR_READ32:
		req->data = ocs_reg_read32(ocs, req->bar, req->offset);
		break;
	case OCS_ECD_HELPER_BAR_WRITE8:
		ocs_reg_write8(ocs, req->bar, req->offset, req->data);
		break;
	case OCS_ECD_HELPER_BAR_WRITE16:
		ocs_reg_write16(ocs, req->bar, req->offset, req->data);
		break;
	case OCS_ECD_HELPER_BAR_WRITE32:
		ocs_reg_write32(ocs, req->bar, req->offset, req->data);
		break;
	default:
		/* NOTE(review): unknown commands are only logged; rc stays 0. */
		device_printf(ocs->dev, "Invalid helper command=%d\n", req->cmd);
		break;
	}

	return rc;
}
/**
 * @brief d_ioctl entry point for the ocs_fc control node.
 *
 * Dispatches management requests (mailbox pass-through, CoreDump helpers,
 * vport create/delete, driver dumps, driver info, and the mgmt get/set/exec
 * interface).  The kernel has already copied the argument structure in/out,
 * so @addr may simply be cast to the per-command request type.
 *
 * Fixes relative to the previous revision:
 *  - OCS_IOCTL_CMD_GET_DDUMP no longer allocates @textbuf twice (the first
 *    allocation was overwritten in the GET path and leaked in every path),
 *    and frees it when all ocs_ddump() retries fail.
 *  - Corrected copy-pasted/typo'd log messages ("nod found", and two
 *    "ocs_textbuf_alloc failed" messages logged for unrelated failures).
 *
 * @return 0 on success, an errno-style value otherwise.
 */
static int
ocs_ioctl(struct cdev *cdev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int status = 0;
	struct ocs_softc *ocs = cdev->si_drv1;
	device_t dev = ocs->dev;

	switch (cmd) {
	case OCS_IOCTL_CMD_ELXU_MBOX: {
		/* "copyin" done by kernel; thus, just dereference addr */
		ocs_ioctl_elxu_mbox_t *mcmd = (void *)addr;
		status = ocs_process_mbx_ioctl(ocs, mcmd);
		break;
	}
	case OCS_IOCTL_CMD_ECD_HELPER: {
		/* "copyin" done by kernel; thus, just dereference addr */
		ocs_ioctl_ecd_helper_t *req = (void *)addr;
		status = ocs_process_ecd_helper(ocs, req);
		break;
	}
	case OCS_IOCTL_CMD_VPORT: {
		int32_t rc = 0;
		ocs_ioctl_vport_t *req = (ocs_ioctl_vport_t*) addr;
		ocs_domain_t *domain;

		domain = ocs_domain_get_instance(ocs, req->domain_index);
		if (domain == NULL) {
			device_printf(ocs->dev, "domain [%d] not found\n",
			    req->domain_index);
			return -EFAULT;
		}

		if (req->req_create) {
			rc = ocs_sport_vport_new(domain, req->wwpn, req->wwnn,
						 UINT32_MAX, req->enable_ini,
						 req->enable_tgt, NULL, NULL, TRUE);
		} else {
			rc = ocs_sport_vport_del(ocs, domain, req->wwpn, req->wwnn);
		}

		return rc;
	}
	case OCS_IOCTL_CMD_GET_DDUMP: {
		ocs_ioctl_ddump_t *req = (ocs_ioctl_ddump_t*) addr;
		ocs_textbuf_t textbuf;
		int x;

		switch (req->args.action) {
		case OCS_IOCTL_DDUMP_GET:
		case OCS_IOCTL_DDUMP_GET_SAVED: {
			uint32_t remaining;
			uint32_t written;
			uint32_t idx;
			int32_t n;
			ocs_textbuf_t *ptbuf = NULL;
			uint32_t flags = 0;

			if (req->args.action == OCS_IOCTL_DDUMP_GET_SAVED) {
				/* Use the previously saved dump, if any. */
				if (ocs_textbuf_initialized(&ocs->ddump_saved)) {
					ptbuf = &ocs->ddump_saved;
				}
			} else {
				/* Build a text buffer to capture a fresh dump. */
				if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
					ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
					return -EFAULT;
				}

				/* translate IOCTL ddump flags to ddump flags */
				if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_WQES) {
					flags |= OCS_DDUMP_FLAGS_WQES;
				}
				if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_CQES) {
					flags |= OCS_DDUMP_FLAGS_CQES;
				}
				if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_MQES) {
					flags |= OCS_DDUMP_FLAGS_MQES;
				}
				if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_RQES) {
					flags |= OCS_DDUMP_FLAGS_RQES;
				}
				if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_EQES) {
					flags |= OCS_DDUMP_FLAGS_EQES;
				}

				/* Try 3 times to get the dump */
				for(x=0; x<3; x++) {
					if (ocs_ddump(ocs, &textbuf, flags, req->args.q_entries) != 0) {
						ocs_textbuf_reset(&textbuf);
					} else {
						/* Success */
						x = 0;
						break;
					}
				}
				if (x != 0 ) {
					/* Retries failed: free the buffer we
					 * allocated above (ptbuf stays NULL). */
					ocs_log_test(ocs, "ocs_ddump failed\n");
					ocs_textbuf_free(ocs, &textbuf);
				} else {
					ptbuf = &textbuf;
				}
			}
			written = 0;
			if (ptbuf != NULL) {
				/* Process each textbuf segment */
				remaining = req->user_buffer_len;
				for (idx = 0; remaining; idx++) {
					n = ocs_textbuf_ext_get_written(ptbuf, idx);
					if (n < 0) {
						break;
					}
					if ((uint32_t)n >= remaining) {
						n = (int32_t)remaining;
					}
					if (ocs_copy_to_user(req->user_buffer + written,
					    ocs_textbuf_ext_get_buffer(ptbuf, idx), n)) {
						ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
					}
					written += n;
					remaining -= (uint32_t)n;
				}
			}
			req->bytes_written = written;
			if (ptbuf == &textbuf) {
				ocs_textbuf_free(ocs, &textbuf);
			}

			break;
		}
		case OCS_IOCTL_DDUMP_CLR_SAVED:
			ocs_clear_saved_ddump(ocs);
			break;
		default:
			ocs_log_err(ocs, "Error: unknown ddump action %d\n", req->args.action);
			break;
		}
		break;
	}
	case OCS_IOCTL_CMD_DRIVER_INFO: {
		ocs_ioctl_driver_info_t *req = (ocs_ioctl_driver_info_t*)addr;

		ocs_memset(req, 0, sizeof(*req));

		req->pci_vendor = ocs->pci_vendor;
		req->pci_device = ocs->pci_device;
		ocs_strncpy(req->businfo, ocs->businfo, sizeof(req->businfo));

		req->sli_intf = ocs_config_read32(ocs, SLI4_INTF_REG);
		ocs_strncpy(req->desc, device_get_desc(dev), sizeof(req->desc));
		ocs_strncpy(req->fw_rev, ocs->fwrev, sizeof(req->fw_rev));

		/* WWNs are only known once a domain/sport is attached. */
		if (ocs->domain && ocs->domain->sport) {
			*((uint64_t*)req->hw_addr.fc.wwnn) = ocs_htobe64(ocs->domain->sport->wwnn);
			*((uint64_t*)req->hw_addr.fc.wwpn) = ocs_htobe64(ocs->domain->sport->wwpn);
		}

		ocs_strncpy(req->serialnum, ocs->serialnum, sizeof(req->serialnum));
		break;
	}
	case OCS_IOCTL_CMD_MGMT_LIST: {
		ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr;
		ocs_textbuf_t textbuf;

		/* Build a text buffer */
		if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
			ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
			return -EFAULT;
		}

		ocs_mgmt_get_list(ocs, &textbuf);

		if (ocs_textbuf_get_written(&textbuf)) {
			if (ocs_copy_to_user(req->user_buffer,
			    ocs_textbuf_get_buffer(&textbuf),
			    ocs_textbuf_get_written(&textbuf))) {
				ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
			}
		}
		req->bytes_written = ocs_textbuf_get_written(&textbuf);

		ocs_textbuf_free(ocs, &textbuf);
		break;
	}
	case OCS_IOCTL_CMD_MGMT_GET_ALL: {
		ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr;
		ocs_textbuf_t textbuf;
		int32_t n;
		uint32_t idx;
		uint32_t copied = 0;

		/* Build a text buffer */
		if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
			ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
			return -EFAULT;
		}

		ocs_mgmt_get_all(ocs, &textbuf);

		for (idx = 0; (n = ocs_textbuf_ext_get_written(&textbuf, idx)) > 0; idx++) {
			if(ocs_copy_to_user(req->user_buffer + copied,
			    ocs_textbuf_ext_get_buffer(&textbuf, idx),
			    ocs_textbuf_ext_get_written(&textbuf, idx))) {
				ocs_log_err(ocs, "Error: ocs_copy_to_user failed\n");
			}
			copied += n;
		}
		req->bytes_written = copied;

		ocs_textbuf_free(ocs, &textbuf);
		break;
	}
	case OCS_IOCTL_CMD_MGMT_GET: {
		ocs_ioctl_cmd_get_t* req = (ocs_ioctl_cmd_get_t*)addr;
		ocs_textbuf_t textbuf;
		char name[OCS_MGMT_MAX_NAME];

		/* Copy the name value in from user space */
		if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) {
			ocs_log_test(ocs, "ocs_copy_from_user failed\n");
			ocs_ioctl_free(ocs, req, sizeof(ocs_ioctl_cmd_get_t));
			return -EFAULT;
		}

		/* Build a text buffer */
		if (ocs_textbuf_alloc(ocs, &textbuf, req->value_length)) {
			ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
			return -EFAULT;
		}

		ocs_mgmt_get(ocs, name, &textbuf);

		if (ocs_textbuf_get_written(&textbuf)) {
			if (ocs_copy_to_user(req->value,
			    ocs_textbuf_get_buffer(&textbuf),
			    ocs_textbuf_get_written(&textbuf))) {
				ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
			}
		}
		req->value_length = ocs_textbuf_get_written(&textbuf);

		ocs_textbuf_free(ocs, &textbuf);
		break;
	}
	case OCS_IOCTL_CMD_MGMT_SET: {
		char name[OCS_MGMT_MAX_NAME];
		char value[OCS_MGMT_MAX_VALUE];
		ocs_ioctl_cmd_set_t* req = (ocs_ioctl_cmd_set_t*)addr;

		/* Copy the name in from user space */
		if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) {
			ocs_log_test(ocs, "Error: copy from user failed\n");
			ocs_ioctl_free(ocs, req, sizeof(*req));
			return -EFAULT;
		}

		/* Copy the value in from user space */
		if (ocs_copy_from_user(value, req->value, OCS_MGMT_MAX_VALUE)) {
			ocs_log_test(ocs, "Error: copy from user failed\n");
			ocs_ioctl_free(ocs, req, sizeof(*req));
			return -EFAULT;
		}

		req->result = ocs_mgmt_set(ocs, name, value);
		break;
	}
	case OCS_IOCTL_CMD_MGMT_EXEC: {
		ocs_ioctl_action_t* req = (ocs_ioctl_action_t*) addr;
		char action_name[OCS_MGMT_MAX_NAME];

		if (ocs_copy_from_user(action_name, req->name, sizeof(action_name))) {
			ocs_log_test(ocs, "Error: copy req.name from user failed\n");
			ocs_ioctl_free(ocs, req, sizeof(*req));
			return -EFAULT;
		}

		req->result = ocs_mgmt_exec(ocs, action_name, req->arg_in, req->arg_in_length,
				req->arg_out, req->arg_out_length);
		break;
	}
	default:
		ocs_log_test(ocs, "Error: unknown cmd %#lx\n", cmd);
		status = -ENOTTY;
		break;
	}
	return status;
}
/* Completion callback for ocs_hw_firmware_write(): records the chunk's
 * status, the number of bytes actually transferred, and the firmware
 * change-status code, then posts the semaphore the issuing thread
 * (ocs_firmware_write) is blocked on.
 *
 * @param arg Points at the caller's ocs_mgmt_fw_write_result_t. */
static void
ocs_fw_write_cb(int32_t status, uint32_t actual_write_length,
		uint32_t change_status, void *arg)
{
	ocs_mgmt_fw_write_result_t *result = arg;

	result->status = status;
	result->actual_xfer = actual_write_length;
	result->change_status = change_status;

	ocs_sem_v(&(result->semaphore));
}
/* Write a firmware image to the adapter's flash in FW_WRITE_BUFSIZE-sized
 * chunks.  Each chunk is copied into a bounce DMA buffer, handed to
 * ocs_hw_firmware_write(), and the thread blocks on a semaphore until the
 * completion callback (ocs_fw_write_cb) posts the per-chunk result.
 *
 * @param ocs           Pointer to the ocs softc.
 * @param buf           The complete firmware image.
 * @param buf_len       Image length in bytes.
 * @param change_status Out: firmware change-status code from the final
 *                      chunk (what reset, if any, activates the image).
 *
 * @return 0 on success, -ENOMEM/-ENXIO/-EFAULT on failure. */
int
ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len,
		uint8_t *change_status)
{
	int rc = 0;
	uint32_t bytes_left;
	uint32_t xfer_size;
	uint32_t offset;
	ocs_dma_t dma;
	int last = 0;
	ocs_mgmt_fw_write_result_t result;

	ocs_sem_init(&(result.semaphore), 0, "fw_write");

	bytes_left = buf_len;
	offset = 0;

	if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) {
		ocs_log_err(ocs, "ocs_firmware_write: malloc failed\n");
		return -ENOMEM;
	}

	while (bytes_left > 0) {
		if (bytes_left > FW_WRITE_BUFSIZE) {
			xfer_size = FW_WRITE_BUFSIZE;
		} else {
			xfer_size = bytes_left;
		}

		/* Stage this chunk in the DMA bounce buffer. */
		ocs_memcpy(dma.virt, buf + offset, xfer_size);

		/* Flag the final chunk so the HW commits the image. */
		if (bytes_left == xfer_size) {
			last = 1;
		}

		ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset,
				      last, ocs_fw_write_cb, &result);

		/* Block until ocs_fw_write_cb posts the chunk result. */
		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
			rc = -ENXIO;
			break;
		}

		if (result.actual_xfer == 0 || result.status != 0) {
			rc = -EFAULT;
			break;
		}

		/* Only the last chunk carries the meaningful change status. */
		if (last) {
			*change_status = result.change_status;
		}

		/* Advance by what the HW actually consumed, which may be
		 * less than xfer_size. */
		bytes_left -= result.actual_xfer;
		offset += result.actual_xfer;
	}

	ocs_dma_free(ocs, &dma);
	return rc;
}
static int
ocs_sys_fwupgrade(SYSCTL_HANDLER_ARGS)
{
char file_name[256] = {0};
char fw_change_status;
uint32_t rc = 1;
ocs_t *ocs = (ocs_t *)arg1;
const struct firmware *fw;
const struct ocs_hw_grp_hdr *fw_image;
rc = sysctl_handle_string(oidp, file_name, sizeof(file_name), req);
if (rc || !req->newptr)
return rc;
fw = firmware_get(file_name);
if (fw == NULL) {
device_printf(ocs->dev, "Unable to get Firmware. "
"Make sure %s is copied to /boot/modules\n", file_name);
return ENOENT;
}
fw_image = (const struct ocs_hw_grp_hdr *)fw->data;
/* Check if firmware provided is compatible with this particular
* Adapter of not*/
if ((ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G5) &&
(ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G6)) {
device_printf(ocs->dev,
"Invalid FW image found Magic: 0x%x Size: %zu \n",
ocs_be32toh(fw_image->magic_number), fw->datasize);
rc = -1;
goto exit;
}
if (!strncmp(ocs->fw_version, fw_image->revision,
strnlen(fw_image->revision, 16))) {
device_printf(ocs->dev, "No update req. "
"Firmware is already up to date. \n");
rc = 0;
goto exit;
}
device_printf(ocs->dev, "Upgrading Firmware from %s to %s \n",
ocs->fw_version, fw_image->revision);
rc = ocs_firmware_write(ocs, fw->data, fw->datasize, &fw_change_status);
if (rc) {
ocs_log_err(ocs, "Firmware update failed with status = %d\n", rc);
} else {
ocs_log_info(ocs, "Firmware updated successfully\n");
switch (fw_change_status) {
case 0x00:
device_printf(ocs->dev,
"No reset needed, new firmware is active.\n");
break;
case 0x01:
device_printf(ocs->dev,
"A physical device reset (host reboot) is "
"needed to activate the new firmware\n");
break;
case 0x02:
case 0x03:
device_printf(ocs->dev,
"firmware is resetting to activate the new "
"firmware, Host reboot is needed \n");
break;
default:
ocs_log_warn(ocs,
- "Unexected value change_status: %d\n",
+ "Unexpected value change_status: %d\n",
fw_change_status);
break;
}
}
exit:
/* Release Firmware*/
firmware_put(fw, FIRMWARE_UNLOAD);
return rc;
}
/* sysctl handler for the World Wide Node Name: reads report the requested
 * override (if set) or the HW-provided WWNN; writes hand the new string to
 * set_req_wwnn(). */
static int
ocs_sysctl_wwnn(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	ocs_xport_t *xport = ocs->xport;
	char cur_str[64];
	char new_str[64];
	uint64_t *wwnn = NULL;
	uint32_t rc;

	memset(cur_str, 0, sizeof(cur_str));
	if (xport->req_wwnn) {
		/* An override has already been requested; show it verbatim. */
		wwnn = &xport->req_wwnn;
		snprintf(cur_str, sizeof(cur_str), "0x%llx",
		    (unsigned long long)*wwnn);
	} else {
		/* No override: show the HW value (stored big-endian). */
		wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE);
		snprintf(cur_str, sizeof(cur_str), "0x%llx",
		    (unsigned long long)ocs_htobe64(*wwnn));
	}

	/* Read access: just report the current value. */
	if (!req->newptr)
		return (sysctl_handle_string(oidp, cur_str, sizeof(cur_str), req));

	/* Write access: fetch the requested value. */
	rc = sysctl_handle_string(oidp, new_str, sizeof(new_str), req);
	if (rc)
		return (rc);

	/* Nothing to do when the value is unchanged. */
	if (strncmp(cur_str, new_str, strlen(cur_str)) == 0)
		return 0;

	return (set_req_wwnn(ocs, NULL, new_str));
}
/* sysctl handler for the World Wide Port Name: reads report the requested
 * override (if set) or the HW-provided WWPN; writes hand the new string to
 * set_req_wwpn(). */
static int
ocs_sysctl_wwpn(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	ocs_xport_t *xport = ocs->xport;
	char cur_str[64];
	char new_str[64];
	uint64_t *wwpn = NULL;
	uint32_t rc;

	memset(cur_str, 0, sizeof(cur_str));
	if (xport->req_wwpn) {
		/* An override has already been requested; show it verbatim. */
		wwpn = &xport->req_wwpn;
		snprintf(cur_str, sizeof(cur_str), "0x%llx",
		    (unsigned long long)*wwpn);
	} else {
		/* No override: show the HW value (stored big-endian). */
		wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
		snprintf(cur_str, sizeof(cur_str), "0x%llx",
		    (unsigned long long)ocs_htobe64(*wwpn));
	}

	/* Read access: just report the current value. */
	if (!req->newptr)
		return (sysctl_handle_string(oidp, cur_str, sizeof(cur_str), req));

	/* Write access: fetch the requested value. */
	rc = sysctl_handle_string(oidp, new_str, sizeof(new_str), req);
	if (rc)
		return (rc);

	/* Nothing to do when the value is unchanged. */
	if (strncmp(cur_str, new_str, strlen(cur_str)) == 0)
		return 0;

	return (set_req_wwpn(ocs, NULL, new_str));
}
/* Read-only sysctl: current link topology as reported by the hardware. */
static int
ocs_sysctl_current_topology(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t topology;

	ocs_hw_get(&ocs->hw, OCS_HW_TOPOLOGY, &topology);
	return (sysctl_handle_int(oidp, &topology, 0, req));
}
/* Read-only sysctl: current link speed as reported by the hardware. */
static int
ocs_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t speed;

	ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed);
	return (sysctl_handle_int(oidp, &speed, 0, req));
}
/* sysctl handler for the configured topology: reads report the current
 * setting; writes forward the new value to set_configured_topology(). */
static int
ocs_sysctl_config_topology(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t cur_topo;
	uint32_t req_topo;
	char topo_str[64];
	uint32_t rc;

	ocs_hw_get(&ocs->hw, OCS_HW_CONFIG_TOPOLOGY, &cur_topo);

	/* Read access: report the configured topology. */
	if (!req->newptr)
		return (sysctl_handle_int(oidp, &cur_topo, 0, req));

	/* Write access: fetch the requested topology. */
	rc = sysctl_handle_int(oidp, &req_topo, 0, req);
	if (rc)
		return (rc);

	/* Nothing to do when the value is unchanged. */
	if (req_topo == cur_topo)
		return 0;

	/* set_configured_topology() takes its value as a string. */
	snprintf(topo_str, sizeof(topo_str), "%d", req_topo);
	rc = set_configured_topology(ocs, NULL, topo_str);
	return rc;
}
/* sysctl handler for the configured link speed: reads report the current
 * setting; writes forward the new value to set_configured_speed(). */
static int
ocs_sysctl_config_speed(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = oidp->oid_arg1;
	uint32_t cur_speed;
	uint32_t req_speed;
	char speed_str[64];
	uint32_t rc;

	ocs_hw_get(&ocs->hw, OCS_HW_LINK_CONFIG_SPEED, &cur_speed);

	/* Read access: report the configured speed. */
	if (!req->newptr)
		return (sysctl_handle_int(oidp, &cur_speed, 0, req));

	/* Write access: fetch the requested speed. */
	rc = sysctl_handle_int(oidp, &req_speed, 0, req);
	if (rc)
		return (rc);

	/* Nothing to do when the value is unchanged. */
	if (req_speed == cur_speed)
		return 0;

	/* set_configured_speed() takes its value as a string. */
	snprintf(speed_str, sizeof(speed_str), "%d", req_speed);
	rc = set_configured_speed(ocs, NULL, speed_str);
	return rc;
}
static int
ocs_sysctl_fcid(SYSCTL_HANDLER_ARGS)
{
ocs_t *ocs = oidp->oid_arg1;
char buf[64];
memset(buf, 0, sizeof(buf));
if (ocs->domain && ocs->domain->attached) {
snprintf(buf, sizeof(buf), "0x%06x",
ocs->domain->sport->fc_id);
}
return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
/* sysctl handler for the port state: reads report "online"/"offline";
 * writes accept either word (case-insensitive) and drive the transport
 * to the requested state, skipping the call if already there. */
static int
ocs_sysctl_port_state(SYSCTL_HANDLER_ARGS)
{
	ocs_t *ocs = (ocs_t *)arg1;
	ocs_xport_stats_t cur;
	char state_str[256] = {0};
	uint32_t rc = 1;

	ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &cur);

	/* Read access: render the configured state as a string. */
	if (!req->newptr) {
		snprintf(state_str, sizeof(state_str), "%s",
		    (cur.value == OCS_XPORT_PORT_OFFLINE) ? "offline" : "online");
		return (sysctl_handle_string(oidp, state_str, sizeof(state_str), req));
	}

	/* Write access: fetch the requested state. */
	rc = sysctl_handle_string(oidp, state_str, sizeof(state_str), req);
	if (rc)
		return (rc);

	if (ocs_strcasecmp(state_str, "offline") == 0) {
		if (cur.value == OCS_XPORT_PORT_OFFLINE)
			return (0);
		ocs_log_debug(ocs, "Setting port to %s\n", state_str);
		rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
		if (rc != 0)
			ocs_log_err(ocs, "Setting port to offline failed\n");
	} else if (ocs_strcasecmp(state_str, "online") == 0) {
		if (cur.value == OCS_XPORT_PORT_ONLINE)
			return (0);
		ocs_log_debug(ocs, "Setting port to %s\n", state_str);
		rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
		if (rc != 0)
			ocs_log_err(ocs, "Setting port to online failed\n");
	} else {
		ocs_log_err(ocs, "Unsupported link state %s\n", state_str);
		rc = 1;
	}

	return (rc);
}
/* Read-only sysctl: a virtual port's WWPN formatted as a hex string. */
static int
ocs_sysctl_vport_wwpn(SYSCTL_HANDLER_ARGS)
{
	ocs_fcport *fcp = oidp->oid_arg1;
	char wwpn_str[64] = {0};

	snprintf(wwpn_str, sizeof(wwpn_str), "0x%llx",
	    (unsigned long long)fcp->vport->wwpn);
	return (sysctl_handle_string(oidp, wwpn_str, sizeof(wwpn_str), req));
}
/* Read-only sysctl: a virtual port's WWNN formatted as a hex string. */
static int
ocs_sysctl_vport_wwnn(SYSCTL_HANDLER_ARGS)
{
	ocs_fcport *fcp = oidp->oid_arg1;
	char wwnn_str[64] = {0};

	snprintf(wwnn_str, sizeof(wwnn_str), "0x%llx",
	    (unsigned long long)fcp->vport->wwnn);
	return (sysctl_handle_string(oidp, wwnn_str, sizeof(wwnn_str), req));
}
/**
 * @brief Initialize sysctl
 *
 * Initialize sysctl so elxsdkutil can query device information.
 * Registers static identity strings (model, serial number, firmware
 * revision), read-write configuration knobs (wwnn/wwpn, topology, speed,
 * port state), and a per-vport subtree with wwnn/wwpn nodes.
 *
 * All OIDs are attached under the device's own sysctl tree, so they are
 * torn down automatically with the device sysctl context on detach.
 *
 * @param ocs pointer to ocs
 * @return void
 */
static void
ocs_sysctl_init(ocs_t *ocs)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(ocs->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(ocs->dev);
	struct sysctl_oid *vtree;
	const char *str = NULL;
	char name[16];
	uint32_t rev, if_type, family, i;
	ocs_fcport *fcp = NULL;

	/* PCI device ID (static, read-only). */
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "devid", CTLFLAG_RD, NULL,
	    pci_get_devid(ocs->dev), "Device ID");

	/* Model description from the PCI VPD identifier string, if present. */
	memset(ocs->modeldesc, 0, sizeof(ocs->modeldesc));
	if (0 == pci_get_vpd_ident(ocs->dev, &str)) {
		snprintf(ocs->modeldesc, sizeof(ocs->modeldesc), "%s", str);
	}
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "modeldesc", CTLFLAG_RD,
	    ocs->modeldesc,
	    0, "Model Description");

	/* Serial number from the VPD read-only "SN" keyword, if present. */
	memset(ocs->serialnum, 0, sizeof(ocs->serialnum));
	if (0 == pci_get_vpd_readonly(ocs->dev, "SN", &str)) {
		snprintf(ocs->serialnum, sizeof(ocs->serialnum), "%s", str);
	}
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "sn", CTLFLAG_RD,
	    ocs->serialnum,
	    0, "Serial Number");

	/* Firmware revision string combined with SLI rev/if-type/family. */
	ocs_hw_get(&ocs->hw, OCS_HW_SLI_REV, &rev);
	ocs_hw_get(&ocs->hw, OCS_HW_IF_TYPE, &if_type);
	ocs_hw_get(&ocs->hw, OCS_HW_SLI_FAMILY, &family);
	memset(ocs->fwrev, 0, sizeof(ocs->fwrev));
	snprintf(ocs->fwrev, sizeof(ocs->fwrev), "%s, sli-%d:%d:%x",
	    (char *)ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV),
	    rev, if_type, family);
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "fwrev", CTLFLAG_RD,
	    ocs->fwrev,
	    0, "Firmware Revision");

	/* Raw SLI4 interface register value, as 8 hex digits. */
	memset(ocs->sli_intf, 0, sizeof(ocs->sli_intf));
	snprintf(ocs->sli_intf, sizeof(ocs->sli_intf), "%08x",
	    ocs_config_read32(ocs, SLI4_INTF_REG));
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "sli_intf", CTLFLAG_RD,
	    ocs->sli_intf,
	    0, "SLI Interface");

	/* Procedural nodes: handlers live elsewhere in this file. */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fw_upgrade",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, (void *)ocs, 0,
	    ocs_sys_fwupgrade, "A", "Firmware grp file");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_wwnn, "A",
	    "World Wide Node Name, wwnn should be in the format 0x");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_wwpn, "A",
	    "World Wide Port Name, wwpn should be in the format 0x");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "current_topology", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_current_topology, "IU",
	    "Current Topology, 1-NPort; 2-Loop; 3-None");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "current_speed", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_current_speed, "IU",
	    "Current Speed");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "configured_topology", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_config_topology, "IU",
	    "Configured Topology, 0-Auto; 1-NPort; 2-Loop");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "configured_speed", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_config_speed, "IU",
	    "Configured Speed, 0-Auto, 2000, 4000, 8000, 16000, 32000");
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "businfo", CTLFLAG_RD, ocs->businfo, 0, "Bus Info");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "fcid", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_fcid, "A", "Port FC ID");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "port_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ocs, 0, ocs_sysctl_port_state, "A", "configured port state");

	/*
	 * One "vportN" subtree per virtual port.  Note FCPORT(ocs, i+1):
	 * index 0 is the physical port, vports start at 1.
	 */
	for (i = 0; i < ocs->num_vports; i++) {
		fcp = FCPORT(ocs, i+1);

		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name), "vport%d", i);
		vtree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
		    OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "Virtual port");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO,
		    "wwnn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    fcp, 0, ocs_sysctl_vport_wwnn, "A",
		    "World Wide Node Name");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO,
		    "wwpn", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    fcp, 0, ocs_sysctl_vport_wwpn, "A", "World Wide Port Name");
	}
}
/**
 * @brief Initialize the debug module
 *
 * Parse device hints (similar to Linux module parameters) here. To use,
 * run the command
 *    kenv hint.ocs.U.P=V
 * from the command line replacing U with the unit # (0,1,...),
 * P with the parameter name (debug_mask), and V with the value
 *
 * Also creates the /dev/ocsN character device, registers the sysctl
 * tree, and initializes the debug lock.  Paired with ocs_debug_detach().
 *
 * @param os opaque pointer to the softc (struct ocs_softc)
 */
void
ocs_debug_attach(void *os)
{
	struct ocs_softc *ocs = os;
	int error = 0;
	char *resname = NULL;
	int32_t unit = INT32_MAX;
	uint32_t ocs_debug_mask = 0;

	/* Pick up an optional per-unit debug_mask hint and enable it. */
	resname = "debug_mask";
	if (0 == (error = resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    resname, &ocs_debug_mask))) {
		device_printf(ocs->dev, "setting %s to %010x\n", resname, ocs_debug_mask);
		ocs_debug_enable(ocs_debug_mask);
	}

	/* Character device for ioctl access; si_drv1 points back at softc. */
	unit = device_get_unit(ocs->dev);
	ocs->cdev = make_dev(&ocs_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "ocs%d", unit);
	if (ocs->cdev) {
		ocs->cdev->si_drv1 = ocs;
	}

	/* initialize sysctl interface */
	ocs_sysctl_init(ocs);
	mtx_init(&ocs->dbg_lock, "ocs_dbg_lock", NULL, MTX_DEF);
}
/**
 * @brief Free the debug module
 *
 * Releases the resources created by ocs_debug_attach(): the debug lock
 * and, if it was successfully created, the /dev/ocsN character device.
 */
void
ocs_debug_detach(void *os)
{
	struct ocs_softc *ocs = os;

	mtx_destroy(&ocs->dbg_lock);

	if (ocs->cdev == NULL)
		return;

	/* Drop the back-pointer before destroying the device node. */
	ocs->cdev->si_drv1 = NULL;
	destroy_dev(ocs->cdev);
}
diff --git a/sys/dev/ocs_fc/ocs_scsi.c b/sys/dev/ocs_fc/ocs_scsi.c
index af9fc798b01c..1bbf60b9014b 100644
--- a/sys/dev/ocs_fc/ocs_scsi.c
+++ b/sys/dev/ocs_fc/ocs_scsi.c
@@ -1,2951 +1,2951 @@
/*-
* Copyright (c) 2017 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* OCS Linux SCSI API base driver implementation.
*/
/**
* @defgroup scsi_api_base SCSI Base Target/Initiator
*/
#include "ocs.h"
#include "ocs_els.h"
#include "ocs_scsi.h"
#include "ocs_vpd.h"
#include "ocs_utils.h"
#include "ocs_device.h"
#define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
#define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
#define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
#define enable_tsend_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TSEND) == 0)
#define enable_treceive_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TRECEIVE) == 0)
#define scsi_io_printf(io, fmt, ...) ocs_log_info(io->ocs, "[%s]" SCSI_IOFMT fmt, \
io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
#define scsi_io_trace(io, fmt, ...) \
do { \
if (OCS_LOG_ENABLE_SCSI_TRACE(io->ocs)) \
scsi_io_printf(io, fmt, ##__VA_ARGS__); \
} while (0)
#define scsi_log(ocs, fmt, ...) \
do { \
if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) \
ocs_log_info(ocs, fmt, ##__VA_ARGS__); \
} while (0)
static int32_t ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg);
static int32_t ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status,
uint32_t ext, void *arg);
static void ocs_scsi_io_free_ovfl(ocs_io_t *io);
static uint32_t ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count);
static int ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info);
static ocs_scsi_io_status_e ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc);
static uint32_t ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[],
uint32_t addrlen_count, ocs_dif_t *dif, int is_crc);
static uint32_t ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif);
static uint32_t ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif);
static int32_t ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info,
ocs_hw_dif_info_t *hw_dif_info);
static int32_t ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio);
static int32_t ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io);
static void _ocs_scsi_io_free(void *arg);
/**
 * @ingroup scsi_api_base
 * @brief Load a 32-bit big-endian value from memory.
 *
 * @param p Pointer to the 32-bit big-endian location.
 *
 * @return Returns the value converted to host byte order.
 */
static inline uint32_t
ocs_fc_getbe32(void *p)
{
	uint32_t be_value = *(uint32_t *)p;

	return ocs_be32toh(be_value);
}
/**
 * @ingroup scsi_api_base
 * @brief Enable IO allocation.
 *
 * @par Description
 * The SCSI and Transport IO allocation functions are enabled. If the allocation functions
 * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will
 * fail.
 *
 * The flag is flipped under active_ios_lock so it is seen consistently
 * with the active IO list by ocs_scsi_io_alloc()/_ocs_scsi_io_free().
 *
 * @param node Pointer to node object.
 *
 * @return None.
 */
void
ocs_scsi_io_alloc_enable(ocs_node_t *node)
{
	ocs_assert(node != NULL);
	ocs_lock(&node->active_ios_lock);
		node->io_alloc_enabled = TRUE;
	ocs_unlock(&node->active_ios_lock);
}
/**
 * @ingroup scsi_api_base
 * @brief Disable IO allocation
 *
 * @par Description
 * The SCSI and Transport IO allocation functions are disabled. If the allocation functions
 * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will
 * fail.
 *
 * Once disabled, draining of the active IO list triggers the
 * OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY event (see _ocs_scsi_io_free()).
 *
 * @param node Pointer to node object
 *
 * @return None.
 */
void
ocs_scsi_io_alloc_disable(ocs_node_t *node)
{
	ocs_assert(node != NULL);
	ocs_lock(&node->active_ios_lock);
		node->io_alloc_enabled = FALSE;
	ocs_unlock(&node->active_ios_lock);
}
/**
 * @ingroup scsi_api_base
 * @brief Allocate a SCSI IO context.
 *
 * @par Description
 * A SCSI IO context is allocated and associated with a @c node. This function
 * is called by an initiator-client when issuing SCSI commands to remote
 * target devices. On completion, ocs_scsi_io_free() is called.
 * @n @n
 * The returned ocs_io_t structure has an element of type ocs_scsi_ini_io_t named
 * "ini_io" that is declared and used by an initiator-client for private information.
 *
 * The entire allocation (enabled check, pool allocation, list insertion)
 * happens under active_ios_lock so a concurrent alloc-disable/drain
 * cannot miss this IO.
 *
 * @param node Pointer to the associated node structure.
 * @param role Role for IO (originator/responder).
 *
 * @return Returns the pointer to the IO context, or NULL.
 *
 */
ocs_io_t *
ocs_scsi_io_alloc(ocs_node_t *node, ocs_scsi_io_role_e role)
{
	ocs_t *ocs;
	ocs_xport_t *xport;
	ocs_io_t *io;

	ocs_assert(node, NULL);
	ocs_assert(node->ocs, NULL);

	ocs = node->ocs;
	ocs_assert(ocs->xport, NULL);
	xport = ocs->xport;

	ocs_lock(&node->active_ios_lock);

	/* Refuse allocation while the node is shutting down / draining. */
	if (!node->io_alloc_enabled) {
		ocs_unlock(&node->active_ios_lock);
		return NULL;
	}

	io = ocs_io_alloc(ocs);
	if (io == NULL) {
		/* Track pool exhaustion for diagnostics. */
		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
		ocs_unlock(&node->active_ios_lock);
		return NULL;
	}

	/* initialize refcount; _ocs_scsi_io_free runs when it hits zero */
	ocs_ref_init(&io->ref, _ocs_scsi_io_free, io);

	/* A freshly allocated IO must not still own a HW IO. */
	if (io->hio != NULL) {
		ocs_log_err(node->ocs, "assertion failed: io->hio is not NULL\n");
		ocs_io_free(ocs, io);
		ocs_unlock(&node->active_ios_lock);
		return NULL;
	}

	/* set generic fields */
	io->ocs = ocs;
	io->node = node;

	/* set type and name */
	io->io_type = OCS_IO_TYPE_IO;
	io->display_name = "scsi_io";

	switch (role) {
	case OCS_SCSI_IO_ROLE_ORIGINATOR:
		io->cmd_ini = TRUE;
		io->cmd_tgt = FALSE;
		break;
	case OCS_SCSI_IO_ROLE_RESPONDER:
		io->cmd_ini = FALSE;
		io->cmd_tgt = TRUE;
		break;
	}

	/* Add to node's active_ios list */
	ocs_list_add_tail(&node->active_ios, io);

	ocs_unlock(&node->active_ios_lock);

	return io;
}
/**
 * @ingroup scsi_api_base
 * @brief Free a SCSI IO context (internal).
 *
 * @par Description
 * The IO context previously allocated using ocs_scsi_io_alloc()
 * is freed. This is called from within the transport layer,
 * when the reference count goes to zero.
 *
 * Removes the IO from the node's active list and, if allocation is
 * disabled and the list just became empty, posts
 * OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY to let node shutdown proceed.
 *
 * @param arg Pointer to the IO context.
 *
 * @return None.
 */
static void
_ocs_scsi_io_free(void *arg)
{
	ocs_io_t *io = (ocs_io_t *)arg;
	ocs_t *ocs;
	ocs_node_t *node;
	int send_empty_event;

	/*
	 * Validate the argument BEFORE dereferencing it.  The original code
	 * read io->ocs / io->node first, which made the NULL assert useless.
	 */
	ocs_assert(io != NULL);

	ocs = io->ocs;
	node = io->node;

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

	ocs_assert(ocs_io_busy(io));

	ocs_lock(&node->active_ios_lock);
		ocs_list_remove(&node->active_ios, io);
		/* Last active IO on a draining node unblocks shutdown. */
		send_empty_event = (!node->io_alloc_enabled) && ocs_list_empty(&node->active_ios);
	ocs_unlock(&node->active_ios_lock);

	if (send_empty_event) {
		ocs_node_post_event(node, OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
	}

	io->node = NULL;
	ocs_io_free(ocs, io);
}
/**
 * @ingroup scsi_api_base
 * @brief Free a SCSI IO context.
 *
 * @par Description
 * The IO context previously allocated using ocs_scsi_io_alloc() is freed.
 * This only drops the caller's reference; the actual teardown happens in
 * _ocs_scsi_io_free() when the refcount reaches zero.
 *
 * @param io Pointer to the IO context.
 *
 * @return None.
 */
void
ocs_scsi_io_free(ocs_io_t *io)
{
	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	ocs_assert(ocs_ref_read_count(&io->ref) > 0);
	ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */
}
static int32_t
ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun,
ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len,
ocs_scsi_dif_info_t *dif_info,
ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags);
/**
 * @brief Target response completion callback.
 *
 * @par Description
 * Function is called upon the completion of a target IO request.
 * Translates the SLI4 WCQE completion status (and, for DI errors, the
 * extended status bits) into an ocs_scsi_io_status_e, then invokes the
 * target server's completion callback.
 *
 * @param hio Pointer to the HW IO structure.
 * @param rnode Remote node associated with the IO that is completing.
 * @param length Length of the response payload.
 * @param status Completion status.
 * @param ext_status Extended completion status.
 * @param app Application-specific data (generally a pointer to the IO context).
 *
 * @return None.
 */
static void
ocs_target_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
	int32_t status, uint32_t ext_status, void *app)
{
	ocs_io_t *io = app;
	ocs_t *ocs;
	ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD;
	uint16_t additional_length;
	uint8_t edir;
	uint8_t tdpv;
	ocs_hw_dif_info_t *dif_info = &io->hw_dif;
	int is_crc;

	ocs_assert(io);

	scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

	ocs = io->ocs;
	ocs_assert(ocs);

	/* Release any overflow SGL before completing. */
	ocs_scsi_io_free_ovfl(io);

	io->transferred += length;

	/* Call target server completion */
	if (io->scsi_tgt_cb) {
		ocs_scsi_io_cb_t cb = io->scsi_tgt_cb;
		uint32_t flags = 0;

		/* Clear the callback before invoking the callback */
		io->scsi_tgt_cb = NULL;

		/* if status was good, and auto-good-response was set, then callback
		 * target-server with IO_CMPL_RSP_SENT, otherwise send IO_CMPL
		 */
		if ((status == 0) && (io->auto_resp))
			flags |= OCS_SCSI_IO_CMPL_RSP_SENT;
		else
			flags |= OCS_SCSI_IO_CMPL;

		switch (status) {
		case SLI4_FC_WCQE_STATUS_SUCCESS:
			scsi_status = OCS_SCSI_STATUS_GOOD;
			break;
		case SLI4_FC_WCQE_STATUS_DI_ERROR:
			/*
			 * DI (T10 protection) error: the extended status tells
			 * us which check failed; if none of the direct flags
			 * are set, re-check the data in software to pinpoint
			 * the failing block.
			 */
			if (ext_status & SLI4_FC_DI_ERROR_GE) {
				scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
			} else if (ext_status & SLI4_FC_DI_ERROR_AE) {
				scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
			} else if (ext_status & SLI4_FC_DI_ERROR_RE) {
				scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
			} else {
				additional_length = ((ext_status >> 16) & 0xFFFF);

				/* Capture the EDIR and TDPV bits as 0 or 1 for easier printing. */
				edir = !!(ext_status & SLI4_FC_DI_ERROR_EDIR);
				tdpv = !!(ext_status & SLI4_FC_DI_ERROR_TDPV);

				is_crc = ocs_scsi_dif_guard_is_crc(edir, dif_info);

				if (edir == 0) {
					/* For reads, we have everything in memory. Start checking from beginning. */
					scsi_status = ocs_scsi_dif_check_unknown(io, 0, io->wire_len, is_crc);
				} else {
					/* For writes, use the additional length to determine where to look for the error.
					 * The additional_length field is set to 0 if it is not supported.
					 * The additional length field is valid if:
					 *    . additional_length is not zero
					 *    . Total Data Placed is valid
					 *    . Error Direction is RX (1)
					 *    . Operation is a pass thru (CRC or CKSUM on IN, and CRC or CHKSUM on OUT) (all pass-thru cases except raw)
					 */
					if ((additional_length != 0) && (tdpv != 0) &&
					    (dif_info->dif == SLI4_DIF_PASS_THROUGH) && (dif_info->dif_oper != OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW) ) {
						scsi_status = ocs_scsi_dif_check_unknown(io, length, additional_length, is_crc);
					} else {
						/* If we can't do additional checking, then fall-back to guard error */
						scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
					}
				}
			}
			break;
		case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
			switch (ext_status) {
			case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
			case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
				scsi_status = OCS_SCSI_STATUS_ABORTED;
				break;
			case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
				scsi_status = OCS_SCSI_STATUS_NEXUS_LOST;
				break;
			case SLI4_FC_LOCAL_REJECT_NO_XRI:
				scsi_status = OCS_SCSI_STATUS_NO_IO;
				break;
			default:
				/* TODO: we have seen 0x0d (TX_DMA_FAILED error) */
				scsi_status = OCS_SCSI_STATUS_ERROR;
				break;
			}
			break;

		case SLI4_FC_WCQE_STATUS_WQE_TIMEOUT:
			/* target IO timed out */
			scsi_status = OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
			break;

		case SLI4_FC_WCQE_STATUS_SHUTDOWN:
			/* Target IO cancelled by HW */
			scsi_status = OCS_SCSI_STATUS_SHUTDOWN;
			break;

		default:
			scsi_status = OCS_SCSI_STATUS_ERROR;
			break;
		}

		cb(io, scsi_status, flags, io->scsi_tgt_cb_arg);
	}

	/* A HW IO just freed up; try to dispatch any queued IOs. */
	ocs_scsi_check_pending(ocs);
}
/**
 * @brief Determine if an IO is using CRC for DIF guard format.
 *
 * The guard format depends on which side of the operation we are
 * examining: for writes (direction != 0) the OUT format matters, for
 * reads the IN format.
 *
 * @param direction IO direction: 1 for write, 0 for read.
 * @param dif_info Pointer to HW DIF info data.
 *
 * @return Returns TRUE if using CRC, FALSE if not.
 */
static int
ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info)
{
	switch (dif_info->dif_oper) {
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
		/* CRC on both sides */
		return TRUE;
	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
		/* CRC only on the OUT side: CRC guard iff writing */
		return direction ? TRUE : FALSE;
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
		/* CRC only on the IN side: CRC guard iff reading */
		return direction ? FALSE : TRUE;
	default:
		return FALSE;
	}
}
/**
 * @brief Check a block and DIF data, computing the appropriate SCSI status
 *
 * @par Description
 * This function is used to check blocks and DIF when given an unknown DIF
 * status using the following logic:
 *
 * Given the address of the last good block, and a length of bytes that includes
 * the block with the DIF error, find the bad block. If a block is found with an
 * app_tag or ref_tag error, then return the appropriate error. No block is expected
 * to have a block guard error since hardware "fixes" the crc. So if no block in the
 * range of blocks has an error, then it is presumed to be a BLOCK GUARD error.
 *
 * @param io Pointer to the IO object.
 * @param length Length of bytes covering the good blocks.
 * @param check_length Length of bytes that covers the bad block.
 * @param is_crc True if guard is using CRC format.
 *
 * @return Returns SCSI status.
 */
static ocs_scsi_io_status_e
ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc)
{
	/*
	 * Loop index must be 64-bit: the block bounds are uint64_t, and a
	 * 32-bit index would truncate for transfers beyond 2^32 blocks
	 * (the original code declared it uint32_t).
	 */
	uint64_t i;
	ocs_t *ocs = io->ocs;
	ocs_hw_dif_info_t *dif_info = &io->hw_dif;
	ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
	uint32_t blocksize;			/* data block size */
	uint64_t first_check_block;		/* first block following total data placed */
	uint64_t last_check_block;		/* last block to check */
	uint32_t check_count;			/* count of blocks to check */
	ocs_scsi_vaddr_len_t addrlen[4];	/* address-length pairs returned from target */
	int32_t addrlen_count;			/* count of address-length pairs */
	ocs_dif_t *dif;				/* pointer to DIF block returned from target */
	ocs_scsi_dif_info_t scsi_dif_info = io->scsi_dif_info;

	blocksize = ocs_hw_dif_mem_blocksize(&io->hw_dif, TRUE);
	first_check_block = length / blocksize;
	last_check_block = ((length + check_length) / blocksize);
	check_count = last_check_block - first_check_block;

	ocs_log_debug(ocs, "blocksize %d first check_block %" PRId64 " last_check_block %" PRId64 " check_count %d\n",
		blocksize, first_check_block, last_check_block, check_count);

	/* Re-verify each candidate block's guard, app tag and ref tag. */
	for (i = first_check_block; i < last_check_block; i++) {
		addrlen_count = ocs_scsi_get_block_vaddr(io, (scsi_dif_info.lba + i), addrlen, ARRAY_SIZE(addrlen), (void**) &dif);
		if (addrlen_count < 0) {
			ocs_log_test(ocs, "ocs_scsi_get_block_vaddr() failed: %d\n", addrlen_count);
			scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR;
			break;
		}

		if (! ocs_scsi_dif_check_guard(dif_info, addrlen, addrlen_count, dif, is_crc)) {
			ocs_log_debug(ocs, "block guard check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
			scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
			break;
		}

		if (! ocs_scsi_dif_check_app_tag(ocs, dif_info, scsi_dif_info.app_tag, dif)) {
			ocs_log_debug(ocs, "app tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
			scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
			break;
		}

		if (! ocs_scsi_dif_check_ref_tag(ocs, dif_info, (scsi_dif_info.ref_tag + i), dif)) {
			ocs_log_debug(ocs, "ref tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
			scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
			break;
		}
	}

	return scsi_status;
}
/**
 * @brief Check the block guard of block data
 *
 * @par Description
 * Using the dif_info for the transfer, check the block guard value.
 *
 * @param dif_info Pointer to HW DIF info data.
 * @param addrlen Array of address length pairs.
 * @param addrlen_count Number of entries in the addrlen[] array.
 * @param dif Pointer to the DIF data block being checked.
 * @param is_crc True if guard is using CRC format.
 *
 * @return Returns TRUE if block guard check is ok.
 */
static uint32_t
ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count,
	ocs_dif_t *dif, int is_crc)
{
	uint16_t crc = dif_info->dif_seed;
	uint32_t i;
	uint16_t checksum;

	/* No DIF block or guard checking disabled: trivially OK. */
	if ((dif == NULL) || !dif_info->check_guard) {
		return TRUE;
	}

	if (is_crc) {
		/* CRC guard: fold every segment into the running CRC. */
		for (i = 0; i < addrlen_count; i++) {
			crc = ocs_scsi_dif_calc_crc(addrlen[i].vaddr, addrlen[i].length, crc);
		}
		return (crc == ocs_be16toh(dif->crc));
	} else {
		checksum = ocs_scsi_dif_calc_checksum(addrlen, addrlen_count);
		/*
		 * NOTE(review): unlike the CRC path, dif->crc is compared
		 * without ocs_be16toh() here — presumably the checksum is
		 * stored in wire order already; confirm against
		 * ocs_scsi_dif_calc_checksum().
		 */
		return (checksum == dif->crc);
	}
}
/**
 * @brief Check the app tag of dif data
 *
 * @par Description
 * Using the dif_info for the transfer, compare the expected application
 * tag against the (big-endian) tag stored in the DIF block.
 *
 * @param ocs Pointer to the ocs structure for logging.
 * @param dif_info Pointer to HW DIF info data.
 * @param exp_app_tag The value the app tag is expected to be.
 * @param dif Pointer to the DIF data block being checked.
 *
 * @return Returns TRUE if app tag check is ok.
 */
static uint32_t
ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif)
{
	uint16_t actual_app_tag;

	/* Nothing to compare against, or checking disabled. */
	if ((dif == NULL) || !dif_info->check_app_tag)
		return TRUE;

	actual_app_tag = ocs_be16toh(dif->app_tag);
	ocs_log_debug(ocs, "expected app tag 0x%x, actual 0x%x\n",
		exp_app_tag, actual_app_tag);

	return (exp_app_tag == actual_app_tag);
}
/**
 * @brief Check the ref tag of dif data
 *
 * @par Description
 * Using the dif_info for the transfer, compare the expected reference
 * tag against the (big-endian) tag stored in the DIF block.  A mismatch
 * is logged at debug level.
 *
 * @param ocs Pointer to the ocs structure for logging.
 * @param dif_info Pointer to HW DIF info data.
 * @param exp_ref_tag The value the ref tag is expected to be.
 * @param dif Pointer to the DIF data block being checked.
 *
 * @return Returns TRUE if ref tag check is ok.
 */
static uint32_t
ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif)
{
	uint32_t actual_ref_tag;

	/* Nothing to compare against, or checking disabled. */
	if ((dif == NULL) || !dif_info->check_ref_tag)
		return TRUE;

	actual_ref_tag = ocs_be32toh(dif->ref_tag);
	if (exp_ref_tag == actual_ref_tag)
		return TRUE;

	ocs_log_debug(ocs, "expected ref tag 0x%x, actual 0x%x\n",
		exp_ref_tag, actual_ref_tag);
	return FALSE;
}
/**
 * @brief Return count of SGE's required for request
 *
 * @par Description
 * An accurate count of SGEs is computed and returned, mirroring the SGE
 * emission rules of ocs_scsi_build_sgls():
 *  - DIF disabled:            one SGE per data segment.
 *  - DIF inline (!separate):  one leading seed SGE, then one per segment.
 *  - DIF separate:            a SEED + DIF SGE pair before every data SGE.
 *
 * @param hw_dif Pointer to HW dif information.
 * @param sgl Pointer to SGL from back end.
 * @param sgl_count Count of SGEs in SGL.
 *
 * @return Count of SGEs.
 */
static uint32_t
ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count)
{
	if (hw_dif->dif_oper == OCS_HW_DIF_OPER_DISABLED)
		return sgl_count;

	if (hw_dif->dif_separate)
		return sgl_count * 3;	/* SEED + DIF + data, per segment */

	return sgl_count + 1;		/* single seed SGE plus data SGEs */
}
/**
 * @brief Build the HW SGL for an IO from the back-end SGL.
 *
 * @par Description
 * Initializes the HW IO's SGL and appends the data SGEs, interleaving
 * DIF SEED/DIF SGEs as required by the DIF operation.  For separate-DIF
 * transfers the SGL lengths must be block-size multiples, and the seed
 * ref tag is advanced per block so each segment's SEED SGE carries the
 * correct starting reference tag.
 *
 * Note: this body also resolves a stray unified-diff hunk that was left
 * embedded in the source (the old/new "Inavlid"/"Invalid" log lines);
 * the corrected message is kept.
 *
 * @param hw Pointer to the HW object.
 * @param hio Pointer to the HW IO being built.
 * @param hw_dif Pointer to HW DIF info for the transfer.
 * @param sgl Back-end SGL array.
 * @param sgl_count Number of entries in @c sgl.
 * @param type HW IO type.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_scsi_build_sgls(ocs_hw_t *hw, ocs_hw_io_t *hio, ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, ocs_hw_io_type_e type)
{
	int32_t rc;
	uint32_t i;
	ocs_t *ocs = hw->os;
	uint32_t blocksize = 0;
	uint32_t blockcount;

	ocs_assert(hio, -1);

	/* Initialize HW SGL */
	rc = ocs_hw_io_init_sges(hw, hio, type);
	if (rc) {
		ocs_log_err(ocs, "ocs_hw_io_init_sges failed: %d\n", rc);
		return -1;
	}

	/* Convert DIF Information */
	if (hw_dif->dif_oper != OCS_HW_DIF_OPER_DISABLED) {
		/* If we're not DIF separate, then emit a seed SGE */
		if (!hw_dif->dif_separate) {
			rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif);
			if (rc) {
				return rc;
			}
		}

		/* if we are doing DIF separate, then figure out the block size so that we
		 * can update the ref tag in the DIF seed SGE. Also verify that the
		 * the sgl lengths are all multiples of the blocksize
		 */
		if (hw_dif->dif_separate) {
			switch(hw_dif->blk_size) {
			case OCS_HW_DIF_BK_SIZE_512:	blocksize = 512; break;
			case OCS_HW_DIF_BK_SIZE_1024:	blocksize = 1024; break;
			case OCS_HW_DIF_BK_SIZE_2048:	blocksize = 2048; break;
			case OCS_HW_DIF_BK_SIZE_4096:	blocksize = 4096; break;
			case OCS_HW_DIF_BK_SIZE_520:	blocksize = 520; break;
			case OCS_HW_DIF_BK_SIZE_4104:	blocksize = 4104; break;
			default:
				ocs_log_test(hw->os, "Invalid hw_dif blocksize %d\n", hw_dif->blk_size);
				return -1;
			}
			for (i = 0; i < sgl_count; i++) {
				if ((sgl[i].len % blocksize) != 0) {
					ocs_log_test(hw->os, "sgl[%d] len of %ld is not multiple of blocksize\n",
						i, sgl[i].len);
					return -1;
				}
			}
		}

		for (i = 0; i < sgl_count; i++) {
			ocs_assert(sgl[i].addr, -1);
			ocs_assert(sgl[i].len, -1);

			/* If DIF is enabled, and DIF is separate, then append a SEED then DIF SGE */
			if (hw_dif->dif_separate) {
				rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif);
				if (rc) {
					return rc;
				}
				rc = ocs_hw_io_add_dif_sge(hw, hio, sgl[i].dif_addr);
				if (rc) {
					return rc;
				}
				/* Update the ref_tag for the next DIF seed SGE */
				blockcount = sgl[i].len / blocksize;
				if (hw_dif->dif_oper == OCS_HW_DIF_OPER_INSERT) {
					hw_dif->ref_tag_repl += blockcount;
				} else {
					hw_dif->ref_tag_cmp += blockcount;
				}
			}

			/* Add data SGE */
			rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
			if (rc) {
				ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n",
						sgl_count, rc);
				return rc;
			}
		}
	} else {
		for (i = 0; i < sgl_count; i++) {
			ocs_assert(sgl[i].addr, -1);
			ocs_assert(sgl[i].len, -1);

			/* Add data SGE */
			rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
			if (rc) {
				ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n",
						sgl_count, rc);
				return rc;
			}
		}
	}
	return 0;
}
/**
 * @ingroup scsi_api_base
 * @brief Convert SCSI API T10 DIF information into the FC HW format.
 *
 * Maps the SCSI-layer DIF operation and block size enums onto the HW
 * enums, selects the SLI4 DIF mode (INSERT/STRIP/PASS_THROUGH), and
 * routes the caller's ref/app tags to the replace or compare fields
 * depending on that mode.
 *
 * @param ocs Pointer to the ocs structure for logging.
 * @param scsi_dif_info Pointer to the SCSI API T10 DIF fields.
 *        May be NULL, in which case DIF is disabled in @c hw_dif_info.
 * @param hw_dif_info Pointer to the FC HW API T10 DIF fields.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info, ocs_hw_dif_info_t *hw_dif_info)
{
	uint32_t dif_seed;
	ocs_memset(hw_dif_info, 0, sizeof(ocs_hw_dif_info_t));

	/* No SCSI DIF info means DIF is disabled for this IO. */
	if (scsi_dif_info == NULL) {
		hw_dif_info->dif_oper = OCS_HW_DIF_OPER_DISABLED;
		hw_dif_info->blk_size =  OCS_HW_DIF_BK_SIZE_NA;
		return 0;
	}

	/* Convert the DIF operation */
	switch(scsi_dif_info->dif_oper) {
	case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC;
		hw_dif_info->dif = SLI4_DIF_INSERT;
		break;
	case OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF;
		hw_dif_info->dif = SLI4_DIF_STRIP;
		break;
	case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
		hw_dif_info->dif = SLI4_DIF_INSERT;
		break;
	case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
		hw_dif_info->dif = SLI4_DIF_STRIP;
		break;
	case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC;
		hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
		break;
	case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
		hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
		break;
	case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
		hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
		break;
	case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
		hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
		break;
	case OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW:
		hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW;
		hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
		break;
	default:
		ocs_log_test(ocs, "unhandled SCSI DIF operation %d\n",
			     scsi_dif_info->dif_oper);
		return -1;
	}

	/* Convert the block size (data block + optional 8-byte DIF). */
	switch(scsi_dif_info->blk_size) {
	case OCS_SCSI_DIF_BK_SIZE_512:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_512;
		break;
	case OCS_SCSI_DIF_BK_SIZE_1024:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_1024;
		break;
	case OCS_SCSI_DIF_BK_SIZE_2048:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_2048;
		break;
	case OCS_SCSI_DIF_BK_SIZE_4096:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4096;
		break;
	case OCS_SCSI_DIF_BK_SIZE_520:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_520;
		break;
	case OCS_SCSI_DIF_BK_SIZE_4104:
		hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4104;
		break;
	default:
		ocs_log_test(ocs, "unhandled SCSI DIF block size %d\n",
			     scsi_dif_info->blk_size);
		return -1;
	}

	/* If the operation is an INSERT the tags provided are the ones that should be
	 * inserted, otherwise they're the ones to be checked against. */
	if (hw_dif_info->dif == SLI4_DIF_INSERT ) {
		hw_dif_info->ref_tag_repl = scsi_dif_info->ref_tag;
		hw_dif_info->app_tag_repl = scsi_dif_info->app_tag;
	} else {
		hw_dif_info->ref_tag_cmp = scsi_dif_info->ref_tag;
		hw_dif_info->app_tag_cmp = scsi_dif_info->app_tag;
	}

	hw_dif_info->check_ref_tag = scsi_dif_info->check_ref_tag;
	hw_dif_info->check_app_tag = scsi_dif_info->check_app_tag;
	hw_dif_info->check_guard = scsi_dif_info->check_guard;
	hw_dif_info->auto_incr_ref_tag = 1;
	hw_dif_info->dif_separate = scsi_dif_info->dif_separate;
	hw_dif_info->disable_app_ffff = scsi_dif_info->disable_app_ffff;
	hw_dif_info->disable_app_ref_ffff = scsi_dif_info->disable_app_ref_ffff;

	/* Guard seed comes from the HW configuration. */
	ocs_hw_get(&ocs->hw, OCS_HW_DIF_SEED, &dif_seed);
	hw_dif_info->dif_seed = dif_seed;

	return 0;
}
/**
 * @ingroup scsi_api_base
 * @brief This function logs the SGLs for an IO.
 *
 * Dumps each SGE of the default SGL (and the overflow SGL, when one is
 * in use) as four raw dwords via the SCSI trace facility.  Iteration
 * stops at the SGE whose dword[2] has the top bit set (the SLI4 "last"
 * flag).
 *
 * @param io Pointer to the IO context.
 */
static void ocs_log_sgl(ocs_io_t *io)
{
	ocs_hw_io_t *hio = io->hio;
	sli4_sge_t *data = NULL;
	uint32_t *dword = NULL;
	uint32_t i;
	uint32_t n_sge;

	scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
		      ocs_addr32_hi(hio->def_sgl.phys),
		      ocs_addr32_lo(hio->def_sgl.phys));
	/* If the active SGL is the default one, only n_sge entries are valid. */
	n_sge = (hio->sgl == &hio->def_sgl ? hio->n_sge : hio->def_sgl_count);
	for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
		dword = (uint32_t*)data;

		scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
			      i, dword[0], dword[1], dword[2], dword[3]);

		/* Stop at the SGE marked "last". */
		if (dword[2] & (1U << 31)) {
			break;
		}
	}

	/* Dump the overflow SGL only when it is the one actually in use. */
	if (hio->ovfl_sgl != NULL &&
		hio->sgl == hio->ovfl_sgl) {
		scsi_io_trace(io, "Overflow at 0x%x 0x%08x\n",
			      ocs_addr32_hi(hio->ovfl_sgl->phys),
			      ocs_addr32_lo(hio->ovfl_sgl->phys));
		for (i = 0, data = hio->ovfl_sgl->virt; i < hio->n_sge; i++, data++) {
			dword = (uint32_t*)data;

			scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
				      i, dword[0], dword[1], dword[2], dword[3]);
			if (dword[2] & (1U << 31)) {
				break;
			}
		}
	}
}
/**
 * @brief Check pending error asynchronous callback function.
 *
 * @par Description
 * Invoke the saved HW callback for an IO with a dispatch-error status.
 * This runs from the NOP mailbox completion context (scheduled via
 * ocs_hw_async_call() when a dispatch attempt failed).
 *
 * @param hw Pointer to HW object.
 * @param status Completion status.
 * @param mqe Mailbox completion queue entry.
 * @param arg General purpose argument (the ocs_io_t being failed).
 *
 * @return Returns 0.
 */
static int32_t
ocs_scsi_check_pending_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_io_t *io = arg;

	if ((io != NULL) && (io->hw_cb != NULL)) {
		ocs_hw_done_t done = io->hw_cb;

		/* Clear before invoking so the callback fires only once */
		io->hw_cb = NULL;
		done(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
	}
	return 0;
}
/**
 * @brief Check for pending IOs to dispatch.
 *
 * @par Description
 * If there are IOs on the pending list, and a HW IO is available, then
 * dispatch the IOs. Aborts on the pending list are dispatched without a
 * HW IO. A recursion guard ensures only one invocation drains the list
 * at a time.
 *
 * @param ocs Pointer to the OCS structure.
 *
 * @return None.
 */
void
ocs_scsi_check_pending(ocs_t *ocs)
{
	ocs_xport_t *xport = ocs->xport;
	ocs_io_t *io;
	ocs_hw_io_t *hio;
	int32_t status;
	int count = 0;
	int dispatch;

	/* Guard against recursion */
	if (ocs_atomic_add_return(&xport->io_pending_recursing, 1)) {
		/* This function is already running.  Decrement and return. */
		ocs_atomic_sub_return(&xport->io_pending_recursing, 1);
		return;
	}

	/* Drain the pending list: for each IO either allocate the HW IO it
	 * needs, or (for aborts) dispatch directly. Stop when the list is
	 * empty or no HW IO can be allocated. */
	do {
		ocs_lock(&xport->io_pending_lock);
		status = 0;
		hio = NULL;
		io = ocs_list_remove_head(&xport->io_pending_list);
		if (io != NULL) {
			if (io->io_type == OCS_IO_TYPE_ABORT) {
				/* Aborts are dispatched without a HW IO */
				hio = NULL;
			} else {
				hio = ocs_hw_io_alloc(&ocs->hw);
				if (hio == NULL) {
					/*
					 * No HW IO available.
					 * Put IO back on the front of pending list
					 */
					ocs_list_add_head(&xport->io_pending_list, io);
					io = NULL;
				} else {
					hio->eq = io->hw_priv;
				}
			}
		}
		/* Must drop the lock before dispatching the IO */
		ocs_unlock(&xport->io_pending_lock);
		if (io != NULL) {
			count++;
			/*
			 * We pulled an IO off the pending list,
			 * and either got an HW IO or don't need one
			 */
			ocs_atomic_sub_return(&xport->io_pending_count, 1);
			if (hio == NULL) {
				status = ocs_scsi_io_dispatch_no_hw_io(io);
			} else {
				status = ocs_scsi_io_dispatch_hw_io(io, hio);
			}
			if (status) {
				/*
				 * Invoke the HW callback, but do so in the separate execution context,
				 * provided by the NOP mailbox completion processing context by using
				 * ocs_hw_async_call()
				 */
				if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) {
					ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n");
				}
			}
		}
	} while (io != NULL);

	/*
	 * If nothing was removed from the list,
	 * we might be in a case where we need to abort an
	 * active IO and the abort is on the pending list.
	 * Look for an abort we can dispatch.
	 */
	if (count == 0) {
		dispatch = 0;
		ocs_lock(&xport->io_pending_lock);
		ocs_list_foreach(&xport->io_pending_list, io) {
			if (io->io_type == OCS_IO_TYPE_ABORT) {
				if (io->io_to_abort->hio != NULL) {
					/* This IO has a HW IO, so it is active. Dispatch the abort. */
					dispatch = 1;
				} else {
					/* Leave this abort on the pending list and keep looking */
					dispatch = 0;
				}
			}
			if (dispatch) {
				/* Remove under the lock; dispatch after it is dropped */
				ocs_list_remove(&xport->io_pending_list, io);
				ocs_atomic_sub_return(&xport->io_pending_count, 1);
				break;
			}
		}
		ocs_unlock(&xport->io_pending_lock);
		if (dispatch) {
			status = ocs_scsi_io_dispatch_no_hw_io(io);
			if (status) {
				if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) {
					ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n");
				}
			}
		}
	}

	/* Drop the recursion guard taken at entry */
	ocs_atomic_sub_return(&xport->io_pending_recursing, 1);
	return;
}
/**
 * @brief Attempt to dispatch a non-abort IO
 *
 * @par Description
 * An IO is dispatched:
 * - if the pending list is not empty, add IO to pending list
 *   and call a function to process the pending list.
 * - if pending list is empty, try to allocate a HW IO. If none
 *   is available, place this IO at the tail of the pending IO
 *   list.
 * - if HW IO is available, attach this IO to the HW IO and
 *   submit it.
 *
 * @param io Pointer to IO structure.
 * @param cb Callback function (invoked by the HW layer on completion).
 *
 * @return Returns 0 on success, a negative error code value on failure.
 */
int32_t
ocs_scsi_io_dispatch(ocs_io_t *io, void *cb)
{
	ocs_hw_io_t *hio;
	ocs_t *ocs = io->ocs;
	ocs_xport_t *xport = ocs->xport;

	/* Must be a target or initiator command; aborts take the
	 * ocs_scsi_io_dispatch_abort() path instead */
	ocs_assert(io->cmd_tgt || io->cmd_ini, -1);
	ocs_assert((io->io_type != OCS_IO_TYPE_ABORT), -1);
	io->hw_cb = cb;

	/*
	 * If this IO already has a HW IO, then this is not the first phase of
	 * the IO. Send it to the HW directly.
	 */
	if (io->hio != NULL) {
		return ocs_scsi_io_dispatch_hw_io(io, io->hio);
	}

	/*
	 * We don't already have a HW IO associated with the IO. First check
	 * the pending list. If not empty, add IO to the tail and process the
	 * pending list.
	 */
	ocs_lock(&xport->io_pending_lock);
	if (!ocs_list_empty(&xport->io_pending_list)) {
		/*
		 * If this is a low latency request, then put it at the front of the
		 * IO pending queue, otherwise put it at the end of the queue.
		 */
		if (io->low_latency) {
			ocs_list_add_head(&xport->io_pending_list, io);
		} else {
			ocs_list_add_tail(&xport->io_pending_list, io);
		}
		ocs_unlock(&xport->io_pending_lock);
		ocs_atomic_add_return(&xport->io_pending_count, 1);
		ocs_atomic_add_return(&xport->io_total_pending, 1);

		/* process pending list */
		ocs_scsi_check_pending(ocs);
		return 0;
	}
	ocs_unlock(&xport->io_pending_lock);

	/*
	 * We don't have a HW IO associated with the IO and there's nothing
	 * on the pending list. Attempt to allocate a HW IO and dispatch it.
	 */
	hio = ocs_hw_io_alloc(&io->ocs->hw);
	if (hio == NULL) {
		/* Couldn't get a HW IO. Save this IO on the pending list */
		ocs_lock(&xport->io_pending_lock);
		ocs_list_add_tail(&xport->io_pending_list, io);
		ocs_unlock(&xport->io_pending_lock);
		ocs_atomic_add_return(&xport->io_total_pending, 1);
		ocs_atomic_add_return(&xport->io_pending_count, 1);
		return 0;
	}

	/* We successfully allocated a HW IO; dispatch to HW */
	return ocs_scsi_io_dispatch_hw_io(io, hio);
}
/**
 * @brief Attempt to dispatch an Abort IO.
 *
 * @par Description
 * An Abort IO is dispatched:
 * - if the pending list is not empty, add IO to pending list
 *   and call a function to process the pending list.
 * - if pending list is empty, send abort to the HW.
 *
 * @param io Pointer to IO structure (must be OCS_IO_TYPE_ABORT).
 * @param cb Callback function.
 *
 * @return Returns 0 on success, a negative error code value on failure.
 */
int32_t
ocs_scsi_io_dispatch_abort(ocs_io_t *io, void *cb)
{
	ocs_t *ocs = io->ocs;
	ocs_xport_t *xport = ocs->xport;
	int queued = FALSE;

	ocs_assert((io->io_type == OCS_IO_TYPE_ABORT), -1);
	io->hw_cb = cb;

	/*
	 * Aborts don't need a HW IO, but they still flow through the pending
	 * list so ordering is preserved relative to IOs queued ahead of them.
	 */
	ocs_lock(&xport->io_pending_lock);
	if (!ocs_list_empty(&xport->io_pending_list)) {
		ocs_list_add_tail(&xport->io_pending_list, io);
		queued = TRUE;
	}
	ocs_unlock(&xport->io_pending_lock);

	if (!queued) {
		/* nothing on pending list, dispatch abort directly */
		return ocs_scsi_io_dispatch_no_hw_io(io);
	}

	ocs_atomic_add_return(&xport->io_pending_count, 1);
	ocs_atomic_add_return(&xport->io_total_pending, 1);

	/* process pending list */
	ocs_scsi_check_pending(ocs);
	return 0;
}
/**
 * @brief Dispatch IO
 *
 * @par Description
 * An IO and its associated HW IO is dispatched to the HW: task tags and
 * WQ steering are copied from the HW IO, SGLs are built (with an
 * overflow SGL allocated if needed), and the request is sent via the
 * appropriate HW send routine for the IO type.
 *
 * @param io Pointer to IO structure.
 * @param hio Pointer to HW IO structure from which IO will be
 * dispatched.
 *
 * @return Returns 0 on success, a negative error code value on failure.
 */
static int32_t
ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio)
{
	int32_t rc;
	ocs_t *ocs = io->ocs;

	/* Got a HW IO; update ini/tgt_task_tag with HW IO info and dispatch */
	io->hio = hio;
	if (io->cmd_tgt) {
		io->tgt_task_tag = hio->indicator;
	} else if (io->cmd_ini) {
		io->init_task_tag = hio->indicator;
	}
	io->hw_tag = hio->reqtag;

	/* Bind the HW IO to the EQ chosen for this IO */
	hio->eq = io->hw_priv;

	/* Copy WQ steering */
	switch(io->wq_steering) {
	case OCS_SCSI_WQ_STEERING_CLASS >> OCS_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = OCS_HW_WQ_STEERING_CLASS;
		break;
	case OCS_SCSI_WQ_STEERING_REQUEST >> OCS_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = OCS_HW_WQ_STEERING_REQUEST;
		break;
	case OCS_SCSI_WQ_STEERING_CPU >> OCS_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = OCS_HW_WQ_STEERING_CPU;
		break;
	}

	switch (io->io_type) {
	case OCS_IO_TYPE_IO: {
		uint32_t max_sgl;
		uint32_t total_count;
		uint32_t host_allocated;

		ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
		ocs_hw_get(&ocs->hw, OCS_HW_SGL_CHAINING_HOST_ALLOCATED, &host_allocated);

		/*
		 * If the requested SGL is larger than the default size, then we can allocate
		 * an overflow SGL.
		 */
		total_count = ocs_scsi_count_sgls(&io->hw_dif, io->sgl, io->sgl_count);

		/*
		 * Lancer requires us to allocate the chained memory area, but
		 * Skyhawk must use the SGL list associated with another XRI.
		 */
		if (host_allocated && total_count > max_sgl) {
			/* Compute count needed, the number extra plus 1 for the link sge */
			uint32_t count = total_count - max_sgl + 1;

			rc = ocs_dma_alloc(ocs, &io->ovfl_sgl, count*sizeof(sli4_sge_t), 64);
			if (rc) {
				ocs_log_err(ocs, "ocs_dma_alloc overflow sgl failed\n");
				break;
			}
			rc = ocs_hw_io_register_sgl(&ocs->hw, io->hio, &io->ovfl_sgl, count);
			if (rc) {
				/* Registration failed; release the overflow SGL */
				ocs_scsi_io_free_ovfl(io);
				ocs_log_err(ocs, "ocs_hw_io_register_sgl() failed\n");
				break;
			}
			/* EVT: update chained_io_count */
			io->node->chained_io_count++;
		}

		rc = ocs_scsi_build_sgls(&ocs->hw, io->hio, &io->hw_dif, io->sgl, io->sgl_count, io->hio_type);
		if (rc) {
			ocs_scsi_io_free_ovfl(io);
			break;
		}

		if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) {
			ocs_log_sgl(io);
		}

		if (io->app_id) {
			io->iparam.fcp_tgt.app_id = io->app_id;
		}

		rc = ocs_hw_io_send(&io->ocs->hw, io->hio_type, io->hio, io->wire_len, &io->iparam, &io->node->rnode,
			io->hw_cb, io);
		break;
	}
	case OCS_IO_TYPE_ELS:
	case OCS_IO_TYPE_CT: {
		/* ELS/CT request: send els_req payload, receive into els_rsp */
		rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
			&io->els_req, io->wire_len,
			&io->els_rsp, &io->node->rnode, &io->iparam,
			io->hw_cb, io);
		break;
	}
	case OCS_IO_TYPE_CT_RESP: {
		/* CT response: send els_rsp payload, no receive buffer */
		rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
			&io->els_rsp, io->wire_len,
			NULL, &io->node->rnode, &io->iparam,
			io->hw_cb, io);
		break;
	}
	case OCS_IO_TYPE_BLS_RESP: {
		/* no need to update tgt_task_tag for BLS response since the RX_ID
		 * will be specified by the payload, not the XRI */
		rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
			NULL, 0, NULL, &io->node->rnode, &io->iparam, io->hw_cb, io);
		break;
	}
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -1;
		break;
	}
	return rc;
}
/**
 * @brief Dispatch IO
 *
 * @par Description
 * An IO that does not require a HW IO is dispatched to the HW. The only
 * IO type handled here is OCS_IO_TYPE_ABORT.
 *
 * @param io Pointer to IO structure.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io)
{
	int32_t rc;

	switch (io->io_type) {
	case OCS_IO_TYPE_ABORT: {
		ocs_hw_io_t *hio_to_abort = NULL;

		ocs_assert(io->io_to_abort, -1);
		hio_to_abort = io->io_to_abort->hio;

		if (hio_to_abort == NULL) {
			/*
			 * If "IO to abort" does not have an associated HW IO, immediately
			 * make callback with success. The command must have been sent to
			 * the backend, but the data phase has not yet started, so we don't
			 * have a HW IO.
			 *
			 * Note: since the backend shims should be taking a reference
			 * on io_to_abort, it should not be possible to have been completed
			 * and freed by the backend before the abort got here.
			 */
			scsi_io_printf(io, "IO: " SCSI_IOFMT " not active\n",
				SCSI_IOFMT_ARGS(io->io_to_abort));
			((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
			rc = 0;
		} else {
			/* HW IO is valid, abort it */
			scsi_io_printf(io, "aborting " SCSI_IOFMT "\n", SCSI_IOFMT_ARGS(io->io_to_abort));
			rc = ocs_hw_io_abort(&io->ocs->hw, hio_to_abort, io->send_abts,
				io->hw_cb, io);
			if (rc) {
				int status = SLI4_FC_WCQE_STATUS_SUCCESS;

				/* "Not active" and "abort already in progress" are
				 * reported to the callback as success; anything else
				 * is an error */
				if ((rc != OCS_HW_RTN_IO_NOT_ACTIVE) &&
					(rc != OCS_HW_RTN_IO_ABORT_IN_PROGRESS)) {
					status = -1;
					scsi_io_printf(io, "Failed to abort IO: " SCSI_IOFMT " status=%d\n",
						SCSI_IOFMT_ARGS(io->io_to_abort), rc);
				}
				((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, status, 0, io);
				rc = 0;
			}
		}
		break;
	}
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -1;
		break;
	}
	return rc;
}
/**
 * @ingroup scsi_api_base
 * @brief Send read/write data.
 *
 * @par Description
 * This call is made by a target-server to initiate a SCSI read or write data phase, transferring
 * data between the target to the remote initiator. The payload is specified by the
 * scatter-gather list @c sgl of length @c sgl_count. The @c xwire_len argument
 * specifies the payload length (independent of the scatter-gather list cumulative length).
 * @n @n
 * The @c flags argument has one bit, OCS_SCSI_LAST_DATAPHASE, which is a hint to the base
 * driver that it may use auto SCSI response features if the hardware supports it.
 * @n @n
 * Upon completion, the callback function @b cb is called with flags indicating that the
 * IO has completed (OCS_SCSI_IO_COMPL) and another data phase or response may be sent;
 * that the IO has completed and no response needs to be sent (OCS_SCSI_IO_COMPL_NO_RSP);
 * or that the IO was aborted (OCS_SCSI_IO_ABORTED).
 *
 * @param io Pointer to the IO context.
 * @param flags Flags controlling the sending of data.
 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the payload scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param xwire_len Length of the payload on wire, in bytes.
 * @param type HW IO type (OCS_HW_IO_TARGET_READ or OCS_HW_IO_TARGET_WRITE).
 * @param enable_ar Enable auto-response if true.
 * @param cb Completion callback.
 * @param arg Application-supplied callback data.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static inline int32_t
ocs_scsi_xfer_data(ocs_io_t *io, uint32_t flags,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t xwire_len,
	ocs_hw_io_type_e type, int enable_ar,
	ocs_scsi_io_cb_t cb, void *arg)
{
	int32_t rc;
	ocs_t *ocs;
	uint32_t disable_ar_tgt_dif = FALSE;
	size_t residual = 0;

	/* Treat a "disabled" DIF operation the same as no DIF at all */
	if ((dif_info != NULL) && (dif_info->dif_oper == OCS_SCSI_DIF_OPER_DISABLED)) {
		dif_info = NULL;
	}

	ocs_assert(io, -1);

	if (dif_info != NULL) {
		/* Some configurations require auto-response to be disabled
		 * when target DIF is in use */
		ocs_hw_get(&io->ocs->hw, OCS_HW_DISABLE_AR_TGT_DIF, &disable_ar_tgt_dif);
		if (disable_ar_tgt_dif) {
			enable_ar = FALSE;
		}
	}

	io->sgl_count = sgl_count;

	/* If needed, copy SGL */
	if (sgl && (sgl != io->sgl)) {
		ocs_assert(sgl_count <= io->sgl_allocated, -1);
		ocs_memcpy(io->sgl, sgl, sgl_count*sizeof(*io->sgl));
	}

	ocs = io->ocs;
	ocs_assert(ocs, -1);
	ocs_assert(io->node, -1);

	scsi_io_trace(io, "%s wire_len %d\n", (type == OCS_HW_IO_TARGET_READ) ? "send" : "recv", xwire_len);

	ocs_assert(sgl, -1);
	ocs_assert(sgl_count > 0, -1);
	ocs_assert(io->exp_xfer_len > io->transferred, -1);

	io->hio_type = type;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	/* Convert the SCSI-layer DIF description into HW DIF settings */
	rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif);
	if (rc) {
		return rc;
	}

	/* If DIF is used, then save lba for error recovery */
	if (dif_info) {
		io->scsi_dif_info = *dif_info;
	}

	/* Clamp the wire length to what remains of the expected transfer;
	 * anything beyond that is overrun (residual) */
	io->wire_len = MIN(xwire_len, io->exp_xfer_len - io->transferred);
	residual = (xwire_len - io->wire_len);

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = io->transferred;
	io->iparam.fcp_tgt.dif_oper = io->hw_dif.dif;
	io->iparam.fcp_tgt.blk_size = io->hw_dif.blk_size;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* if this is the last data phase and there is no residual, enable
	 * auto-good-response
	 */
	if (enable_ar && (flags & OCS_SCSI_LAST_DATAPHASE) &&
		(residual == 0) && ((io->transferred + io->wire_len) == io->exp_xfer_len) && (!(flags & OCS_SCSI_NO_AUTO_RESPONSE))) {
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
		io->auto_resp = TRUE;
	} else {
		io->auto_resp = FALSE;
	}

	/* save this transfer length */
	io->xfer_req = io->wire_len;

	/* Adjust the transferred count to account for overrun
	 * when the residual is calculated in ocs_scsi_send_resp
	 */
	io->transferred += residual;

	/* Adjust the SGL size if there is overrun: walk the SGL backwards,
	 * trimming (or dropping) entries until the residual is consumed */
	if (residual) {
		ocs_scsi_sgl_t  *sgl_ptr = &io->sgl[sgl_count-1];

		while (residual) {
			size_t len = sgl_ptr->len;
			if ( len > residual) {
				sgl_ptr->len = len - residual;
				residual = 0;
			} else {
				sgl_ptr->len = 0;
				residual -= len;
				io->sgl_count--;
			}
			sgl_ptr--;
		}
	}

	/* Set latency and WQ steering */
	io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT;

	return ocs_scsi_io_dispatch(io, ocs_target_io_cb);
}
/**
 * @ingroup scsi_api_base
 * @brief Send read data to the remote initiator.
 *
 * Thin wrapper around ocs_scsi_xfer_data() for the target-read
 * (data-out to initiator) direction, using the HW's TSEND
 * auto-response capability setting.
 *
 * @param io Pointer to the IO context.
 * @param flags Flags controlling the sending of data.
 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the payload scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param len Length of the payload on wire, in bytes.
 * @param cb Completion callback.
 * @param arg Application-supplied callback data.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_rd_data(ocs_io_t *io, uint32_t flags,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len,
	ocs_scsi_io_cb_t cb, void *arg)
{
	return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len,
				  OCS_HW_IO_TARGET_READ,
				  enable_tsend_auto_resp(io->ocs), cb, arg);
}
/**
 * @ingroup scsi_api_base
 * @brief Receive write data from the remote initiator.
 *
 * Thin wrapper around ocs_scsi_xfer_data() for the target-write
 * (data-in from initiator) direction, using the HW's TRECEIVE
 * auto-response capability setting.
 *
 * @param io Pointer to the IO context.
 * @param flags Flags controlling the data transfer.
 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the payload scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param len Length of the payload on wire, in bytes.
 * @param cb Completion callback.
 * @param arg Application-supplied callback data.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_recv_wr_data(ocs_io_t *io, uint32_t flags,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len,
	ocs_scsi_io_cb_t cb, void *arg)
{
	return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len,
				  OCS_HW_IO_TARGET_WRITE,
				  enable_treceive_auto_resp(io->ocs), cb, arg);
}
/**
 * @ingroup scsi_api_base
 * @brief Free overflow SGL.
 *
 * @par Description
 * Releases the DMA memory backing the overflow SGL, if one was
 * allocated for this IO (size is nonzero only after ocs_dma_alloc()).
 *
 * @param io Pointer to IO object.
 *
 * @return None.
 */
static void
ocs_scsi_io_free_ovfl(ocs_io_t *io) {
	if (io->ovfl_sgl.size == 0) {
		/* No overflow SGL was allocated; nothing to do */
		return;
	}
	ocs_dma_free(io->ocs, &io->ovfl_sgl);
}
/**
 * @ingroup scsi_api_base
 * @brief Send response data.
 *
 * @par Description
 * This function is used by a target-server to send the SCSI response data to a remote
 * initiator node. The target-server populates the @c ocs_scsi_cmd_resp_t
 * argument with scsi status, status qualifier, sense data, and response data, as
 * needed.
 * @n @n
 * Upon completion, the callback function @c cb is invoked. The target-server will generally
 * clean up its IO context resources and call ocs_scsi_io_complete().
 *
 * @param io Pointer to the IO context.
 * @param flags Flags to control sending of the SCSI response.
 * @param rsp Pointer to the response data populated by the caller, or NULL
 * for a plain good-status response.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_resp(ocs_io_t *io, uint32_t flags, ocs_scsi_cmd_resp_t *rsp, ocs_scsi_io_cb_t cb, void *arg)
{
	ocs_t *ocs;
	int32_t residual;
	int auto_resp = TRUE;		/* Always try auto resp */
	uint8_t scsi_status = 0;
	uint16_t scsi_status_qualifier = 0;
	uint8_t *sense_data = NULL;
	uint32_t sense_data_length = 0;

	ocs_assert(io, -1);

	ocs = io->ocs;
	ocs_assert(ocs, -1);

	ocs_assert(io->node, -1);

	/* No DIF on the response phase */
	ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif);

	if (rsp) {
		scsi_status = rsp->scsi_status;
		scsi_status_qualifier = rsp->scsi_status_qualifier;
		sense_data = rsp->sense_data;
		sense_data_length = rsp->sense_data_length;
		residual = rsp->residual;
	} else {
		/* With no explicit response, the residual is whatever was
		 * not transferred of the expected length */
		residual = io->exp_xfer_len - io->transferred;
	}

	io->wire_len = 0;
	io->hio_type = OCS_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* Set low latency queueing request */
	io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT;

	/* Any non-good status, residual, or sense data requires building an
	 * explicit FCP_RSP IU; auto-good-response cannot be used */
	if ((scsi_status != 0) || residual || sense_data_length) {
		fcp_rsp_iu_t *fcprsp = io->rspbuf.virt;

		if (!fcprsp) {
			ocs_log_err(ocs, "NULL response buffer\n");
			return -1;
		}

		auto_resp = FALSE;

		ocs_memset(fcprsp, 0, sizeof(*fcprsp));

		/* Fixed part of the FCP_RSP IU (without the variable data area) */
		io->wire_len += (sizeof(*fcprsp) - sizeof(fcprsp->data));

		fcprsp->scsi_status = scsi_status;
		*((uint16_t*)fcprsp->status_qualifier) = ocs_htobe16(scsi_status_qualifier);

		/* set residual status if necessary */
		if (residual != 0) {
			/* FCP: if data transferred is less than the amount expected, then this is an
			 * underflow.  If data transferred would have been greater than the amount expected
			 * then this is an overflow
			 */
			if (residual > 0) {
				fcprsp->flags |= FCP_RESID_UNDER;
				*((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(residual);
			} else {
				fcprsp->flags |= FCP_RESID_OVER;
				*((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(-residual);
			}
		}

		if (sense_data && sense_data_length) {
			ocs_assert(sense_data_length <= sizeof(fcprsp->data), -1);
			fcprsp->flags |= FCP_SNS_LEN_VALID;
			ocs_memcpy(fcprsp->data, sense_data, sense_data_length);
			*((uint32_t*)fcprsp->fcp_sns_len) = ocs_htobe32(sense_data_length);
			io->wire_len += sense_data_length;
		}

		/* Single SGL entry covering the response buffer */
		io->sgl[0].addr = io->rspbuf.phys;
		io->sgl[0].dif_addr = 0;
		io->sgl[0].len = io->wire_len;
		io->sgl_count = 1;
	}

	if (auto_resp) {
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
	}

	return ocs_scsi_io_dispatch(io, ocs_target_io_cb);
}
/**
 * @ingroup scsi_api_base
 * @brief Send TMF response data.
 *
 * @par Description
 * This function is used by a target-server to send SCSI TMF response data to a remote
 * initiator node. For an ABORT_TASK TMF, a BLS (BA_ACC) response is sent
 * instead of an FCP response.
 * Upon completion, the callback function @c cb is invoked. The target-server will generally
 * clean up its IO context resources and call ocs_scsi_io_complete().
 *
 * @param io Pointer to the IO context.
 * @param rspcode TMF response code.
 * @param addl_rsp_info Additional TMF response information (may be NULL for zero data).
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_tmf_resp(ocs_io_t *io, ocs_scsi_tmf_resp_e rspcode, uint8_t addl_rsp_info[3],
		ocs_scsi_io_cb_t cb, void *arg)
{
	int32_t rc = -1;
	ocs_t *ocs = NULL;
	fcp_rsp_iu_t *fcprsp = NULL;
	fcp_rsp_info_t *rspinfo = NULL;
	uint8_t fcp_rspcode;

	ocs_assert(io, -1);
	ocs_assert(io->ocs, -1);
	ocs_assert(io->node, -1);

	ocs = io->ocs;

	io->wire_len = 0;
	/* No DIF on the TMF response */
	ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif);

	/* Map SCSI-layer TMF response code to the FCP wire code */
	switch(rspcode) {
	case OCS_SCSI_TMF_FUNCTION_COMPLETE:
		fcp_rspcode = FCP_TMF_COMPLETE;
		break;
	case OCS_SCSI_TMF_FUNCTION_SUCCEEDED:
	case OCS_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
		fcp_rspcode = FCP_TMF_SUCCEEDED;
		break;
	case OCS_SCSI_TMF_FUNCTION_REJECTED:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	case OCS_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
		fcp_rspcode = FCP_TMF_INCORRECT_LUN;
		break;
	case OCS_SCSI_TMF_SERVICE_DELIVERY:
		fcp_rspcode = FCP_TMF_FAILED;
		break;
	default:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	}

	io->hio_type = OCS_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	/* An ABORT_TASK TMF is answered with a BLS BA_ACC, not an FCP
	 * response IU */
	if (io->tmf_cmd == OCS_SCSI_TMF_ABORT_TASK) {
		rc = ocs_target_send_bls_resp(io, cb, arg);
		return rc;
	}

	/* populate the FCP TMF response */
	fcprsp = io->rspbuf.virt;
	ocs_memset(fcprsp, 0, sizeof(*fcprsp));

	fcprsp->flags |= FCP_RSP_LEN_VALID;

	rspinfo = (fcp_rsp_info_t*) fcprsp->data;
	if (addl_rsp_info != NULL) {
		ocs_memcpy(rspinfo->addl_rsp_info, addl_rsp_info, sizeof(rspinfo->addl_rsp_info));
	}
	rspinfo->rsp_code = fcp_rspcode;

	/* Fixed FCP_RSP IU part plus the FCP_RSP_INFO area */
	io->wire_len = sizeof(*fcprsp) - sizeof(fcprsp->data) + sizeof(*rspinfo);

	*((uint32_t*)fcprsp->fcp_rsp_len) = ocs_htobe32(sizeof(*rspinfo));

	io->sgl[0].addr = io->rspbuf.phys;
	io->sgl[0].dif_addr = 0;
	io->sgl[0].len = io->wire_len;
	io->sgl_count = 1;

	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	rc = ocs_scsi_io_dispatch(io, ocs_target_io_cb);

	return rc;
}
/**
 * @brief Process target abort callback.
 *
 * @par Description
 * Invoked when a target abort request completes. Maps the HW completion
 * status to a SCSI status, invokes the saved abort callback (if any),
 * releases the reference on the IO being aborted, and frees the abort IO.
 *
 * @param hio HW IO context.
 * @param rnode Remote node.
 * @param length Length of response data.
 * @param status Completion status.
 * @param ext_status Extended completion status.
 * @param app Application-specified callback data (the abort ocs_io_t).
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_target_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
{
	ocs_io_t *io = app;
	ocs_t *ocs;
	ocs_scsi_io_status_e scsi_status;

	ocs_assert(io, -1);
	ocs_assert(io->ocs, -1);

	ocs = io->ocs;

	if (io->abort_cb) {
		ocs_scsi_io_cb_t abort_cb = io->abort_cb;
		void *abort_cb_arg = io->abort_cb_arg;

		/* Clear saved callback fields before invoking */
		io->abort_cb = NULL;
		io->abort_cb_arg = NULL;

		/* Map HW completion status to SCSI status */
		switch (status) {
		case SLI4_FC_WCQE_STATUS_SUCCESS:
			scsi_status = OCS_SCSI_STATUS_GOOD;
			break;
		case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
			switch (ext_status) {
			case SLI4_FC_LOCAL_REJECT_NO_XRI:
				scsi_status = OCS_SCSI_STATUS_NO_IO;
				break;
			case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
				scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS;
				break;
			default:
				/* TODO: we have seen 0x15 (abort in progress) */
				scsi_status = OCS_SCSI_STATUS_ERROR;
				break;
			}
			break;
		case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
			scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE;
			break;
		default:
			scsi_status = OCS_SCSI_STATUS_ERROR;
			break;
		}
		/* invoke callback */
		abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
	}

	ocs_assert(io != io->io_to_abort, -1);

	/* done with IO to abort */
	ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_tgt_abort_io() */

	ocs_io_free(ocs, io);

	/* An abort IO slot may have freed up; retry pending IOs */
	ocs_scsi_check_pending(ocs);
	return 0;
}
/**
 * @ingroup scsi_api_base
 * @brief Abort a target IO.
 *
 * @par Description
 * This routine is called from a SCSI target-server. It initiates an abort of a
 * previously-issued target data phase or response request. A reference
 * is taken on the IO being aborted (released in ocs_target_abort_cb()),
 * and a separate abort IO is allocated to carry the request.
 *
 * @param io IO context.
 * @param cb SCSI target server callback.
 * @param arg SCSI target server supplied callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
ocs_scsi_tgt_abort_io(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg)
{
	ocs_t *ocs;
	ocs_xport_t *xport;
	int32_t rc;

	ocs_io_t *abort_io = NULL;
	ocs_assert(io, -1);
	ocs_assert(io->node, -1);
	ocs_assert(io->ocs, -1);

	ocs = io->ocs;
	xport = ocs->xport;

	/* take a reference on IO being aborted */
	if ((ocs_ref_get_unless_zero(&io->ref) == 0)) {
		/* command no longer active */
		scsi_io_printf(io, "command no longer active\n");
		return -1;
	}

	/*
	 * allocate a new IO to send the abort request. Use ocs_io_alloc() directly, as
	 * we need an IO object that will not fail allocation due to allocations being
	 * disabled (in ocs_scsi_io_alloc())
	 */
	abort_io = ocs_io_alloc(ocs);
	if (abort_io == NULL) {
		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
		/* drop the reference taken above */
		ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
		return -1;
	}

	/* Save the target server callback and argument */
	ocs_assert(abort_io->hio == NULL, -1);

	/* set generic fields */
	abort_io->cmd_tgt = TRUE;
	abort_io->node = io->node;

	/* set type and abort-specific fields */
	abort_io->io_type = OCS_IO_TYPE_ABORT;
	abort_io->display_name = "tgt_abort";
	abort_io->io_to_abort = io;
	abort_io->send_abts = FALSE;
	abort_io->abort_cb = cb;
	abort_io->abort_cb_arg = arg;

	/* now dispatch IO */
	rc = ocs_scsi_io_dispatch_abort(abort_io, ocs_target_abort_cb);
	if (rc) {
		/* Dispatch failed; the callback will not run, so drop the
		 * reference here */
		ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
	}
	return rc;
}
/**
 * @brief Process target BLS response callback.
 *
 * @par Description
 * Completion handler for a BLS (BA_ACC) response. Maps the HW status to
 * a SCSI status value, invokes the saved BLS callback (if any), and
 * retries any pending IOs.
 *
 * @param hio HW IO context.
 * @param rnode Remote node.
 * @param length Length of response data.
 * @param status Completion status.
 * @param ext_status Extended completion status.
 * @param app Application-specified callback data (the ocs_io_t).
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_target_bls_resp_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
{
	ocs_io_t *io = app;
	ocs_t *ocs;
	ocs_scsi_io_status_e bls_status = OCS_SCSI_STATUS_GOOD;

	ocs_assert(io, -1);
	ocs_assert(io->ocs, -1);

	ocs = io->ocs;

	/* BLS isn't really a "SCSI" concept, but use SCSI status */
	if (status) {
		io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
		bls_status = OCS_SCSI_STATUS_ERROR;
	}

	if (io->bls_cb) {
		ocs_scsi_io_cb_t saved_cb = io->bls_cb;
		void *saved_arg = io->bls_cb_arg;

		/* Clear saved fields before invoking the callback */
		io->bls_cb = NULL;
		io->bls_cb_arg = NULL;

		/* invoke callback */
		saved_cb(io, bls_status, 0, saved_arg);
	}

	ocs_scsi_check_pending(ocs);
	return 0;
}
/**
 * @brief Complete abort request.
 *
 * @par Description
 * An abort request is completed by posting a BA_ACC for the IO that
 * requested the abort. The IO is repurposed as a BLS response and
 * dispatched with ocs_target_bls_resp_cb() as its completion handler.
 *
 * @param io Pointer to the IO context.
 * @param cb Callback function to invoke upon completion.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg)
{
	fc_ba_acc_payload_t *ba_acc;

	ocs_assert(io, -1);

	/* Build the BLS parameters: echo back the exchange IDs */
	ocs_memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.bls.ox_id = io->init_task_tag;
	io->iparam.bls.rx_id = io->abort_rx_id;

	/* Fill out the BA_ACC payload carried in the BLS parameters */
	ocs_memset(io->iparam.bls.payload, 0, sizeof(io->iparam.bls.payload));
	ba_acc = (void *)io->iparam.bls.payload;
	ba_acc->ox_id = io->iparam.bls.ox_id;
	ba_acc->rx_id = io->iparam.bls.rx_id;
	ba_acc->high_seq_cnt = UINT16_MAX;

	/* generic io fields have already been populated */

	/* set type and BLS-specific fields */
	io->io_type = OCS_IO_TYPE_BLS_RESP;
	io->display_name = "bls_rsp";
	io->hio_type = OCS_HW_BLS_ACC;
	io->bls_cb = cb;
	io->bls_cb_arg = arg;

	/* dispatch IO */
	return ocs_scsi_io_dispatch(io, ocs_target_bls_resp_cb);
}
/**
 * @ingroup scsi_api_base
 * @brief Notify the base driver that the IO is complete.
 *
 * @par Description
 * This function is called by a target-server to notify the base driver that an IO
 * has completed, allowing the base driver to release its reference
 * (taken in ocs_scsi_io_alloc()) and free resources.
 * @n
 * @n @b Note: This function is not called by initiator-clients.
 *
 * @param io Pointer to IO context.
 *
 * @return None.
 */
void
ocs_scsi_io_complete(ocs_io_t *io)
{
	ocs_assert(io);

	if (ocs_io_busy(io)) {
		scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
		ocs_assert(ocs_ref_read_count(&io->ref) > 0);
		ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */
	} else {
		/* Completion for an IO that isn't busy is a caller bug */
		ocs_log_test(io->ocs, "Got completion for non-busy io with tag 0x%x\n", io->tag);
	}
}
/**
 * @brief Handle initiator IO completion.
 *
 * @par Description
 * This callback is made upon completion of an initiator operation (initiator read/write command).
 * It maps the SLI4 WCQE completion status onto an ocs_scsi_io_status_e value, unpacks
 * response/sense data and the residual from the FCP_RSP IU when one was received, and
 * then invokes the initiator-client's completion callback (io->scsi_ini_cb).
 *
 * @param hio HW IO context.
 * @param rnode Remote node.
 * @param length Length of completion data.
 * @param status Completion status.
 * @param ext_status Extended completion status.
 * @param app Application-specified callback data.
 *
 * @return None.
 */
static void
ocs_initiator_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
	int32_t status, uint32_t ext_status, void *app)
{
	ocs_io_t *io = app;
	ocs_t *ocs;
	ocs_scsi_io_status_e scsi_status;

	ocs_assert(io);
	ocs_assert(io->scsi_ini_cb);

	scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

	ocs = io->ocs;
	ocs_assert(ocs);

	/* Release any overflow SGL memory before completing back to the client */
	ocs_scsi_io_free_ovfl(io);

	/* Call target server completion */
	if (io->scsi_ini_cb) {
		fcp_rsp_iu_t *fcprsp = io->rspbuf.virt;
		ocs_scsi_cmd_resp_t rsp;
		ocs_scsi_rsp_io_cb_t cb = io->scsi_ini_cb;
		uint32_t flags = 0;
		uint8_t *pd = fcprsp->data;

		/* Clear the callback before invoking the callback */
		io->scsi_ini_cb = NULL;

		ocs_memset(&rsp, 0, sizeof(rsp));

		/* Unless status is FCP_RSP_FAILURE, fcprsp is not filled in */
		switch (status) {
		case SLI4_FC_WCQE_STATUS_SUCCESS:
			scsi_status = OCS_SCSI_STATUS_GOOD;
			break;
		case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
			scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE;
			rsp.scsi_status = fcprsp->scsi_status;
			rsp.scsi_status_qualifier = ocs_be16toh(*((uint16_t*)fcprsp->status_qualifier));

			/* Response data (when present) precedes sense data in the payload */
			if (fcprsp->flags & FCP_RSP_LEN_VALID) {
				rsp.response_data = pd;
				rsp.response_data_length = ocs_fc_getbe32(fcprsp->fcp_rsp_len);
				pd += rsp.response_data_length;
			}
			if (fcprsp->flags & FCP_SNS_LEN_VALID) {
				uint32_t sns_len = ocs_fc_getbe32(fcprsp->fcp_sns_len);

				rsp.sense_data = pd;
				rsp.sense_data_length = sns_len;
				pd += sns_len;
			}
			/* Set residual (negative residual indicates overrun) */
			if (fcprsp->flags & FCP_RESID_OVER) {
				rsp.residual = -ocs_fc_getbe32(fcprsp->fcp_resid);
				rsp.response_wire_length = length;
			} else if (fcprsp->flags & FCP_RESID_UNDER) {
				rsp.residual = ocs_fc_getbe32(fcprsp->fcp_resid);
				rsp.response_wire_length = length;
			}

			/*
			 * Note: The FCP_RSP_FAILURE can be returned for initiator IOs when the total data
			 * placed does not match the requested length even if the status is good. If
			 * the status is all zeroes, then we have to assume that a frame(s) were
			 * dropped and change the status to LOCAL_REJECT/OUT_OF_ORDER_DATA
			 */
			if (length != io->wire_len) {
				uint32_t rsp_len = ext_status;
				uint8_t *rsp_bytes = io->rspbuf.virt;
				uint32_t i;
				uint8_t all_zeroes = (rsp_len > 0);
				/* Check if the rsp is zero */
				for (i = 0; i < rsp_len; i++) {
					if (rsp_bytes[i] != 0) {
						all_zeroes = FALSE;
						break;
					}
				}
				if (all_zeroes) {
					scsi_status = OCS_SCSI_STATUS_ERROR;
					ocs_log_test(io->ocs, "[%s]" SCSI_IOFMT "local reject=0x%02x\n",
						io->node->display_name, SCSI_IOFMT_ARGS(io),
						SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_DATA);
				}
			}
			break;
		case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
			if (ext_status == SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT) {
				scsi_status = OCS_SCSI_STATUS_COMMAND_TIMEOUT;
			} else {
				scsi_status = OCS_SCSI_STATUS_ERROR;
			}
			break;
		case SLI4_FC_WCQE_STATUS_WQE_TIMEOUT:
			/* IO timed out */
			scsi_status = OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
			break;
		case SLI4_FC_WCQE_STATUS_DI_ERROR:
			/* DIF error: bits 0/1/2 of ext_status select guard/app-tag/ref-tag */
			if (ext_status & 0x01) {
				scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
			} else if (ext_status & 0x02) {
				scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
			} else if (ext_status & 0x04) {
				scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
			} else {
				scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR;
			}
			break;
		default:
			scsi_status = OCS_SCSI_STATUS_ERROR;
			break;
		}

		cb(io, scsi_status, &rsp, flags, io->scsi_ini_cb_arg);
	}
	ocs_scsi_check_pending(ocs);
}
/**
 * @ingroup scsi_api_base
 * @brief Initiate initiator read IO.
 *
 * @par Description
 * Sends a SCSI read command on behalf of an initiator-client. The command
 * payload is described by the scatter-gather list @c sgl with @c sgl_count
 * entries.
 * @n @n
 * Upon completion, @c cb is invoked with the request status and, on success,
 * the SCSI response data.
 *
 * @param node Pointer to the node.
 * @param io Pointer to the IO context.
 * @param lun LUN value.
 * @param cdb Pointer to the CDB.
 * @param cdb_len Length of the CDB.
 * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param wire_len Length of the payload.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_rd_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len,
	ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags)
{
	/* Thin wrapper: read IO with no TMF and no first burst */
	return ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, 0, cdb, cdb_len,
				dif_info, sgl, sgl_count, wire_len, 0, cb, arg, flags);
}
/**
 * @ingroup scsi_api_base
 * @brief Initiate initiator write IO.
 *
 * @par Description
 * Sends a SCSI write command on behalf of an initiator-client. The command
 * payload is described by the scatter-gather list @c sgl with @c sgl_count
 * entries.
 * @n @n
 * Upon completion, @c cb is invoked with the request status and, on success,
 * the SCSI response data.
 *
 * @param node Pointer to the node.
 * @param io Pointer to IO context.
 * @param lun LUN value.
 * @param cdb Pointer to the CDB.
 * @param cdb_len Length of the CDB.
 * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param wire_len Length of the payload.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_wr_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len,
	ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags)
{
	/* Thin wrapper: write IO with no TMF and no first burst */
	return ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len,
				dif_info, sgl, sgl_count, wire_len, 0, cb, arg, flags);
}
/**
 * @ingroup scsi_api_base
 * @brief Initiate initiator write IO with first-burst data.
 *
 * @par Description
 * This call is made by an initiator-client to send a SCSI write command. The payload
 * for the command is given by a scatter-gather list @c sgl for @c sgl_count
 * entries.
 * @n @n
 * Upon completion, the callback @c cb is invoked and passed request status. If the command
 * completed successfully, the callback is given SCSI response data.
 *
 * @param node Pointer to the node.
 * @param io Pointer to IO context.
 * @param lun LUN value.
 * @param cdb Pointer to the CDB.
 * @param cdb_len Length of the CDB.
 * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param wire_len Length of the payload.
 * @param first_burst Number of first burst bytes to send.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_wr_io_first_burst(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
	ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags)
{
	int32_t rc;

	/*
	 * Forward first_burst through to ocs_scsi_send_io(); it was previously
	 * hard-coded to 0, which silently disabled first-burst on this path and
	 * left the first_burst parameter unused.
	 */
	rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count,
			      wire_len, first_burst, cb, arg, flags);

	return rc;
}
/**
 * @ingroup scsi_api_base
 * @brief Initiate initiator SCSI command with no data.
 *
 * @par Description
 * Sends a SCSI command that carries no data phase on behalf of an
 * initiator-client.
 * @n @n
 * Upon completion, @c cb is invoked with the request status and, on success,
 * the SCSI response data.
 *
 * @param node Pointer to the node.
 * @param io Pointer to the IO context.
 * @param lun LUN value.
 * @param cdb Pointer to the CDB.
 * @param cdb_len Length of the CDB.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
	ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags)
{
	/* No data phase: no DIF info, no SGL, zero wire length and first burst */
	return ocs_scsi_send_io(OCS_HW_IO_INITIATOR_NODATA, node, io, lun, 0, cdb, cdb_len,
				NULL, NULL, 0, 0, 0, cb, arg, flags);
}
/**
 * @ingroup scsi_api_base
 * @brief Initiate initiator task management operation.
 *
 * @par Description
 * This command is used to send a SCSI task management function command. If the command
 * requires it (QUERY_TASK_SET for example), a payload may be associated with the command.
 * If no payload is required, then @c sgl_count may be zero and @c sgl is ignored.
 * @n @n
 * Upon completion @c cb is invoked with status and SCSI response data.
 *
 * @param node Pointer to the node.
 * @param io Pointer to the IO context.
 * @param io_to_abort Pointer to the IO context to abort in the
 * case of OCS_SCSI_TMF_ABORT_TASK. Note: this can point to the
 * same the same ocs_io_t as @c io, provided that @c io does not
 * have any outstanding work requests.
 * @param lun LUN value.
 * @param tmf Task management command.
 * @param sgl Pointer to the scatter-gather list.
 * @param sgl_count Count of the scatter-gather list elements.
 * @param len Length of the payload.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_scsi_send_tmf(ocs_node_t *node, ocs_io_t *io, ocs_io_t *io_to_abort, uint64_t lun, ocs_scsi_tmf_cmd_e tmf,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_rsp_io_cb_t cb, void *arg)
{
	int32_t rc;

	ocs_assert(io, -1);

	if (tmf == OCS_SCSI_TMF_ABORT_TASK) {
		ocs_assert(io_to_abort, -1);

		/* take a reference on IO being aborted; released in
		 * ocs_scsi_abort_io_cb() once the abort completes */
		if ((ocs_ref_get_unless_zero(&io_to_abort->ref) == 0)) {
			/* command no longer active */
			scsi_io_printf(io, "command no longer active\n");
			return -1;
		}
		/* generic io fields have already been populated */

		/* abort-specific fields */
		io->io_type = OCS_IO_TYPE_ABORT;
		io->display_name = "abort_task";
		io->io_to_abort = io_to_abort;
		io->send_abts = TRUE;
		io->scsi_ini_cb = cb;
		io->scsi_ini_cb_arg = arg;

		/* now dispatch IO */
		rc = ocs_scsi_io_dispatch_abort(io, ocs_scsi_abort_io_cb);
		if (rc) {
			scsi_io_printf(io, "Failed to dispatch abort\n");
			/* NOTE(review): the reference taken above is on
			 * io_to_abort->ref, but this failure path puts io->ref;
			 * cf. ocs_scsi_abort_io_cb(), which puts
			 * io->io_to_abort->ref — verify this is intended. */
			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
		}
	} else {
		/* All other TMFs are carried in the FCP_CMND IU; sent via
		 * ocs_scsi_send_io() as a read-type IO with the tmf code set */
		io->display_name = "tmf";
		rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, tmf, NULL, 0, NULL,
			sgl, sgl_count, len, 0, cb, arg, 0);
	}

	return rc;
}
/**
 * @ingroup scsi_api_base
 * @brief Send an FCP IO.
 *
 * @par Description
 * An FCP read/write IO command, with optional task management flags, is sent to @c node.
 * Builds the FCP_CMND IU (CDB, LUN, task attribute, TMF flags, and FCP_DL), fills in the
 * initiator iparam fields, and dispatches the IO with ocs_initiator_io_cb() as the
 * completion handler.
 *
 * @param type HW IO type to send.
 * @param node Pointer to the node destination of the IO.
 * @param io Pointer to the IO context.
 * @param lun LUN value.
 * @param tmf Task management command.
 * @param cdb Pointer to the SCSI CDB.
 * @param cdb_len Length of the CDB, in bytes.
 * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
 * @param sgl Pointer to the scatter-gather list.
 * @param sgl_count Number of SGL entries in SGL.
 * @param wire_len Payload length, in bytes, of data on wire.
 * @param first_burst Number of first burst bytes to send.
 * @param cb Completion callback.
 * @param arg Application-specified completion callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
/* tc: could eliminate LUN, as it's part of the IO structure */
static int32_t ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun,
	ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len,
	ocs_scsi_dif_info_t *dif_info,
	ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
	ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags)
{
	int32_t rc;
	ocs_t *ocs;
	fcp_cmnd_iu_t *cmnd;
	uint32_t cmnd_bytes = 0;
	uint32_t *fcp_dl;
	uint8_t tmf_flags = 0;

	/* Assert io before dereferencing it (previously io->node was read
	 * before the io assertion) */
	ocs_assert(io, -1);
	ocs_assert(io->node, -1);
	ocs_assert(io->node == node, -1);
	ocs = io->ocs;
	ocs_assert(cb, -1);

	io->sgl_count = sgl_count;

	/* Copy SGL if needed */
	if (sgl != io->sgl) {
		ocs_assert(sgl_count <= io->sgl_allocated, -1);
		ocs_memcpy(io->sgl, sgl, sizeof(*io->sgl) * sgl_count);
	}

	/* save initiator and target task tags for debugging */
	io->tgt_task_tag = 0xffff;

	io->wire_len = wire_len;
	io->hio_type = type;

	/* Optionally trace the CDB bytes when SCSI tracing is enabled */
	if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) {
		char buf[80];
		ocs_textbuf_t txtbuf;
		uint32_t i;

		ocs_textbuf_init(ocs, &txtbuf, buf, sizeof(buf));

		ocs_textbuf_printf(&txtbuf, "cdb%d: ", cdb_len);
		for (i = 0; i < cdb_len; i ++) {
			ocs_textbuf_printf(&txtbuf, "%02X%s", cdb[i], (i == (cdb_len-1)) ? "" : " ");
		}
		scsi_io_printf(io, "%s len %d, %s\n", (io->hio_type == OCS_HW_IO_INITIATOR_READ) ? "read" :
			(io->hio_type == OCS_HW_IO_INITIATOR_WRITE) ? "write" : "", io->wire_len,
			ocs_textbuf_get_buffer(&txtbuf));
	}

	ocs_assert(io->cmdbuf.virt, -1);

	cmnd = io->cmdbuf.virt;

	ocs_assert(sizeof(*cmnd) <= io->cmdbuf.size, -1);

	ocs_memset(cmnd, 0, sizeof(*cmnd));

	/* Default FCP_CMND IU doesn't include additional CDB bytes but does include FCP_DL */
	cmnd_bytes = sizeof(fcp_cmnd_iu_t) - sizeof(cmnd->fcp_cdb_and_dl) + sizeof(uint32_t);

	/* FCP_DL immediately follows the (possibly extended) CDB */
	fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));

	if (cdb) {
		if (cdb_len <= 16) {
			ocs_memcpy(cmnd->fcp_cdb, cdb, cdb_len);
		} else {
			uint32_t addl_cdb_bytes;

			ocs_memcpy(cmnd->fcp_cdb, cdb, 16);
			addl_cdb_bytes = cdb_len - 16;
			ocs_memcpy(cmnd->fcp_cdb_and_dl, &(cdb[16]), addl_cdb_bytes);
			/* additional_fcp_cdb_length is in words, not bytes */
			cmnd->additional_fcp_cdb_length = (addl_cdb_bytes + 3) / 4;
			/* fcp_dl is a uint32_t pointer, so this advances it by
			 * additional_fcp_cdb_length words, past the extra CDB bytes */
			fcp_dl += cmnd->additional_fcp_cdb_length;

			/* Round up additional CDB bytes */
			cmnd_bytes += (addl_cdb_bytes + 3) & ~0x3;
		}
	}

	be64enc(cmnd->fcp_lun, CAM_EXTLUN_BYTE_SWIZZLE(lun));

	/* FCP-2 devices require a command reference number */
	if (node->fcp2device) {
		if(ocs_get_crn(node, &cmnd->command_reference_number,
					lun)) {
			return -1;
		}
	}

	/* Map driver queueing flags onto the FCP task attribute */
	if (flags & OCS_SCSI_CMD_HEAD_OF_QUEUE)
		cmnd->task_attribute = FCP_TASK_ATTR_HEAD_OF_QUEUE;
	else if (flags & OCS_SCSI_CMD_ORDERED)
		cmnd->task_attribute = FCP_TASK_ATTR_ORDERED;
	else if (flags & OCS_SCSI_CMD_UNTAGGED)
		cmnd->task_attribute = FCP_TASK_ATTR_UNTAGGED;
	else if (flags & OCS_SCSI_CMD_ACA)
		cmnd->task_attribute = FCP_TASK_ATTR_ACA;
	else
		cmnd->task_attribute = FCP_TASK_ATTR_SIMPLE;
	cmnd->command_priority = (flags & OCS_SCSI_PRIORITY_MASK) >>
	    OCS_SCSI_PRIORITY_SHIFT;

	/* Translate the driver TMF code into FCP_CMND task management flags */
	switch (tmf) {
	case OCS_SCSI_TMF_QUERY_TASK_SET:
		tmf_flags = FCP_QUERY_TASK_SET;
		break;
	case OCS_SCSI_TMF_ABORT_TASK_SET:
		tmf_flags = FCP_ABORT_TASK_SET;
		break;
	case OCS_SCSI_TMF_CLEAR_TASK_SET:
		tmf_flags = FCP_CLEAR_TASK_SET;
		break;
	case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
		tmf_flags = FCP_QUERY_ASYNCHRONOUS_EVENT;
		break;
	case OCS_SCSI_TMF_LOGICAL_UNIT_RESET:
		tmf_flags = FCP_LOGICAL_UNIT_RESET;
		break;
	case OCS_SCSI_TMF_CLEAR_ACA:
		tmf_flags = FCP_CLEAR_ACA;
		break;
	case OCS_SCSI_TMF_TARGET_RESET:
		tmf_flags = FCP_TARGET_RESET;
		break;
	default:
		tmf_flags = 0;
	}
	cmnd->task_management_flags = tmf_flags;

	*fcp_dl = ocs_htobe32(io->wire_len);

	/* Set the data-direction bit for the IU */
	switch (io->hio_type) {
	case OCS_HW_IO_INITIATOR_READ:
		cmnd->rddata = 1;
		break;
	case OCS_HW_IO_INITIATOR_WRITE:
		cmnd->wrdata = 1;
		break;
	case OCS_HW_IO_INITIATOR_NODATA:
		/* sets neither */
		break;
	default:
		ocs_log_test(ocs, "bad IO type %d\n", io->hio_type);
		return -1;
	}

	rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif);
	if (rc) {
		return rc;
	}

	io->scsi_ini_cb = cb;
	io->scsi_ini_cb_arg = arg;

	/* set command and response buffers in the iparam */
	io->iparam.fcp_ini.cmnd = &io->cmdbuf;
	io->iparam.fcp_ini.cmnd_size = cmnd_bytes;
	io->iparam.fcp_ini.rsp = &io->rspbuf;
	io->iparam.fcp_ini.flags = 0;
	io->iparam.fcp_ini.dif_oper = io->hw_dif.dif;
	io->iparam.fcp_ini.blk_size = io->hw_dif.blk_size;
	io->iparam.fcp_ini.timeout = io->timeout;
	io->iparam.fcp_ini.first_burst = first_burst;

	return ocs_scsi_io_dispatch(io, ocs_initiator_io_cb);
}
/**
 * @ingroup scsi_api_base
 * @brief Callback for an aborted IO.
 *
 * @par Description
 * Callback function invoked upon completion of an IO abort request. Releases
 * the reference taken on the aborted IO in ocs_scsi_send_tmf(), translates
 * the WQE completion status to a SCSI-level abort status, and completes the
 * abort IO back to the initiator-client (or frees it when no callback was
 * registered).
 *
 * @param hio HW IO context.
 * @param rnode Remote node.
 * @param len Response length.
 * @param status Completion status.
 * @param ext_status Extended completion status.
 * @param arg Application-specific callback, usually IO context.
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status,
	uint32_t ext_status, void *arg)
{
	ocs_io_t *io = arg;
	ocs_t *ocs;
	ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD;

	ocs_assert(io, -1);
	ocs_assert(ocs_io_busy(io), -1);
	ocs_assert(io->ocs, -1);
	ocs_assert(io->io_to_abort, -1);
	ocs = io->ocs;

	ocs_log_debug(ocs, "status %d ext %d\n", status, ext_status);

	/* done with IO to abort */
	ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_send_tmf() */

	/* Release any overflow SGL memory held by the abort IO */
	ocs_scsi_io_free_ovfl(io);

	/* Map the WQE completion onto a SCSI abort status */
	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_status = OCS_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED) {
			scsi_status = OCS_SCSI_STATUS_ABORTED;
		} else if (ext_status == SLI4_FC_LOCAL_REJECT_NO_XRI) {
			/* nothing to abort: the exchange was already gone */
			scsi_status = OCS_SCSI_STATUS_NO_IO;
		} else if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS) {
			scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS;
		} else {
			ocs_log_test(ocs, "Unhandled local reject 0x%x/0x%x\n", status, ext_status);
			scsi_status = OCS_SCSI_STATUS_ERROR;
		}
		break;
	default:
		scsi_status = OCS_SCSI_STATUS_ERROR;
		break;
	}

	/* Without a client callback, the abort IO is freed here */
	if (io->scsi_ini_cb) {
		(*io->scsi_ini_cb)(io, scsi_status, NULL, 0, io->scsi_ini_cb_arg);
	} else {
		ocs_scsi_io_free(io);
	}

	ocs_scsi_check_pending(ocs);
	return 0;
}
/**
 * @ingroup scsi_api_base
 * @brief Return SCSI API integer valued property.
 *
 * @par Description
 * Called by a target-server or initiator-client to retrieve an
 * integer-valued property. Most values are fetched from the hardware
 * layer; a failed fetch falls through to the "invalid property" path.
 *
 * @param ocs Pointer to the ocs.
 * @param prop Property value to return.
 *
 * @return Returns a value, or 0 if invalid property was requested.
 */
uint32_t
ocs_scsi_get_property(ocs_t *ocs, ocs_scsi_property_e prop)
{
	ocs_xport_t *xport = ocs->xport;
	uint32_t val;

	switch (prop) {
	case OCS_SCSI_MAX_SGE:
		if (ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &val) == 0) {
			return val;
		}
		break;
	case OCS_SCSI_MAX_SGL:
		/*
		 * If chained-SGL test mode is enabled, the number of HW SGEs
		 * has been limited; report back the original maximum.
		 */
		if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) {
			return (OCS_FC_MAX_SGL);
		}
		if (ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &val) == 0) {
			return val;
		}
		break;
	case OCS_SCSI_MAX_IOS:
		return ocs_io_pool_allocated(xport->io_pool);
	case OCS_SCSI_DIF_CAPABLE:
		if (ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &val) == 0) {
			return val;
		}
		break;
	case OCS_SCSI_MAX_FIRST_BURST:
		return 0;
	case OCS_SCSI_DIF_MULTI_SEPARATE:
		if (ocs_hw_get(&ocs->hw, OCS_HW_DIF_MULTI_SEPARATE, &val) == 0) {
			return val;
		}
		break;
	case OCS_SCSI_ENABLE_TASK_SET_FULL:
		/* Return FALSE if we are send frame capable */
		if (ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &val) == 0) {
			return !val;
		}
		break;
	default:
		break;
	}

	ocs_log_debug(ocs, "invalid property request %d\n", prop);
	return 0;
}
/**
 * @ingroup scsi_api_base
 * @brief Return a property pointer.
 *
 * @par Description
 * This function is called by a target-server or initiator-client to
 * retrieve a pointer to the requested property. The serial/part number
 * values come from the VPD data; fixed fallback strings are returned
 * when VPD is unavailable.
 *
 * @param ocs Pointer to the ocs.
 * @param prop Property value to return.
 *
 * @return Returns pointer to the requested property, or NULL otherwise.
 */
void *ocs_scsi_get_property_ptr(ocs_t *ocs, ocs_scsi_property_e prop)
{
	void *rc = NULL;

	switch (prop) {
	case OCS_SCSI_WWNN:
		rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE);
		break;
	case OCS_SCSI_WWPN:
		rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
		break;
	case OCS_SCSI_PORTNUM:
		rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_PORTNUM);
		break;
	case OCS_SCSI_BIOS_VERSION_STRING:
		rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_BIOS_VERSION_STRING);
		break;
	case OCS_SCSI_SERIALNUMBER:
	{
		uint8_t *pvpd;
		uint32_t vpd_len;

		/* Fallback strings start with an octal escape byte — presumably a
		 * length prefix (\012 == 10 == strlen("sn-unknown")); confirm with
		 * consumers of this property. */
		if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) {
			ocs_log_test(ocs, "Can't get VPD length\n");
			rc = "\012sn-unknown";
			break;
		}

		pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD);
		if (pvpd) {
			rc = ocs_find_vpd(pvpd, vpd_len, "SN");
		}

		if (rc == NULL ||
		    ocs_strlen(rc) == 0) {
			/* Note: VPD is missing, using wwnn for serial number */
			scsi_log(ocs, "Note: VPD is missing, using wwnn for serial number\n");
			/* Use the last 32 bits of the WWN */
			/* NOTE(review): ocs was already dereferenced above
			 * (ocs_hw_get(&ocs->hw, ...)), so the ocs == NULL test
			 * here can never be the deciding condition. */
			if ((ocs == NULL) || (ocs->domain == NULL) || (ocs->domain->sport == NULL)) {
				rc = "\011(Unknown)";
			} else {
				rc = &ocs->domain->sport->wwnn_str[8];
			}
		}
		break;
	}
	case OCS_SCSI_PARTNUMBER:
	{
		uint8_t *pvpd;
		uint32_t vpd_len;

		if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) {
			ocs_log_test(ocs, "Can't get VPD length\n");
			rc = "\012pn-unknown";
			break;
		}

		pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD);
		if (pvpd) {
			rc = ocs_find_vpd(pvpd, vpd_len, "PN");
			if (rc == NULL) {
				rc = "\012pn-unknown";
			}
		} else {
			rc = "\012pn-unknown";
		}
		break;
	}
	default:
		break;
	}

	if (rc == NULL) {
		ocs_log_debug(ocs, "invalid property request %d\n", prop);
	}
	return rc;
}
/**
 * @ingroup scsi_api_base
 * @brief Notify that delete initiator is complete.
 *
 * @par Description
 * Sent by the target-server to notify the base driver that the work started from
 * ocs_scsi_del_initiator() is now complete and that it is safe for the node to
 * release the rest of its resources.
 *
 * @param node Pointer to the node.
 *
 * @return None.
 */
void
ocs_scsi_del_initiator_complete(ocs_node_t *node)
{
	/* Post the completion event to the node's state machine so it can
	 * resume its teardown sequence */
	ocs_node_post_event(node, OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
}
/**
 * @ingroup scsi_api_base
 * @brief Notify that delete target is complete.
 *
 * @par Description
 * Sent by the initiator-client to notify the base driver that the work started from
 * ocs_scsi_del_target() is now complete and that it is safe for the node to
 * release the rest of its resources.
 *
 * @param node Pointer to the node.
 *
 * @return None.
 */
void
ocs_scsi_del_target_complete(ocs_node_t *node)
{
	/* Post the completion event to the node's state machine so it can
	 * resume its teardown sequence */
	ocs_node_post_event(node, OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
}
/**
 * @brief Update transferred count
 *
 * @par Description
 * Updates io->transferred, as required when using first burst, when the amount
 * of first burst data processed differs from the amount of first burst
 * data received.
 *
 * @param io Pointer to the io object.
 * @param transferred Number of bytes transferred out of first burst buffers.
 *
 * @return None.
 */
void
ocs_scsi_update_first_burst_transferred(ocs_io_t *io, uint32_t transferred)
{
	/* Simple setter: record how much first-burst data was actually consumed */
	io->transferred = transferred;
}
/**
 * @brief Register bounce callback for multi-threading.
 *
 * @par Description
 * Registers the back-end bounce function with the hardware layer.
 *
 * @param ocs Pointer to device object.
 * @param fctn Function pointer of bounce function.
 *
 * @return None.
 */
void
ocs_scsi_register_bounce(ocs_t *ocs, void(*fctn)(void(*fctn)(void *arg), void *arg, uint32_t s_id, uint32_t d_id,
	uint32_t ox_id))
{
	ocs_hw_rtn_e status;

	status = ocs_hw_callback(&ocs->hw, OCS_HW_CB_BOUNCE, fctn, NULL);
	if (status != 0) {
		ocs_log_test(ocs, "ocs_hw_callback(OCS_HW_CB_BOUNCE) failed: %d\n", status);
	}
}
diff --git a/sys/dev/ocs_fc/ocs_xport.c b/sys/dev/ocs_fc/ocs_xport.c
index d997ea245132..9e69bf0ed98f 100644
--- a/sys/dev/ocs_fc/ocs_xport.c
+++ b/sys/dev/ocs_fc/ocs_xport.c
@@ -1,1133 +1,1133 @@
/*-
* Copyright (c) 2017 Broadcom. All rights reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* FC transport API
*
*/
#include "ocs.h"
#include "ocs_device.h"
static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg);
static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg);
/**
 * @brief Post node event callback argument.
 */
typedef struct {
	ocs_sem_t sem;		/* completion semaphore — presumably signaled once the
				 * posted event has been processed; confirm at use site */
	ocs_node_t *node;	/* node the event is posted to */
	ocs_sm_event_t evt;	/* state-machine event being posted */
	void *context;		/* event-specific context passed through to the handler */
} ocs_xport_post_node_event_t;
/**
 * @brief Allocate a transport object.
 *
 * @par Description
 * Allocates a zeroed transport object and binds it to the given device
 * instance.
 *
 * @param ocs Pointer to device instance.
 *
 * @return Returns the pointer to the allocated transport object, or NULL if failed.
 */
ocs_xport_t *
ocs_xport_alloc(ocs_t *ocs)
{
	ocs_xport_t *xport;

	ocs_assert(ocs, NULL);

	xport = ocs_malloc(ocs, sizeof(*xport), OCS_M_ZERO);
	if (xport == NULL) {
		return NULL;
	}

	xport->ocs = ocs;
	return xport;
}
/**
 * @brief Tear down the RQ threads and the circular buffers used to pass sequences.
 *
 * @par Description
 * Terminates each running RQ servicing thread, busy-waits for it to exit,
 * and frees the per-thread circular buffer. (The previous comment here was a
 * copy-paste of the create function's documentation.)
 *
 * @param xport Pointer to transport object.
 *
 * @return None.
 */
static void
ocs_xport_rq_threads_teardown(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	uint32_t i;

	/* Nothing to tear down if threads were never created */
	if (xport->num_rq_threads == 0 ||
	    xport->rq_thread_info == NULL) {
		return;
	}

	/* Abort any threads */
	for (i = 0; i < xport->num_rq_threads; i++) {
		if (xport->rq_thread_info[i].thread_started) {
			ocs_thread_terminate(&xport->rq_thread_info[i].thread);
			/* wait for the thread to exit */
			ocs_log_debug(ocs, "wait for thread %d to exit\n", i);
			while (xport->rq_thread_info[i].thread_started) {
				ocs_udelay(10000);
			}
			ocs_log_debug(ocs, "thread %d to exited\n", i);
		}
		if (xport->rq_thread_info[i].seq_cbuf != NULL) {
			ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf);
			xport->rq_thread_info[i].seq_cbuf = NULL;
		}
	}
}
/**
 * @brief Create the RQ threads and the circular buffers used to pass sequences.
 *
 * @par Description
 * Creates the circular buffers and the servicing threads for RQ processing.
 * On any failure, everything created so far is torn down via
 * ocs_xport_rq_threads_teardown().
 *
 * @param xport Pointer to transport object.
 * @param num_rq_threads Number of RQ processing threads that the
 * driver creates.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_xport_rq_threads_create(ocs_xport_t *xport, uint32_t num_rq_threads)
{
	ocs_t *ocs = xport->ocs;
	int32_t rc = 0;
	uint32_t i;

	xport->num_rq_threads = num_rq_threads;
	ocs_log_debug(ocs, "number of RQ threads %d\n", num_rq_threads);
	/* Zero threads means RQ processing is not handed off to threads */
	if (num_rq_threads == 0) {
		return 0;
	}

	/* Allocate the space for the thread objects */
	xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M_ZERO);
	if (xport->rq_thread_info == NULL) {
		ocs_log_err(ocs, "memory allocation failure\n");
		return -1;
	}

	/* Create the circular buffers and threads. */
	for (i = 0; i < num_rq_threads; i++) {
		xport->rq_thread_info[i].ocs = ocs;
		xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR);
		if (xport->rq_thread_info[i].seq_cbuf == NULL) {
			goto ocs_xport_rq_threads_create_error;
		}

		ocs_snprintf(xport->rq_thread_info[i].thread_name,
			sizeof(xport->rq_thread_info[i].thread_name),
			"ocs_unsol_rq:%d:%d", ocs->instance_index, i);
		rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread,
			xport->rq_thread_info[i].thread_name,
			&xport->rq_thread_info[i], OCS_THREAD_RUN);
		if (rc) {
			ocs_log_err(ocs, "ocs_thread_create failed: %d\n", rc);
			goto ocs_xport_rq_threads_create_error;
		}
		/* thread_started also drives teardown, so set it only on success */
		xport->rq_thread_info[i].thread_started = TRUE;
	}
	return 0;

ocs_xport_rq_threads_create_error:
	ocs_xport_rq_threads_teardown(xport);
	return -1;
}
/**
 * @brief Do as much allocation as possible, but do not initialize the device.
 *
 * @par Description
 * Performs the functions required to get a device ready to run: sets up the
 * hardware layer, configures the SGL count, and creates the node pool, the
 * IO pool, and the RQ processing threads. Pools already created are freed
 * again if a later step fails.
 *
 * @param xport Pointer to transport object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
ocs_xport_attach(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	int32_t rc;
	uint32_t max_sgl;
	uint32_t n_sgl;
	uint32_t i;
	uint32_t value;
	uint32_t max_remote_nodes;
	/* booleans used for cleanup if initialization fails */
	uint8_t io_pool_created = FALSE;
	uint8_t node_pool_created = FALSE;

	ocs_list_init(&ocs->domain_list, ocs_domain_t, link);

	/* Initially hold (queue) incoming frames on every FCF */
	for (i = 0; i < SLI4_MAX_FCFI; i++) {
		xport->fcfi[i].hold_frames = 1;
		ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i);
		ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link);
	}

	rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version);
	if (rc) {
		ocs_log_test(ocs, "can't set OCS_HW_WAR_VERSION\n");
		return -1;
	}

	rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC);
	if (rc) {
		ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc);
		return -1;
	} else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) {
		/* Test mode: deliberately stop after hardware setup */
		ocs_log_debug(ocs, "stopping after ocs_hw_setup\n");
		return -1;
	}

	ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce);
	ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce);

	ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy);
	ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta);
	ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value);
	ocs_log_debug(ocs, "RQ Selection Policy: %d\n", value);

	ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def);

	/* Clamp the SGL count to the HW limit (minus reserved SGEs) */
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl);
	max_sgl -= SLI4_SGE_MAX_RESERVED;
	n_sgl = MIN(OCS_FC_MAX_SGL, max_sgl);

	/* EVT: For chained SGL testing */
	if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) {
		n_sgl = 4;
	}

	/* Note: number of SGLs must be set for ocs_node_create_pool */
	if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc);
		return -1;
	} else {
		ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl);
	}

	/* Use the HW maximum unless a node limit was configured explicitly */
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, &max_remote_nodes);
	if (!ocs->max_remote_nodes)
		ocs->max_remote_nodes = max_remote_nodes;

	rc = ocs_node_create_pool(ocs, ocs->max_remote_nodes);
	if (rc) {
		ocs_log_err(ocs, "Can't allocate node pool\n");
		goto ocs_xport_attach_cleanup;
	} else {
		node_pool_created = TRUE;
	}

	/* EVT: if testing chained SGLs allocate OCS_FC_MAX_SGL SGE's in the IO */
	xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios,
		(ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl);
	if (xport->io_pool == NULL) {
		ocs_log_err(ocs, "Can't allocate IO pool\n");
		goto ocs_xport_attach_cleanup;
	} else {
		io_pool_created = TRUE;
	}

	/*
	 * setup the RQ processing threads
	 */
	if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) {
		ocs_log_err(ocs, "failure creating RQ threads\n");
		goto ocs_xport_attach_cleanup;
	}

	return 0;

ocs_xport_attach_cleanup:
	/* Unwind only the pools that were successfully created */
	if (io_pool_created) {
		ocs_io_pool_free(xport->io_pool);
	}

	if (node_pool_created) {
		ocs_node_free_pool(ocs);
	}

	return -1;
}
/**
 * @brief Determines how to setup auto Xfer ready.
 *
 * @par Description
 * Queries the HW for auto xfer rdy capability; if absent, disables the
 * feature and returns success. Otherwise programs the feature size and,
 * when a protection type is configured via driver properties, the T10
 * DIF-related auto xfer rdy settings.
 *
 * @param xport Pointer to transport object.
 *
 * @return Returns 0 on success or a non-zero value on failure.
 */
static int32_t
ocs_xport_initialize_auto_xfer_ready(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	uint32_t capable;
	char buf[32];
	uint32_t blocksize = 512;	/* default ramdisc block size */
	uint8_t ptype = 0;		/* 0 == no backend protection */

	ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &capable);
	if (!capable) {
		/* Not supported on this port: disable and report success */
		ocs->auto_xfer_rdy_size = 0;
		ocs_log_test(ocs, "Cannot enable auto xfer rdy for this port\n");
		return 0;
	}

	if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) {
		ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
		return -1;
	}

	/*
	 * Determine if we are doing protection in the backend. We are looking
	 * at the modules parameters here. The backend cannot allow a format
	 * command to change the protection mode when using this feature,
	 * otherwise the firmware will not do the proper thing.
	 */
	if (ocs_get_property("p_type", buf, sizeof(buf)) == 0) {
		ptype = ocs_strtoul(buf, 0, 0);
	}
	if (ocs_get_property("ramdisc_blocksize", buf, sizeof(buf)) == 0) {
		blocksize = ocs_strtoul(buf, 0, 0);
	}
	/* A non-empty "external_dif" property implies protection type 1
	 * unless a p_type was already configured explicitly. */
	if ((ocs_get_property("external_dif", buf, sizeof(buf)) == 0) &&
	    ocs_strlen(buf) && (ptype == 0)) {
		ptype = 1;
	}

	if (ptype != 0) {
		if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) {
			ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
			return -1;
		}
		if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, blocksize)) {
			ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc);
			return -1;
		}
		if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, ptype)) {
			ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
			return -1;
		}
		if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) {
			ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc);
			return -1;
		}
		if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) {
			ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc);
			return -1;
		}
	}

	ocs_log_debug(ocs, "Auto xfer rdy is enabled, p_type=%d, blksize=%d\n",
		      ptype, blocksize);
	return 0;
}
/**
 * @brief Initialize link topology config
 *
 * @par Description
 * Topology can be fetched from mod-param or Persistent Topology (PT).
 * a. Mod-param value is used when the value is 1 (P2P) or 2 (LOOP).
 * b. PT is used if mod-param is not provided (i.e., default value of AUTO).
 * Also, if mod-param is used, update PT.
 *
 * @param ocs Pointer to ocs
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int
ocs_topology_setup(ocs_t *ocs)
{
	uint32_t topo;

	if (ocs->topology != OCS_HW_TOPOLOGY_AUTO) {
		/* Explicit mod-param topology: use it and save it as PT.
		 * Ignore PT-update failure here; the link will come up either
		 * in auto mode (if PT is unsupported) or with the last saved
		 * PT value. */
		topo = ocs->topology;
		ocs_hw_set_persistent_topology(&ocs->hw, topo, OCS_CMD_POLL);
	} else {
		/* AUTO: fall back to the persistent topology value */
		topo = ocs_hw_get_config_persistent_topology(&ocs->hw);
	}

	return ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topo);
}
/**
 * @brief Initializes the device.
 *
 * @par Description
 * Performs the functions required to make a device functional: programs the
 * HW configuration (IO and SGL counts, high login mode, auto xfer rdy,
 * topology, link speed, DIF), registers the transport callbacks, initializes
 * the HW, and brings up the target/initiator backends plus any configured
 * vports. On failure, previously-initialized pieces are torn down via the
 * cleanup label.
 *
 * @param xport Pointer to transport object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
ocs_xport_initialize(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	int32_t rc;
	uint32_t i;
	uint32_t max_hw_io;
	uint32_t max_sgl;
	uint32_t hlm;
	uint32_t rq_limit;
	uint32_t dif_capable;
	uint8_t dif_separate = 0;
	char prop_buf[32];

	/* booleans used for cleanup if initialization fails */
	uint8_t ini_device_set = FALSE;
	uint8_t tgt_device_set = FALSE;
	uint8_t hw_initialized = FALSE;

	ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io);
	if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc);
		return -1;
	}

	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl);
	max_sgl -= SLI4_SGE_MAX_RESERVED;

	if (ocs->enable_hlm) {
		ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm);
		if (!hlm) {
			/* Not fatal: fall back to normal login mode */
			ocs->enable_hlm = FALSE;
			ocs_log_err(ocs, "Cannot enable high login mode for this port\n");
		} else {
			ocs_log_debug(ocs, "High login mode is enabled\n");
			if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) {
				ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc);
				return -1;
			}
		}
	}

	/* validate the auto xfer_rdy size */
	if (ocs->auto_xfer_rdy_size > 0 &&
	    (ocs->auto_xfer_rdy_size < 2048 ||
	     ocs->auto_xfer_rdy_size > 65536)) {
		ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n");
		return -1;
	}

	ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io);

	if (ocs->auto_xfer_rdy_size > 0) {
		if (ocs_xport_initialize_auto_xfer_ready(xport)) {
			ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc);
			return -1;
		}
		if (ocs->esoc) {
			ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE);
		}
	}

	if (ocs->explicit_buffer_list) {
		/* Are pre-registered SGL's required? */
		ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i);
		if (i == TRUE) {
			ocs_log_err(ocs, "Explicit Buffer List not supported on this device, not enabled\n");
		} else {
			ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE);
		}
	}

	/* Setup persistent topology based on topology mod-param value */
	rc = ocs_topology_setup(ocs);
	if (rc) {
		ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc);
		return -1;
	}

	/* NOTE(review): this second OCS_HW_TOPOLOGY set uses the raw
	 * mod-param value and may override the persistent-topology value
	 * chosen inside ocs_topology_setup() above — confirm intentional. */
	if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc);
		return -1;
	}
	ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT);

	if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc);
		return -1;
	}

	if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc);
		return -1;
	}

	/* currently only lancer support setting the CRC seed value */
	if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) {
		if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc);
			return -1;
		}
	}

	/* Set the Dif mode */
	if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) {
		if (dif_capable) {
			if (ocs_get_property("dif_separate", prop_buf, sizeof(prop_buf)) == 0) {
				dif_separate = ocs_strtoul(prop_buf, 0, 0);
			}

			if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE,
			    (dif_separate == 0 ? OCS_HW_DIF_MODE_INLINE : OCS_HW_DIF_MODE_SEPARATE)))) {
				/* Non-fatal: log and continue without the requested mode */
				ocs_log_err(ocs, "Requested DIF MODE not supported\n");
			}
		}
	}

	if (ocs->target_io_timer_sec || ocs->enable_ini) {
		if (ocs->target_io_timer_sec)
			ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec);

		ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_WQE_TIMEOUT, TRUE);
	}

	/* Register transport event callbacks with the HW layer */
	ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs);
	ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs);
	ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs);
	ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs);

	ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV);

	/* Initialize vport list */
	ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link);
	ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index);
	ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link);
	ocs_atomic_init(&xport->io_active_count, 0);
	ocs_atomic_init(&xport->io_pending_count, 0);
	ocs_atomic_init(&xport->io_total_free, 0);
	ocs_atomic_init(&xport->io_total_pending, 0);
	ocs_atomic_init(&xport->io_alloc_failed_count, 0);
	ocs_atomic_init(&xport->io_pending_recursing, 0);
	ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs));
	rc = ocs_hw_init(&ocs->hw);
	if (rc) {
		ocs_log_err(ocs, "ocs_hw_init failure\n");
		goto ocs_xport_init_cleanup;
	} else {
		hw_initialized = TRUE;
	}

	/* Cap RQ processing at half the IO pool; failure is non-fatal */
	rq_limit = max_hw_io/2;
	if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc);
	}

	if (ocs->config_tgt) {
		rc = ocs_scsi_tgt_new_device(ocs);
		if (rc) {
			ocs_log_err(ocs, "failed to initialize target\n");
			goto ocs_xport_init_cleanup;
		} else {
			tgt_device_set = TRUE;
		}
	}

	if (ocs->enable_ini) {
		rc = ocs_scsi_ini_new_device(ocs);
		if (rc) {
			ocs_log_err(ocs, "failed to initialize initiator\n");
			goto ocs_xport_init_cleanup;
		} else {
			ini_device_set = TRUE;
		}
	}

	/* Add vports */
	if (ocs->num_vports != 0) {
		uint32_t max_vports;
		ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports);

		if (ocs->num_vports < max_vports) {
			ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports);
			for (i = 0; i < ocs->num_vports; i++) {
				ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL);
			}
		} else {
			ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1);
			goto ocs_xport_init_cleanup;
		}
	}

	return 0;

ocs_xport_init_cleanup:
	/* Unwind only the pieces that were successfully initialized */
	if (ini_device_set) {
		ocs_scsi_ini_del_device(ocs);
	}

	if (tgt_device_set) {
		ocs_scsi_tgt_del_device(ocs);
	}

	if (hw_initialized) {
		/* ocs_hw_teardown can only execute after ocs_hw_init */
		ocs_hw_teardown(&ocs->hw);
	}

	return -1;
}
/**
 * @brief Detaches the transport from the device.
 *
 * @par Description
 * Performs the functions required to shut down a device: releases the
 * target-server and initiator-client resources, stops the FC statistics
 * timer if pending, then tears down the HW.
 *
 * @param xport Pointer to transport object.
 *
 * @return Returns 0 on success or a non-zero value on failure.
 */
int32_t
ocs_xport_detach(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;

	/* Release backend resources in the reverse order of initialization */
	if (ocs->config_tgt) {
		ocs_scsi_tgt_del_device(ocs);
	}

	if (ocs->enable_ini) {
		ocs_scsi_ini_del_device(ocs);

		/* Shut down the FC statistics timer if it is still armed */
		if (ocs_timer_pending(&ocs->xport->stats_timer)) {
			ocs_del_timer(&ocs->xport->stats_timer);
		}
	}

	ocs_hw_teardown(&ocs->hw);

	return 0;
}
/**
 * @brief domain list empty callback
 *
 * @par Description
 * Function is invoked when the device domain list goes empty. By convention
 * @c arg points to an ocs_sem_t instance, that is incremented.
 *
 * @param ocs Pointer to device object.
 * @param arg Pointer to semaphore instance.
 *
 * @return None.
 */
static void
ocs_xport_domain_list_empty_cb(ocs_t *ocs, void *arg)
{
	ocs_assert(ocs);
	ocs_assert(arg);

	/* Wake whoever is waiting on the domain list to drain */
	ocs_sem_v((ocs_sem_t *)arg);
}
/**
 * @brief post node event callback
 *
 * @par Description
 * This function is called from the mailbox completion interrupt context to post an
 * event to a node object. By doing this in the interrupt context, it has
 * the benefit of only posting events in the interrupt context, deferring the need to
 * create a per event node lock.
 *
 * @param hw Pointer to HW structure.
 * @param status Completion status for mailbox command.
 * @param mqe Mailbox queue completion entry.
 * @param arg Callback argument.
 *
 * @return Returns 0 on success, a negative error code value on failure.
 */
static int32_t
ocs_xport_post_node_event_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_xport_post_node_event_t *payload = arg;

	if (payload == NULL) {
		return 0;
	}

	/* Deliver the deferred event, then release the caller waiting on it */
	ocs_node_post_event(payload->node, payload->evt, payload->context);
	ocs_sem_v(&payload->sem);

	return 0;
}
/**
 * @brief Initiate force free.
 *
 * @par Description
 * Perform force free of OCS: every domain on the device list is
 * force-freed under the device lock.
 *
 * @param xport Pointer to transport object.
 *
 * @return None.
 */
static void
ocs_xport_force_free(ocs_xport_t *xport)
{
	ocs_t *ocs = xport->ocs;
	ocs_domain_t *cur;
	ocs_domain_t *nxt;

	ocs_log_debug(ocs, "reset required, do force shutdown\n");

	/* _safe iteration: ocs_domain_force_free removes entries */
	ocs_device_lock(ocs);
	ocs_list_foreach_safe(&ocs->domain_list, cur, nxt) {
		ocs_domain_force_free(cur);
	}
	ocs_device_unlock(ocs);
}
/**
 * @brief Perform transport attach function.
 *
 * @par Description
 * Perform the attach function, which for the FC transport makes a HW call
 * to bring up the link. Variadic arguments depend on @c cmd (see below).
 *
 * @param xport pointer to transport object.
 * @param cmd command to execute.
 *
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_ONLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_OFFLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_SHUTDOWN)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_POST_NODE_EVENT, ocs_node_t *node, ocs_sm_event_t, void *context)
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...)
{
	uint32_t rc = 0;
	ocs_t *ocs = NULL;
	va_list argp;

	ocs_assert(xport, -1);
	ocs_assert(xport->ocs, -1);
	ocs = xport->ocs;

	switch (cmd) {
	case OCS_XPORT_PORT_ONLINE: {
		/* Bring the port on-line; remember the requested state so
		 * OCS_XPORT_CONFIG_PORT_STATUS can report it later */
		rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
		if (rc) {
			ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc);
		} else {
			xport->configured_link_state = cmd;
		}
		break;
	}
	case OCS_XPORT_PORT_OFFLINE: {
		/* Take the port off-line; state recorded only on success */
		if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
			ocs_log_err(ocs, "port shutdown failed\n");
		} else {
			xport->configured_link_state = cmd;
		}
		break;
	}

	case OCS_XPORT_SHUTDOWN: {
		ocs_sem_t sem;
		uint32_t reset_required;

		/* if a PHYSDEV reset was performed (e.g. hw dump), will affect
		 * all PCI functions; orderly shutdown won't work, just force free
		 */
		/* TODO: need to poll this regularly... */
		if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) {
			/* If the query itself fails, assume no reset pending */
			reset_required = 0;
		}

		if (reset_required) {
			ocs_log_debug(ocs, "reset required, do force shutdown\n");
			ocs_xport_force_free(xport);
			break;
		}
		/* Orderly shutdown: wait (bounded) for the domain list to
		 * drain; the registered callback posts the semaphore */
		ocs_sem_init(&sem, 0, "domain_list_sem");
		ocs_register_domain_list_empty_cb(ocs, ocs_xport_domain_list_empty_cb, &sem);

		if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
			ocs_log_debug(ocs, "port shutdown failed, do force shutdown\n");
			ocs_xport_force_free(xport);
		} else {
			ocs_log_debug(ocs, "Waiting %d seconds for domain shutdown.\n", (OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC/1000000));

			rc = ocs_sem_p(&sem, OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC);
			if (rc) {
				/* Timed out waiting; fall back to force free */
				ocs_log_debug(ocs, "Note: Domain shutdown timed out\n");
				ocs_xport_force_free(xport);
			}
		}

		/* Unregister so the stack-allocated semaphore is not touched
		 * after this function returns */
		ocs_register_domain_list_empty_cb(ocs, NULL, NULL);

		/* Free up any saved virtual ports */
		ocs_vport_del_all(ocs);
		break;
	}

	/*
	 * POST_NODE_EVENT: post an event to a node object
	 *
	 * This transport function is used to post an event to a node object. It does
	 * this by submitting a NOP mailbox command to defer execution to the
	 * interrupt context (thereby enforcing the serialized execution of event posting
	 * to the node state machine instances)
	 *
	 * A counting semaphore is used to make the call synchronous (we wait until
	 * the callback increments the semaphore before returning (or times out)
	 */
	case OCS_XPORT_POST_NODE_EVENT: {
		ocs_node_t *node;
		ocs_sm_event_t evt;
		void *context;
		ocs_xport_post_node_event_t payload;
		ocs_t *ocs;
		ocs_hw_t *hw;

		/* Retrieve arguments (must match caller's argument order) */
		va_start(argp, cmd);
		node = va_arg(argp, ocs_node_t*);
		evt = va_arg(argp, ocs_sm_event_t);
		context = va_arg(argp, void *);
		va_end(argp);

		ocs_assert(node, -1);
		ocs_assert(node->ocs, -1);

		ocs = node->ocs;
		hw = &ocs->hw;

		/* if node's state machine is disabled, don't bother continuing */
		if (!node->sm.current_state) {
			ocs_log_test(ocs, "node %p state machine disabled\n", node);
			return -1;
		}

		/* Setup payload; the callback posts payload.sem when done */
		ocs_memset(&payload, 0, sizeof(payload));
		ocs_sem_init(&payload.sem, 0, "xport_post_node_Event");
		payload.node = node;
		payload.evt = evt;
		payload.context = context;

		if (ocs_hw_async_call(hw, ocs_xport_post_node_event_cb, &payload)) {
			ocs_log_test(ocs, "ocs_hw_async_call failed\n");
			rc = -1;
			break;
		}

		/* Wait for completion so the stack payload stays valid */
		if (ocs_sem_p(&payload.sem, OCS_SEM_FOREVER)) {
			ocs_log_test(ocs, "POST_NODE_EVENT: sem wait failed\n");
			rc = -1;
		}

		break;
	}
	/*
	 * Set wwnn for the port. This will be used instead of the default provided by FW.
	 */
	case OCS_XPORT_WWNN_SET: {
		uint64_t wwnn;

		/* Retrieve arguments */
		va_start(argp, cmd);
		wwnn = va_arg(argp, uint64_t);
		va_end(argp);

		ocs_log_debug(ocs, " WWNN %016" PRIx64 "\n", wwnn);
		xport->req_wwnn = wwnn;

		break;
	}
	/*
	 * Set wwpn for the port. This will be used instead of the default provided by FW.
	 */
	case OCS_XPORT_WWPN_SET: {
		uint64_t wwpn;

		/* Retrieve arguments */
		va_start(argp, cmd);
		wwpn = va_arg(argp, uint64_t);
		va_end(argp);

		ocs_log_debug(ocs, " WWPN %016" PRIx64 "\n", wwpn);
		xport->req_wwpn = wwpn;

		break;
	}

	default:
		/* Unknown commands are silently ignored (rc stays 0) */
		break;
	}
	return rc;
}
/**
 * @brief Return status on a link.
 *
 * @par Description
 * Returns status information about a link.
 *
 * @param xport Pointer to transport object.
 * @param cmd Command to execute.
 * @param result Pointer to result value.
 *
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_PORT_STATUS)
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_SPEED, ocs_xport_stats_t *result)
 *	return link speed in MB/sec
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, ocs_xport_stats_t *result)
 *	[in] *result is speed to check in MB/s
 *	returns 1 if supported, 0 if not
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STATISTICS, ocs_xport_stats_t *result)
 *	return link/host port stats
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STAT_RESET, ocs_xport_stats_t *result)
 *	resets link/host stats
 *
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result)
{
	uint32_t rc = 0;
	ocs_t *ocs = NULL;
	ocs_xport_stats_t value;
	ocs_hw_rtn_e hw_rc;

	ocs_assert(xport, -1);
	ocs_assert(xport->ocs, -1);

	ocs = xport->ocs;

	switch (cmd) {
	case OCS_XPORT_CONFIG_PORT_STATUS:
		ocs_assert(result, -1);
		if (xport->configured_link_state == 0) {
			/* Initial state is offline. configured_link_state is */
			/* set to online explicitly when port is brought online. */
			xport->configured_link_state = OCS_XPORT_PORT_OFFLINE;
		}
		result->value = xport->configured_link_state;
		break;

	case OCS_XPORT_PORT_STATUS:
		ocs_assert(result, -1);
		/* Determine port status based on link speed: non-zero speed
		 * means the link is up (1), zero means down (0). */
		hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value);
		if (hw_rc == OCS_HW_RTN_SUCCESS) {
			if (value.value == 0) {
				result->value = 0;
			} else {
				result->value = 1;
			}
			rc = 0;
		} else {
			rc = -1;
		}
		break;

	case OCS_XPORT_LINK_SPEED: {
		uint32_t speed;

		ocs_assert(result, -1);
		result->value = 0;

		rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed);
		if (rc == 0) {
			result->value = speed;
		}
		break;
	}

	case OCS_XPORT_IS_SUPPORTED_LINK_SPEED: {
		uint32_t speed;
		uint32_t link_module_type;

		/* *result is both input (requested speed in MB/s) and the
		 * return code carries the supported/unsupported answer */
		ocs_assert(result, -1);
		speed = result->value;

		rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type);
		if (rc == 0) {
			switch(speed) {
			case 1000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_1GB) != 0; break;
			case 2000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_2GB) != 0; break;
			case 4000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_4GB) != 0; break;
			case 8000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_8GB) != 0; break;
			case 10000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_10GB) != 0; break;
			case 16000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_16GB) != 0; break;
			case 32000:	rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_32GB) != 0; break;
			default:	rc = 0; break;
			}
		} else {
			rc = 0;
		}
		break;
	}
	case OCS_XPORT_LINK_STATISTICS: 
		/* Copy the cached stats snapshot under the device lock */
		ocs_device_lock(ocs);
		ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t));
		ocs_device_unlock(ocs);
		break;
	case OCS_XPORT_LINK_STAT_RESET: {
		/* Create a semaphore to synchronize the stat reset process. */
		ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset");

		/* First reset the link stats */
		if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) {
			ocs_log_err(ocs, "%s: Failed to reset link statistics\n", __func__);
			break;
		}

		/* Wait for semaphore to be signaled when the command completes */
		/* TODO:  Should there be a timeout on this?  If so, how long? */
		if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
			/* Undefined failure */
			ocs_log_test(ocs, "ocs_sem_p failed\n");
			rc = -ENXIO;
			break;
		}

		/* Next reset the host stats */
		if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1,  ocs_xport_host_stats_cb, result)) != 0) {
			ocs_log_err(ocs, "%s: Failed to reset host statistics\n", __func__);
			break;
		}

		/* Wait for semaphore to be signaled when the command completes */
		if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
			/* Undefined failure */
			ocs_log_test(ocs, "ocs_sem_p failed\n");
			rc = -ENXIO;
			break;
		}
		break;
	}
	case OCS_XPORT_IS_QUIESCED:
		/* Quiesced == no domains remain on the device list */
		ocs_device_lock(ocs);
		result->value = ocs_list_empty(&ocs->domain_list);
		ocs_device_unlock(ocs);
		break;
	default:
		rc = -1;
		break;
	}
	return rc;
}
/**
 * @brief Link statistics completion callback.
 *
 * Copies the HW link error counters into the caller's result structure and
 * posts the semaphore the caller (ocs_xport_status) is blocked on.
 */
static void
ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg)
{
	ocs_xport_stats_t *stats = arg;

	stats->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter;
	stats->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
	stats->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
	stats->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
	stats->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;

	/* Release the waiter */
	ocs_sem_v(&stats->stats.semaphore);
}
/**
 * @brief Host statistics completion callback.
 *
 * Copies the HW host throughput counters into the caller's result structure
 * and posts the semaphore the caller (ocs_xport_status) is blocked on.
 */
static void
ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg)
{
	ocs_xport_stats_t *stats = arg;

	stats->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter;
	stats->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter;
	stats->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
	stats->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;

	/* Release the waiter */
	ocs_sem_v(&stats->stats.semaphore);
}
/**
 * @brief Free a transport object.
 *
 * @par Description
 * The transport object is freed: IO and node pools, the pending-IO lock
 * (if initialized), per-FCFI pending-frame locks, RQ threads, and finally
 * the transport structure itself.
 *
 * @param xport Pointer to transport object (NULL is a no-op).
 *
 * @return None.
 */
void
ocs_xport_free(ocs_xport_t *xport)
{
	ocs_t *ocs;
	uint32_t i;

	if (xport == NULL) {
		return;
	}

	ocs = xport->ocs;
	ocs_io_pool_free(xport->io_pool);
	ocs_node_free_pool(ocs);

	/* The lock may never have been initialized if attach failed early */
	if (mtx_initialized(&xport->io_pending_lock.lock)) {
		ocs_lock_free(&xport->io_pending_lock);
	}

	for (i = 0; i < SLI4_MAX_FCFI; i++) {
		ocs_lock_free(&xport->fcfi[i].pend_frames_lock);
	}

	ocs_xport_rq_threads_teardown(xport);

	ocs_free(ocs, xport, sizeof(*xport));
}